
Commit c6db833

timofeev1995 and mryab authored

Add Hugging Face Hub upload option (#333)

* WIP extend continued FT
* remove the hf training ckpt
* Bump version
* Add fixes
* Add to create finetuning request
* Fix naming
* Fix
* Fix
* Wording

Co-authored-by: Max Ryabinin <[email protected]>

1 parent a0fadd3 · commit c6db833
File tree: 4 files changed (+37 −2 lines)

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@ build-backend = "poetry.masonry.api"
 
 [tool.poetry]
 name = "together"
-version = "1.5.17"
+version = "1.5.18"
 authors = ["Together AI <[email protected]>"]
 description = "Python client for Together's Cloud Platform!"
 readme = "README.md"

src/together/cli/api/finetune.py

Lines changed: 17 additions & 1 deletion
@@ -200,6 +200,18 @@ def fine_tuning(ctx: click.Context) -> None:
     "The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}. "
     "The step value is optional, without it the final checkpoint will be used.",
 )
+@click.option(
+    "--hf-api-token",
+    type=str,
+    default=None,
+    help="HF API token to use for uploading a checkpoint to a private repo",
+)
+@click.option(
+    "--hf-output-repo-name",
+    type=str,
+    default=None,
+    help="HF repo to upload the fine-tuned model to",
+)
 def create(
     ctx: click.Context,
     training_file: str,
@@ -234,6 +246,8 @@ def create(
     rpo_alpha: float | None,
     simpo_gamma: float | None,
     from_checkpoint: str,
+    hf_api_token: str | None,
+    hf_output_repo_name: str | None,
 ) -> None:
     """Start fine-tuning"""
     client: Together = ctx.obj
@@ -270,6 +284,8 @@ def create(
         rpo_alpha=rpo_alpha,
         simpo_gamma=simpo_gamma,
         from_checkpoint=from_checkpoint,
+        hf_api_token=hf_api_token,
+        hf_output_repo_name=hf_output_repo_name,
     )
 
     if model is None and from_checkpoint is None:
@@ -280,7 +296,7 @@ def create(
         model_name = from_checkpoint.split(":")[0]
 
     model_limits: FinetuneTrainingLimits = client.fine_tuning.get_model_limits(
-        model=model_name
+        model=model_name,
     )
 
     if lora:
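The two new flags surface on the `together fine-tuning create` command. A minimal sketch, not part of the commit, of confirming they are registered without submitting a job; it assumes this version of the `together` package is installed, that the `fine_tuning` group imports from the path shown above, and that the group callback needs no prior context setup (`--help` exits before any API call):

# Hypothetical check, assuming together.cli.api.finetune exposes the
# `fine_tuning` click group shown in the diff above.
from click.testing import CliRunner

from together.cli.api.finetune import fine_tuning

runner = CliRunner()
result = runner.invoke(fine_tuning, ["create", "--help"])

# Both new Hugging Face options should appear in the generated help text.
assert "--hf-api-token" in result.output
assert "--hf-output-repo-name" in result.output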

src/together/resources/finetune.py

Lines changed: 16 additions & 0 deletions
@@ -76,6 +76,8 @@ def create_finetune_request(
     rpo_alpha: float | None = None,
     simpo_gamma: float | None = None,
     from_checkpoint: str | None = None,
+    hf_api_token: str | None = None,
+    hf_output_repo_name: str | None = None,
 ) -> FinetuneRequest:
     if model is not None and from_checkpoint is not None:
         raise ValueError(
@@ -262,6 +264,8 @@ def create_finetune_request(
         wandb_name=wandb_name,
         training_method=training_method_cls,
         from_checkpoint=from_checkpoint,
+        hf_api_token=hf_api_token,
+        hf_output_repo_name=hf_output_repo_name,
     )
 
     return finetune_request
@@ -341,6 +345,8 @@ def create(
         rpo_alpha: float | None = None,
         simpo_gamma: float | None = None,
         from_checkpoint: str | None = None,
+        hf_api_token: str | None = None,
+        hf_output_repo_name: str | None = None,
     ) -> FinetuneResponse:
         """
         Method to initiate a fine-tuning job
@@ -397,6 +403,8 @@ def create(
             from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
                 The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
                 The step value is optional, without it the final checkpoint will be used.
+            hf_api_token (str, optional): API key for the Hugging Face Hub. Defaults to None.
+            hf_output_repo_name (str, optional): HF repo to upload the fine-tuned model to. Defaults to None.
 
         Returns:
             FinetuneResponse: Object containing information about fine-tuning job.
@@ -450,6 +458,8 @@ def create(
             rpo_alpha=rpo_alpha,
             simpo_gamma=simpo_gamma,
             from_checkpoint=from_checkpoint,
+            hf_api_token=hf_api_token,
+            hf_output_repo_name=hf_output_repo_name,
         )
 
         if verbose:
@@ -762,6 +772,8 @@ async def create(
         rpo_alpha: float | None = None,
         simpo_gamma: float | None = None,
         from_checkpoint: str | None = None,
+        hf_api_token: str | None = None,
+        hf_output_repo_name: str | None = None,
     ) -> FinetuneResponse:
         """
         Async method to initiate a fine-tuning job
@@ -818,6 +830,8 @@ async def create(
             from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
                 The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
                 The step value is optional, without it the final checkpoint will be used.
+            hf_api_token (str, optional): API key for the Hugging Face Hub. Defaults to None.
+            hf_output_repo_name (str, optional): HF repo to upload the fine-tuned model to. Defaults to None.
 
         Returns:
             FinetuneResponse: Object containing information about fine-tuning job.
@@ -871,6 +885,8 @@ async def create(
             rpo_alpha=rpo_alpha,
             simpo_gamma=simpo_gamma,
             from_checkpoint=from_checkpoint,
+            hf_api_token=hf_api_token,
+            hf_output_repo_name=hf_output_repo_name,
        )
 
         if verbose:

src/together/types/finetune.py

Lines changed: 3 additions & 0 deletions
@@ -212,6 +212,9 @@ class FinetuneRequest(BaseModel):
     )
     # from step
     from_checkpoint: str | None = None
+    # hf related fields
+    hf_api_token: str | None = None
+    hf_output_repo_name: str | None = None
 
 
 class FinetuneResponse(BaseModel):
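Because `FinetuneRequest` is a pydantic model, the two new fields default to None and simply ride along in the serialized job payload when set. A minimal stand-in sketch (the real model has many more required fields, elided here; requires pydantic v2 for `model_dump_json`):

from pydantic import BaseModel

# Stripped-down stand-in for FinetuneRequest: only the fields added in
# this commit plus the neighboring from_checkpoint field are reproduced.
class RequestSketch(BaseModel):
    from_checkpoint: str | None = None
    # hf related fields
    hf_api_token: str | None = None
    hf_output_repo_name: str | None = None

req = RequestSketch(
    hf_api_token="hf_xxx",  # placeholder token
    hf_output_repo_name="your-hf-username/your-finetuned-model",  # placeholder
)
print(req.model_dump_json())
# -> {"from_checkpoint":null,"hf_api_token":"hf_xxx","hf_output_repo_name":"your-hf-username/your-finetuned-model"}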
