
Commit

lint
hiyouga committed Jun 17, 2024
1 parent 7857c09 commit 24c160d
Showing 2 changed files with 2 additions and 2 deletions.
2 changes: 1 addition & 1 deletion scripts/pissa_init.py
@@ -50,7 +50,7 @@ def quantize_pissa(
lora_alpha=lora_alpha if lora_alpha is not None else lora_rank * 2,
lora_dropout=lora_dropout,
target_modules=[name.strip() for name in lora_target.split(",")],
init_lora_weights="pissa" if pissa_iter == -1 else "pissa_niter_{}".format(pissa_iter)
init_lora_weights="pissa" if pissa_iter == -1 else "pissa_niter_{}".format(pissa_iter),
)

# Init PiSSA model
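For context, the trailing comma lands inside a LoraConfig-style call that drives PiSSA initialization. Below is a minimal sketch of how such a config is typically built with the peft library (peft >= 0.11 accepts init_lora_weights="pissa" or "pissa_niter_[k]"); the rank, target modules, and model name are illustrative assumptions, not values taken from this repository.

# Sketch only: assumes peft >= 0.11 (which supports PiSSA init) and an
# illustrative base model; pissa_iter mirrors the variable in the diff.
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

pissa_iter = 16  # hypothetical value; -1 would request exact SVD-based init

lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.0,
    target_modules=["q_proj", "v_proj"],
    # The comma added by this commit terminates this trailing argument.
    init_lora_weights="pissa" if pissa_iter == -1 else "pissa_niter_{}".format(pissa_iter),
)

model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
model = get_peft_model(model, lora_config)  # splits W into principal (adapter) and residual parts via SVD

The "pissa_niter_{k}" form asks peft to run k iterations of randomized SVD instead of a full decomposition, trading some precision in the principal components for a much faster initialization.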
2 changes: 1 addition & 1 deletion src/llamafactory/hparams/finetuning_args.py
@@ -352,7 +352,7 @@ def split_arg(arg):
self.additional_target: Optional[List[str]] = split_arg(self.additional_target)
self.galore_target: List[str] = split_arg(self.galore_target)
self.freeze_vision_tower = self.freeze_vision_tower or self.train_mm_proj_only
-        self.use_ref_model = (self.stage == "dpo" and self.pref_loss not in ["orpo", "simpo"])
+        self.use_ref_model = self.stage == "dpo" and self.pref_loss not in ["orpo", "simpo"]

assert self.finetuning_type in ["lora", "freeze", "full"], "Invalid fine-tuning method."
assert self.ref_model_quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization."
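The parenthesis removal here is purely stylistic, but the line itself encodes a real rule: ORPO and SimPO are reference-free preference losses, so a frozen reference model is only needed for the remaining DPO variants. A minimal sketch of that post-init logic follows; the dataclass name and field defaults are assumptions for illustration, and only the changed line is taken from the diff.

# Sketch only: an illustrative stand-in for the real FinetuningArguments class.
from dataclasses import dataclass

@dataclass
class FinetuningArgsSketch:
    stage: str = "sft"
    pref_loss: str = "sigmoid"

    def __post_init__(self):
        # ORPO and SimPO compute their preference losses without a reference
        # policy, so only the other DPO losses load a frozen reference model.
        self.use_ref_model = self.stage == "dpo" and self.pref_loss not in ["orpo", "simpo"]

assert FinetuningArgsSketch(stage="dpo", pref_loss="sigmoid").use_ref_model
assert not FinetuningArgsSketch(stage="dpo", pref_loss="simpo").use_ref_model
assert not FinetuningArgsSketch(stage="sft").use_ref_model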
