Add trust_remote_code option to finetune shells
wheresmyhair committed Apr 24, 2024
1 parent cf5f50c commit 5a526a7
Showing 5 changed files with 40 additions and 0 deletions.
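
The new flag can be passed to any of the five scripts. Below is a minimal usage sketch for scripts/run_finetune.sh; it assumes the script's existing --model_name_or_path and --dataset_path options (collapsed in this diff), and the model and dataset paths are placeholders rather than values taken from this commit:

# Hypothetical invocation: opt in to custom modeling code shipped with a checkpoint.
# Only set this to 1 for model repositories you trust, since their Python code gets executed.
bash scripts/run_finetune.sh \
  --model_name_or_path some-org/model-with-remote-code \
  --dataset_path data/alpaca/train_conversation \
  --trust_remote_code 1

The default trust_remote_code=0 presumably keeps remote code disabled, and the value is forwarded verbatim to examples/finetune.py, as shown in each diff below.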
scripts/run_finetune.sh (8 additions, 0 deletions)
@@ -10,6 +10,9 @@ output_dir=output_models/finetune
deepspeed_args="--master_port=11000"
conversation_template=llama2

# Safety related arguments
trust_remote_code=0

while [[ $# -ge 1 ]]; do
key="$1"
case ${key} in
@@ -33,6 +36,10 @@ while [[ $# -ge 1 ]]; do
deepspeed_args="$2"
shift
;;
--trust_remote_code)
trust_remote_code="$2"
shift
;;
*)
echo "error: unknown option \"${key}\"" 1>&2
exit 1
@@ -49,6 +56,7 @@ mkdir -p ${output_dir} ${log_dir}
deepspeed ${deepspeed_args} \
examples/finetune.py \
--model_name_or_path ${model_name_or_path} \
--trust_remote_code ${trust_remote_code} \
--dataset_path ${dataset_path} \
--output_dir ${output_dir} --overwrite_output_dir \
--conversation_template ${conversation_template} \
scripts/run_finetune_with_lisa.sh (8 additions, 0 deletions)
@@ -18,6 +18,9 @@ block_size=256
per_device_train_batch_size=1
conversation_template=llama2

# Safety related arguments
trust_remote_code=0

# Enable model parallelism for multiple gpus, modify this if you prefer
# customized deepspeed zero-redundancy optimization settings
num_gpu=$(python -c "import torch; print(torch.cuda.device_count())")
@@ -77,6 +80,10 @@ while [[ $# -ge 1 ]]; do
per_device_train_batch_size="$2"
shift
;;
--trust_remote_code)
trust_remote_code="$2"
shift
;;
*)
echo "error: unknown option \"${key}\"" 1>&2
exit 1
@@ -92,6 +99,7 @@ mkdir -p ${output_dir} ${log_dir}

python examples/finetune.py \
--model_name_or_path ${model_name_or_path} \
--trust_remote_code ${trust_remote_code} \
--dataset_path ${dataset_path} \
--output_dir ${output_dir} --overwrite_output_dir \
--conversation_template ${conversation_template} \
scripts/run_finetune_with_lora.sh (8 additions, 0 deletions)
@@ -8,6 +8,9 @@ conversation_template=llama2
output_dir=output_models/finetune
deepspeed_args="--master_port=11000"

# Safety related arguments
trust_remote_code=0

while [[ $# -ge 1 ]]; do
key="$1"
case ${key} in
@@ -31,6 +34,10 @@ while [[ $# -ge 1 ]]; do
deepspeed_args="$2"
shift
;;
--trust_remote_code)
trust_remote_code="$2"
shift
;;
*)
echo "error: unknown option \"${key}\"" 1>&2
exit 1
@@ -47,6 +54,7 @@ mkdir -p ${output_dir} ${log_dir}
deepspeed ${deepspeed_args} \
examples/finetune.py \
--model_name_or_path ${model_name_or_path} \
--trust_remote_code ${trust_remote_code} \
--dataset_path ${dataset_path} \
--conversation_template ${conversation_template} \
--output_dir ${output_dir} --overwrite_output_dir \
scripts/run_finetune_with_lora_save_aggregated_weights.sh (8 additions, 0 deletions)
@@ -8,6 +8,9 @@ conversation_template=llama2
output_dir=output_models/finetune
deepspeed_args="--master_port=11000"

# Safety related arguments
trust_remote_code=0

while [[ $# -ge 1 ]]; do
key="$1"
case ${key} in
@@ -31,6 +34,10 @@ while [[ $# -ge 1 ]]; do
deepspeed_args="$2"
shift
;;
--trust_remote_code)
trust_remote_code="$2"
shift
;;
*)
echo "error: unknown option \"${key}\"" 1>&2
exit 1
@@ -47,6 +54,7 @@ mkdir -p ${output_dir} ${log_dir}
deepspeed ${deepspeed_args} \
examples/finetune.py \
--model_name_or_path ${model_name_or_path} \
--trust_remote_code ${trust_remote_code} \
--dataset_path ${dataset_path} \
--conversation_template ${conversation_template} \
--output_dir ${output_dir} --overwrite_output_dir \
scripts/run_finetune_with_qlora_save_aggregated_weights.sh (8 additions, 0 deletions)
@@ -8,6 +8,9 @@ conversation_template=llama2
output_dir=output_models/finetune
deepspeed_args="--master_port=11000"

# Safety related arguments
trust_remote_code=0

while [[ $# -ge 1 ]]; do
key="$1"
case ${key} in
@@ -31,6 +34,10 @@ while [[ $# -ge 1 ]]; do
deepspeed_args="$2"
shift
;;
--trust_remote_code)
trust_remote_code="$2"
shift
;;
*)
echo "error: unknown option \"${key}\"" 1>&2
exit 1
@@ -47,6 +54,7 @@ mkdir -p ${output_dir} ${log_dir}
deepspeed ${deepspeed_args} \
examples/finetune.py \
--model_name_or_path ${model_name_or_path} \
--trust_remote_code ${trust_remote_code} \
--dataset_path ${dataset_path} \
--conversation_template ${conversation_template} \
--output_dir ${output_dir} --overwrite_output_dir \
