diff --git a/README.md b/README.md
index 541fe546a..b5bc4015c 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@ English | [简体中文](README_zh-CN.md)
## 🎉 News
-- **\[2023/10\]** Support [ChatGLM3-6B-Base](https://huggingface.co/THUDM/chatglm3-6b-base) model!
+- **\[2023/10\]** Support [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b) model!
- **\[2023/10\]** Support [MSAgent-Bench](https://modelscope.cn/datasets/damo/MSAgent-Bench) dataset, and the fine-tuned LLMs can be used with [Lagent](https://github.com/InternLM/lagent)!
- **\[2023/10\]** Optimize the data processing to accommodate `system` context. More information can be found on [Docs](docs/en/user_guides/dataset_format.md)!
- **\[2023/09\]** Support [InternLM-20B](https://huggingface.co/internlm) models!
@@ -79,7 +79,7 @@ XTuner is a toolkit for efficiently fine-tuning LLM, developed by the [MMRazor](
Llama
Llama2
ChatGLM2
- ChatGLM3
+ ChatGLM3
Qwen
Baichuan
Baichuan2
diff --git a/README_zh-CN.md b/README_zh-CN.md
index 67b5f2bb8..bacb040df 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -14,7 +14,7 @@
## 🎉 更新
-- **\[2023/10\]** 支持 [ChatGLM3-6B-Base](https://huggingface.co/THUDM/chatglm3-6b-base) 模型!
+- **\[2023/10\]** 支持 [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b) 模型!
- **\[2023/10\]** 支持 [MSAgent-Bench](https://modelscope.cn/datasets/damo/MSAgent-Bench) 数据集,并且微调所得大语言模型可应用至 [Lagent](https://github.com/InternLM/lagent) 框架!
- **\[2023/10\]** 优化数据处理逻辑以兼容 `system` 字段,相关细节请查阅[文档](docs/zh_cn/user_guides/dataset_format.md)!
- **\[2023/09\]** 支持 [InternLM-20B](https://huggingface.co/internlm) 系列模型!
@@ -79,7 +79,7 @@ XTuner 是一个轻量级微调大语言模型的工具库,由 [MMRazor](https
Llama
Llama2
ChatGLM2
- ChatGLM3
+ ChatGLM3
Qwen
Baichuan
Baichuan2
diff --git a/xtuner/apis/model.py b/xtuner/apis/model.py
index 143883c47..efd9370df 100644
--- a/xtuner/apis/model.py
+++ b/xtuner/apis/model.py
@@ -41,7 +41,9 @@ def build_qlora_model(model_name_or_path,
if return_tokenizer:
tokenizer = AutoTokenizer.from_pretrained(
- model_name_or_path, trust_remote_code=True)
+ model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True)
return model.llm, tokenizer
else:
return model.llm
@@ -65,7 +67,9 @@ def build_lora_model(model_name_or_path,
if return_tokenizer:
tokenizer = AutoTokenizer.from_pretrained(
- model_name_or_path, trust_remote_code=True)
+ model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True)
return model.llm, tokenizer
else:
return model.llm
@@ -77,7 +81,9 @@ def build_model(model_name_or_path, return_tokenizer=True):
if return_tokenizer:
tokenizer = AutoTokenizer.from_pretrained(
- model_name_or_path, trust_remote_code=True)
+ model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True)
return model, tokenizer
else:
return model
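Note on the `encode_special_tokens=True` additions above: ChatGLM3's remote-code tokenizer accepts this flag so that role markers such as `<|user|>` and `<|assistant|>` are encoded as dedicated special-token ids instead of being split into plain-text pieces. A minimal sketch of the difference (the marker strings are assumed from the ChatGLM3 chat template and may vary across model revisions):

```python
from transformers import AutoTokenizer

# Hedged sketch: requires network access and trust_remote_code, since the
# tokenizer class ships with the model repo.
tok = AutoTokenizer.from_pretrained(
    'THUDM/chatglm3-6b',
    trust_remote_code=True,
    encode_special_tokens=True)

ids = tok.encode('<|user|>\nHi\n<|assistant|>\n')
# With encode_special_tokens=False, '<|user|>' would be tokenized as the
# literal characters '<', '|', 'user', ... rather than one special id.
print(tok.convert_ids_to_tokens(ids))
```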
diff --git a/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_alpaca_e3.py b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_alpaca_e3.py
new file mode 100644
index 000000000..59debe384
--- /dev/null
+++ b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_alpaca_e3.py
@@ -0,0 +1,183 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import alpaca_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'THUDM/chatglm3-6b'
+
+# Data
+alpaca_en_path = 'tatsu-lab/alpaca'
+prompt_template = PROMPT_TEMPLATE.chatglm3
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate generation performance during training
+evaluation_freq = 500
+SYSTEM = SYSTEM_TEMPLATE.alpaca
+evaluation_inputs = [
+ '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True,
+ padding_side='left')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+alpaca_en = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=alpaca_en_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=alpaca_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=alpaca_en,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log sample dialogues periodically during training (optional)
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ system=SYSTEM,
+ prompt_template=prompt_template)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
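The file above (and the near-identical configs that follow) is an MMEngine-style lazy config: each `dict(type=..., **kwargs)` entry is a blueprint that the runner instantiates at startup, so importing the config stays cheap and every component remains overridable from the command line. Training is then launched with `xtuner train chatglm3_6b_qlora_alpaca_e3`. Conceptually the build step works like this simplified sketch (the real logic lives in MMEngine's registry and builder):

```python
def build(cfg):
    """Recursively instantiate dict(type=..., **kwargs) blueprints."""
    if isinstance(cfg, dict) and 'type' in cfg:
        cfg = dict(cfg)            # copy so the config itself is untouched
        factory = cfg.pop('type')  # a class or callable, e.g. LoraConfig
        return factory(**{k: build(v) for k, v in cfg.items()})
    return cfg

# build(tokenizer) would call AutoTokenizer.from_pretrained(...), and
# build(model) would construct the quantized LLM plus its LoRA adapter.
```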
diff --git a/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_alpaca_enzh_e3.py b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_alpaca_enzh_e3.py
new file mode 100644
index 000000000..e53bae2f9
--- /dev/null
+++ b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_alpaca_enzh_e3.py
@@ -0,0 +1,201 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import ConcatDataset, process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import (alpaca_map_fn, alpaca_zh_map_fn,
+ template_map_fn_factory)
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'THUDM/chatglm3-6b'
+
+# Data
+alpaca_zh_path = 'silk-road/alpaca-data-gpt4-chinese'
+alpaca_en_path = 'tatsu-lab/alpaca'
+prompt_template = PROMPT_TEMPLATE.chatglm3
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate generation performance during training
+evaluation_freq = 500
+SYSTEM = SYSTEM_TEMPLATE.alpaca
+evaluation_inputs = [
+ '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True,
+ padding_side='left')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+alpaca_en = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=alpaca_en_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=alpaca_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+alpaca_zh = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=alpaca_zh_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=alpaca_zh_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataset = dict(
+ type=ConcatDataset,
+ datasets_cfg=dict(alpaca_en=alpaca_en, alpaca_zh=alpaca_zh))
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log sample dialogues periodically during training (optional)
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ system=SYSTEM,
+ prompt_template=prompt_template)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
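A quick sanity check on the optimization settings these configs share: with `batch_size = 1` per device, `accumulative_counts = 16`, and sequences packed to `max_length = 2048`, each optimizer step consumes 16 packed sequences per GPU. The world size below is an assumption for illustration:

```python
batch_size = 1            # per device, as in the config
accumulative_counts = 16
max_length = 2048
world_size = 8            # assumption: an 8-GPU run

seqs_per_step = batch_size * accumulative_counts * world_size
tokens_per_step = seqs_per_step * max_length  # upper bound: packing fills to max_length
print(seqs_per_step, tokens_per_step)         # 128 sequences, 262144 tokens
```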
diff --git a/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_alpaca_enzh_oasst1_e3.py b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_alpaca_enzh_oasst1_e3.py
new file mode 100644
index 000000000..7164873f8
--- /dev/null
+++ b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_alpaca_enzh_oasst1_e3.py
@@ -0,0 +1,214 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import ConcatDataset, process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import (alpaca_map_fn, alpaca_zh_map_fn,
+ oasst1_map_fn, template_map_fn_factory)
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'THUDM/chatglm3-6b'
+
+# Data
+alpaca_zh_path = 'silk-road/alpaca-data-gpt4-chinese'
+alpaca_en_path = 'tatsu-lab/alpaca'
+oasst1_path = 'timdettmers/openassistant-guanaco'
+prompt_template = PROMPT_TEMPLATE.chatglm3
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate generation performance during training
+evaluation_freq = 500
+SYSTEM = SYSTEM_TEMPLATE.alpaca
+evaluation_inputs = [
+ '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True,
+ padding_side='left')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+alpaca_en = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=alpaca_en_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=alpaca_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+alpaca_zh = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=alpaca_zh_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=alpaca_zh_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+oasst1 = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=oasst1_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=oasst1_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataset = dict(
+ type=ConcatDataset,
+ datasets_cfg=dict(alpaca_en=alpaca_en, alpaca_zh=alpaca_zh, oasst1=oasst1))
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log sample dialogues periodically during training (optional)
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ system=SYSTEM,
+ prompt_template=prompt_template)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
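`ConcatDataset` in the config above chains the three processed datasets end to end so a single sampler draws from all of them; XTuner's wrapper additionally builds each entry of `datasets_cfg` first. The indexing behavior matches PyTorch's `torch.utils.data.ConcatDataset`:

```python
from torch.utils.data import ConcatDataset, Dataset

class Toy(Dataset):
    """A trivial in-memory dataset for demonstration."""
    def __init__(self, items):
        self.items = items
    def __len__(self):
        return len(self.items)
    def __getitem__(self, i):
        return self.items[i]

combined = ConcatDataset([Toy(['en'] * 3), Toy(['zh'] * 2), Toy(['oasst'] * 4)])
print(len(combined))  # 9: lengths add up
print(combined[4])    # 'zh': index 4 falls into the second dataset
```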
diff --git a/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_alpaca_zh_e3.py b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_alpaca_zh_e3.py
new file mode 100644
index 000000000..128c6c2a6
--- /dev/null
+++ b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_alpaca_zh_e3.py
@@ -0,0 +1,183 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import alpaca_zh_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'THUDM/chatglm3-6b'
+
+# Data
+alpaca_zh_path = 'silk-road/alpaca-data-gpt4-chinese'
+prompt_template = PROMPT_TEMPLATE.chatglm3
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate generation performance during training
+evaluation_freq = 500
+SYSTEM = SYSTEM_TEMPLATE.alpaca
+evaluation_inputs = [
+ '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True,
+ padding_side='left')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+alpaca_zh = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=alpaca_zh_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=alpaca_zh_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=alpaca_zh,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log sample dialogues periodically during training (optional)
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ system=SYSTEM,
+ prompt_template=prompt_template)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
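The `*_map_fn` callbacks used throughout these configs normalize raw dataset columns into the conversation format that `process_hf_dataset` then templates and tokenizes. For the Alpaca-style datasets, the mapping looks roughly like the sketch below (field names follow the public Alpaca schema; the exact output keys are an assumption based on XTuner's dataset-format docs):

```python
def alpaca_style_map_fn(example):
    # Assumed target schema: a 'conversation' list of {'input', 'output'} turns.
    prompt = example['instruction']
    if example.get('input'):             # optional context column in Alpaca
        prompt += '\n' + example['input']
    return {'conversation': [{'input': prompt, 'output': example['output']}]}
```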
diff --git a/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_arxiv_gentitle_e3.py b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_arxiv_gentitle_e3.py
new file mode 100644
index 000000000..57000b6fa
--- /dev/null
+++ b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_arxiv_gentitle_e3.py
@@ -0,0 +1,218 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import arxiv_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'THUDM/chatglm3-6b'
+
+# Data
+# 1. Download data from https://kaggle.com/datasets/Cornell-University/arxiv
+# 2. Process data by `xtuner preprocess arxiv ${DOWNLOADED_DATA} ./data/arxiv_data.json [optional arguments]` # noqa: E501
+data_path = './data/arxiv_data.json'
+prompt_template = PROMPT_TEMPLATE.chatglm3
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate generation performance during training
+evaluation_freq = 500
+SYSTEM = SYSTEM_TEMPLATE.arxiv_gentile
+evaluation_inputs = [
+ ('We present InternLM, a multilingual foundational language '
+ 'model with 104B parameters. InternLM is pre-trained on a large '
+ 'corpora with 1.6T tokens with a multi-phase progressive '
+ 'process, and then fine-tuned to align with human preferences. '
+ 'We also developed a training system called Uniscale-LLM for '
+ 'efficient large language model training. The evaluation on a '
+ 'number of benchmarks shows that InternLM achieves '
+ 'state-of-the-art performance in multiple aspects, including '
+ 'knowledge understanding, reading comprehension, mathematics, '
+ 'and coding. With such well-rounded capabilities, InternLM '
+ 'achieves outstanding performances on comprehensive exams, '
+ 'including MMLU, AGIEval, C-Eval and GAOKAO-Bench, without '
+ 'resorting to external tools. On these benchmarks, InternLM '
+ 'not only significantly outperforms open-source models, but '
+ 'also obtains superior performance compared to ChatGPT. Also, '
+ 'InternLM demonstrates excellent capability of understanding '
+ 'Chinese language and Chinese culture, which makes it a '
+ 'suitable foundation model to support Chinese-oriented language '
+ 'applications. This manuscript gives a detailed study of '
+ 'our results, with benchmarks and examples across a diverse '
+ 'set of knowledge domains and tasks.'),
+ ('In this work, we develop and release Llama 2, a collection of '
+ 'pretrained and fine-tuned large language models (LLMs) ranging '
+ 'in scale from 7 billion to 70 billion parameters.\nOur '
+ 'fine-tuned LLMs, called LLAMA 2-CHAT, are optimized for '
+ 'dialogue use cases. Our models outperform open-source chat '
+ 'models on most benchmarks we tested, and based on our human '
+ 'evaluations for helpfulness and safety, may be a suitable '
+ 'substitute for closedsource models. We provide a detailed '
+ 'description of our approach to fine-tuning and safety '
+ 'improvements of LLAMA 2-CHAT in order to enable the community '
+ 'to build on our work and contribute to the responsible '
+ 'development of LLMs.')
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True,
+ padding_side='left')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(
+ type=load_dataset, path='json', data_files=dict(train=data_path)),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=arxiv_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log sample dialogues periodically during training (optional)
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ system=SYSTEM,
+ prompt_template=prompt_template)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
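Unlike the Hub-hosted datasets in the other configs, this one reads a local JSON file produced by the `xtuner preprocess arxiv` step via the built-in `json` loader of `datasets`. A minimal sketch of that loading path (which fields `arxiv_map_fn` consumes — presumably `abstract` and `title` — is an assumption):

```python
from datasets import load_dataset

# data_files maps split name -> local file path, mirroring the config.
ds = load_dataset('json', data_files=dict(train='./data/arxiv_data.json'))
print(ds['train'].column_names)  # expected to include 'abstract' and 'title' (assumed)
```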
diff --git a/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_code_alpaca_e3.py b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_code_alpaca_e3.py
new file mode 100644
index 000000000..7f9e97bae
--- /dev/null
+++ b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_code_alpaca_e3.py
@@ -0,0 +1,187 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import code_alpaca_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'THUDM/chatglm3-6b'
+
+# Data
+data_path = 'HuggingFaceH4/CodeAlpaca_20K'
+prompt_template = PROMPT_TEMPLATE.chatglm3
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate generation performance during training
+evaluation_freq = 100
+SYSTEM = SYSTEM_TEMPLATE.coder
+evaluation_inputs = [
+ ('写一个Python函数,将十六进制颜色代码(如#0066ee)转换为对应的'
+ '红、绿、蓝(RGB)三个颜色分量值,并以元组的形式返回。'),
+ ('Write a Python function that takes a hexadecimal color code '
+ '(e.g., #0066ee) as input and converts it into the corresponding '
+ 'red, green, and blue (RGB) color component values.')
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True,
+ padding_side='left')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=data_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=code_alpaca_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log sample dialogues periodically during training (optional)
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ system=SYSTEM,
+ prompt_template=prompt_template)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
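As a reference point for the evaluation prompts in this config (hex color code to RGB components), the expected answer is only a few lines of Python; a fine-tuned model should produce something equivalent to:

```python
def hex_to_rgb(code: str) -> tuple:
    """Convert a hex color code like '#0066ee' to an (R, G, B) tuple."""
    code = code.lstrip('#')
    return tuple(int(code[i:i + 2], 16) for i in (0, 2, 4))

assert hex_to_rgb('#0066ee') == (0, 102, 238)
```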
diff --git a/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_colorist_e5.py b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_colorist_e5.py
new file mode 100644
index 000000000..572697f1e
--- /dev/null
+++ b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_colorist_e5.py
@@ -0,0 +1,183 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import colors_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'THUDM/chatglm3-6b'
+
+# Data
+data_path = 'burkelibbey/colors'
+prompt_template = PROMPT_TEMPLATE.chatglm3
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 5
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate generation performance during training
+evaluation_freq = 200
+SYSTEM = SYSTEM_TEMPLATE.colorist
+evaluation_inputs = [
+ '请给我一个像天空一样清澈透明的蓝色。', 'Please give me a clear blue like the sky.'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True,
+ padding_side='left')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=data_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=colors_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log sample dialogues periodically during training (optional)
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ system=SYSTEM,
+ prompt_template=prompt_template)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
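All of these configs anneal the learning rate from `lr` down to `eta_min = lr * 0.1` over the full run on a cosine curve, converted to per-iteration updates by `convert_to_iter_based=True`. The standard cosine-annealing formula makes the endpoints easy to sanity-check:

```python
import math

lr, max_epochs = 2e-4, 5       # values from this config
eta_min = lr * 0.1

def cosine_lr(t: float) -> float:
    """Learning rate at progress t in [0, max_epochs]."""
    return eta_min + 0.5 * (lr - eta_min) * (1 + math.cos(math.pi * t / max_epochs))

print(cosine_lr(0))            # 2e-4: starts at the base lr
print(cosine_lr(max_epochs))   # 2e-5: ends at eta_min
```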
diff --git a/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_lawyer_e3.py b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_lawyer_e3.py
new file mode 100644
index 000000000..59fc11e33
--- /dev/null
+++ b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_lawyer_e3.py
@@ -0,0 +1,209 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import ConcatDataset, process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import (crime_kg_assitant_map_fn,
+ law_reference_map_fn,
+ template_map_fn_factory)
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'THUDM/chatglm3-6b'
+
+# Data
+# download data from https://github.com/LiuHC0428/LAW-GPT
+crime_kg_assitant_path = './data/CrimeKgAssitant清洗后_52k.json'
+law_reference_data_path = './data/训练数据_带法律依据_92k.json'
+prompt_template = PROMPT_TEMPLATE.chatglm3
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate generation performance during training
+evaluation_freq = 500
+SYSTEM = SYSTEM_TEMPLATE.lawyer
+evaluation_inputs = ['请问离婚需要准备什么材料?', '销售鳄鱼皮包违法吗?']
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True,
+ padding_side='left')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+crime_kg_assitant = dict(
+ type=process_hf_dataset,
+ dataset=dict(
+ type=load_dataset,
+ path='json',
+ data_files=dict(train=crime_kg_assitant_path)),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=crime_kg_assitant_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+law_reference_data = dict(
+ type=process_hf_dataset,
+ dataset=dict(
+ type=load_dataset,
+ path='json',
+ data_files=dict(train=law_reference_data_path)),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=law_reference_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataset = dict(
+ type=ConcatDataset,
+ datasets_cfg=dict(
+ crime_kg_assitant=crime_kg_assitant,
+ law_reference_data=law_reference_data))
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log sample dialogues periodically during training (optional)
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ system=SYSTEM,
+ prompt_template=prompt_template)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
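Finally, every config here renders conversations through `PROMPT_TEMPLATE.chatglm3` via `template_map_fn_factory`, which presumably closes over the template and wraps each turn with the model's role markers. A hedged sketch of that mechanism (the exact ChatGLM3 marker strings and template keys are assumptions):

```python
def template_map_fn_factory(template):
    # Assumed template shape: a mapping with an INSTRUCTION format string,
    # e.g. '<|user|>\n{input}<|assistant|>\n' for ChatGLM3.
    def map_fn(example):
        for turn in example['conversation']:
            turn['input'] = template['INSTRUCTION'].format(input=turn['input'])
        return example
    return map_fn
```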
diff --git a/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_medical_e1.py b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_medical_e1.py
new file mode 100644
index 000000000..3f3918063
--- /dev/null
+++ b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_medical_e1.py
@@ -0,0 +1,185 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import medical_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'THUDM/chatglm3-6b'
+
+# Data
+data_path = 'shibing624/medical'
+data_config_name = 'finetune'
+prompt_template = PROMPT_TEMPLATE.chatglm3
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 1
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during the training
+evaluation_freq = 500
+SYSTEM = SYSTEM_TEMPLATE.medical
+evaluation_inputs = [
+    '我有家族遗传性的过敏,请问可以献血吗?', '我爷爷有高血压,请问他可以喝咖啡吗?',
+ '我女儿今年3岁了,从昨天晚上九点开始腹泻,到现在已经八个小时了,请问应该怎么办?'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True,
+ padding_side='left')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=data_path, name=data_config_name),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=medical_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ system=SYSTEM,
+ prompt_template=prompt_template)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed with `deterministic` disabled
+randomness = dict(seed=None, deterministic=False)
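The `dataset_map_fn`/`template_map_fn` pair used throughout these configs is a two-stage preprocessing step: the dataset map fn normalizes each raw record into a conversation structure, and the template map fn then wraps every turn in the model's chat markup. A rough sketch of the idea, with field names that follow xtuner's documented `conversation` format but should be treated as illustrative:

```python
# Toy two-stage mapping mirroring dataset_map_fn + template_map_fn.
def toy_medical_map_fn(example):
    # stage 1: normalize one raw record into a conversation turn
    return {'conversation': [{'input': example['instruction'],
                              'output': example['response']}]}

def toy_template_map_fn(example, template):
    # stage 2: wrap each turn in the prompt template's chat markup
    for turn in example['conversation']:
        turn['input'] = template['INSTRUCTION'].format(input=turn['input'])
    return example

chatglm3 = {'INSTRUCTION': '<|user|>\n{input}<|assistant|>'}
sample = {'instruction': 'What is hypertension?',
          'response': 'Persistently elevated blood pressure.'}
print(toy_template_map_fn(toy_medical_map_fn(sample), chatglm3))
```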
diff --git a/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_oasst1_512_e3.py b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_oasst1_512_e3.py
new file mode 100644
index 000000000..61a6ff1d4
--- /dev/null
+++ b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_oasst1_512_e3.py
@@ -0,0 +1,183 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import oasst1_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'THUDM/chatglm3-6b'
+
+# Data
+data_path = 'timdettmers/openassistant-guanaco'
+prompt_template = PROMPT_TEMPLATE.chatglm3
+max_length = 512
+pack_to_max_length = False
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during the training
+evaluation_freq = 500
+SYSTEM = ''
+evaluation_inputs = [
+ '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True,
+ padding_side='left')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=data_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=oasst1_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ system=SYSTEM,
+ prompt_template=prompt_template)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed with `deterministic` disabled
+randomness = dict(seed=None, deterministic=False)
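The two oasst1 variants differ only in `max_length`/`pack_to_max_length`: this 512 config keeps one (padded) sample per sequence, while the packed configs concatenate several tokenized samples into each 2048-token window so little compute is wasted on padding. A greedy sketch of the packing idea (not xtuner's exact implementation, which also shuffles before packing and tracks per-token labels):

```python
# Greedy sketch of pack_to_max_length: concatenate token id lists and
# emit fixed-size chunks; the final partial chunk is padded downstream.
def pack_samples(tokenized_samples, max_length):
    buffer, packed = [], []
    for ids in tokenized_samples:
        buffer.extend(ids)
        while len(buffer) >= max_length:
            packed.append(buffer[:max_length])
            buffer = buffer[max_length:]
    if buffer:
        packed.append(buffer)
    return packed

print(pack_samples([[1, 2, 3], [4, 5], [6, 7, 8, 9]], max_length=4))
# -> [[1, 2, 3, 4], [5, 6, 7, 8], [9]]
```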
diff --git a/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_oasst1_e3.py b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_oasst1_e3.py
new file mode 100644
index 000000000..bb04b5eca
--- /dev/null
+++ b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_oasst1_e3.py
@@ -0,0 +1,183 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import oasst1_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'THUDM/chatglm3-6b'
+
+# Data
+data_path = 'timdettmers/openassistant-guanaco'
+prompt_template = PROMPT_TEMPLATE.chatglm3
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during the training
+evaluation_freq = 500
+SYSTEM = ''
+evaluation_inputs = [
+ '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True,
+ padding_side='left')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=data_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=oasst1_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ system=SYSTEM,
+ prompt_template=prompt_template)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed with `deterministic` disabled
+randomness = dict(seed=None, deterministic=False)
diff --git a/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_open_platypus_e3.py b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_open_platypus_e3.py
new file mode 100644
index 000000000..a60726746
--- /dev/null
+++ b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_open_platypus_e3.py
@@ -0,0 +1,183 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import alpaca_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'THUDM/chatglm3-6b'
+
+# Data
+data_path = 'garage-bAInd/Open-Platypus'
+prompt_template = PROMPT_TEMPLATE.chatglm3
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during the training
+evaluation_freq = 500
+SYSTEM = SYSTEM_TEMPLATE.alpaca
+evaluation_inputs = [
+ '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True,
+ padding_side='left')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=data_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=alpaca_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ system=SYSTEM,
+ prompt_template=prompt_template)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed with `deterministic` disabled
+randomness = dict(seed=None, deterministic=False)
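One knob worth reading together across these otherwise near-identical configs: the number of samples per optimizer step is the product of the per-device batch, the accumulation factor, and the world size, so `batch_size = 1` with `accumulative_counts = 16` is not as small as it looks. A quick illustration, where the GPU count is a hypothetical value:

```python
# Effective samples per optimizer step (num_gpus is hypothetical).
batch_size = 1            # per_device, as in the configs above
accumulative_counts = 16  # gradient accumulation steps
num_gpus = 8
print(batch_size * accumulative_counts * num_gpus)  # -> 128
```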
diff --git a/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_openorca_e1.py b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_openorca_e1.py
new file mode 100644
index 000000000..7356d30bd
--- /dev/null
+++ b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_openorca_e1.py
@@ -0,0 +1,183 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import openorca_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'THUDM/chatglm3-6b'
+
+# Data
+data_path = 'Open-Orca/OpenOrca'
+prompt_template = PROMPT_TEMPLATE.chatglm3
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 1
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during the training
+evaluation_freq = 5000
+SYSTEM = ''
+evaluation_inputs = [
+ '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True,
+ padding_side='left')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=data_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=openorca_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ system=SYSTEM,
+ prompt_template=prompt_template)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed with `deterministic` disabled
+randomness = dict(seed=None, deterministic=False)
diff --git a/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_sql_e3.py b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_sql_e3.py
new file mode 100644
index 000000000..154a3b07c
--- /dev/null
+++ b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_sql_e3.py
@@ -0,0 +1,187 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import sql_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'THUDM/chatglm3-6b'
+
+# Data
+data_path = 'b-mc2/sql-create-context'
+prompt_template = PROMPT_TEMPLATE.chatglm3
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during the training
+evaluation_freq = 500
+SYSTEM = SYSTEM_TEMPLATE.sql
+evaluation_inputs = [
+ ('CREATE TABLE station (name VARCHAR, lat VARCHAR, city VARCHAR)\n'
+ 'Find the name, latitude, and city of stations with latitude '
+ 'above 50.'),
+ ('CREATE TABLE weather (zip_code VARCHAR, mean_visibility_miles '
+ 'INTEGER)\n找到mean_visibility_miles最大的zip_code。')
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True,
+ padding_side='left')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=data_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=sql_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ system=SYSTEM,
+ prompt_template=prompt_template)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed with `deterministic` disabled
+randomness = dict(seed=None, deterministic=False)
diff --git a/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_tiny_codes_e1.py b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_tiny_codes_e1.py
new file mode 100644
index 000000000..4978c4053
--- /dev/null
+++ b/xtuner/configs/chatglm/chatglm3_6b/chatglm3_6b_qlora_tiny_codes_e1.py
@@ -0,0 +1,187 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import template_map_fn_factory, tiny_codes_map_fn
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'THUDM/chatglm3-6b'
+
+# Data
+data_path = 'nampdn-ai/tiny-codes'
+prompt_template = PROMPT_TEMPLATE.chatglm3
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 1
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during the training
+evaluation_freq = 500
+SYSTEM = SYSTEM_TEMPLATE.coder
+evaluation_inputs = [
+ ('写一个Python函数,将十六进制颜色代码(如#0066ee)转换为对应的'
+ '红、绿、蓝(RGB)三个颜色分量值,并以元组的形式返回。'),
+ ('Write a Python function that takes a hexadecimal color code '
+ '(e.g., #0066ee) as input and converts it into the corresponding '
+ 'red, green, and blue (RGB) color component values.')
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True,
+ padding_side='left')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=data_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=tiny_codes_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ system=SYSTEM,
+ prompt_template=prompt_template)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed with `deterministic` disabled
+randomness = dict(seed=None, deterministic=False)
diff --git a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_e3.py b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_e3.py
index d12e6cd89..5c3f91b3a 100644
--- a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_e3.py
+++ b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_e3.py
@@ -25,7 +25,7 @@
# Data
alpaca_en_path = 'tatsu-lab/alpaca'
-prompt_template = PROMPT_TEMPLATE.default
+prompt_template = PROMPT_TEMPLATE.chatglm3
max_length = 2048
pack_to_max_length = True
@@ -54,6 +54,7 @@
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
+ encode_special_tokens=True,
padding_side='left')
model = dict(
diff --git a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_enzh_e3.py b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_enzh_e3.py
index 06cde1989..da55d9ff8 100644
--- a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_enzh_e3.py
+++ b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_enzh_e3.py
@@ -27,7 +27,7 @@
# Data
alpaca_zh_path = 'silk-road/alpaca-data-gpt4-chinese'
alpaca_en_path = 'tatsu-lab/alpaca'
-prompt_template = PROMPT_TEMPLATE.default
+prompt_template = PROMPT_TEMPLATE.chatglm3
max_length = 2048
pack_to_max_length = True
@@ -56,6 +56,7 @@
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
+ encode_special_tokens=True,
padding_side='left')
model = dict(
diff --git a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_enzh_oasst1_e3.py b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_enzh_oasst1_e3.py
index 3e6a2f1ac..5b39b3727 100644
--- a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_enzh_oasst1_e3.py
+++ b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_enzh_oasst1_e3.py
@@ -28,7 +28,7 @@
alpaca_zh_path = 'silk-road/alpaca-data-gpt4-chinese'
alpaca_en_path = 'tatsu-lab/alpaca'
oasst1_path = 'timdettmers/openassistant-guanaco'
-prompt_template = PROMPT_TEMPLATE.default
+prompt_template = PROMPT_TEMPLATE.chatglm3
max_length = 2048
pack_to_max_length = True
@@ -57,6 +57,7 @@
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
+ encode_special_tokens=True,
padding_side='left')
model = dict(
diff --git a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_zh_e3.py b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_zh_e3.py
index 299b2f935..a85212686 100644
--- a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_zh_e3.py
+++ b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_alpaca_zh_e3.py
@@ -25,7 +25,7 @@
# Data
alpaca_zh_path = 'silk-road/alpaca-data-gpt4-chinese'
-prompt_template = PROMPT_TEMPLATE.default
+prompt_template = PROMPT_TEMPLATE.chatglm3
max_length = 2048
pack_to_max_length = True
@@ -54,6 +54,7 @@
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
+ encode_special_tokens=True,
padding_side='left')
model = dict(
diff --git a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_arxiv_gentitle_e3.py b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_arxiv_gentitle_e3.py
index d47634fc3..c826e33eb 100644
--- a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_arxiv_gentitle_e3.py
+++ b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_arxiv_gentitle_e3.py
@@ -27,7 +27,7 @@
# 1. Download data from https://kaggle.com/datasets/Cornell-University/arxiv
# 2. Process data by `xtuner preprocess arxiv ${DOWNLOADED_DATA} ./data/arxiv_data.json [optional arguments]` # noqa: E501
data_path = './data/arxiv_data.json'
-prompt_template = PROMPT_TEMPLATE.default
+prompt_template = PROMPT_TEMPLATE.chatglm3
max_length = 2048
pack_to_max_length = True
@@ -88,6 +88,7 @@
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
+ encode_special_tokens=True,
padding_side='left')
model = dict(
diff --git a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_code_alpaca_e3.py b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_code_alpaca_e3.py
index d75b92277..43c83b7bb 100644
--- a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_code_alpaca_e3.py
+++ b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_code_alpaca_e3.py
@@ -25,7 +25,7 @@
# Data
data_path = 'HuggingFaceH4/CodeAlpaca_20K'
-prompt_template = PROMPT_TEMPLATE.default
+prompt_template = PROMPT_TEMPLATE.chatglm3
max_length = 2048
pack_to_max_length = True
@@ -58,6 +58,7 @@
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
+ encode_special_tokens=True,
padding_side='left')
model = dict(
diff --git a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_colorist_e5.py b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_colorist_e5.py
index e81e0285d..598ad368f 100644
--- a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_colorist_e5.py
+++ b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_colorist_e5.py
@@ -25,7 +25,7 @@
# Data
data_path = 'burkelibbey/colors'
-prompt_template = PROMPT_TEMPLATE.default
+prompt_template = PROMPT_TEMPLATE.chatglm3
max_length = 2048
pack_to_max_length = True
@@ -54,6 +54,7 @@
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
+ encode_special_tokens=True,
padding_side='left')
model = dict(
diff --git a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_lawyer_e3.py b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_lawyer_e3.py
index 99eb4fc58..068bff02b 100644
--- a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_lawyer_e3.py
+++ b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_lawyer_e3.py
@@ -29,7 +29,7 @@
# download data from https://github.com/LiuHC0428/LAW-GPT
crime_kg_assitant_path = './data/CrimeKgAssitant清洗后_52k.json'
law_reference_data_path = './data/训练数据_带法律依据_92k.json'
-prompt_template = PROMPT_TEMPLATE.default
+prompt_template = PROMPT_TEMPLATE.chatglm3
max_length = 2048
pack_to_max_length = True
@@ -56,6 +56,7 @@
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
+ encode_special_tokens=True,
padding_side='left')
model = dict(
diff --git a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_medical_e1.py b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_medical_e1.py
index d99bf7301..6a6917cea 100644
--- a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_medical_e1.py
+++ b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_medical_e1.py
@@ -26,7 +26,7 @@
# Data
data_path = 'shibing624/medical'
data_config_name = 'finetune'
-prompt_template = PROMPT_TEMPLATE.default
+prompt_template = PROMPT_TEMPLATE.chatglm3
max_length = 2048
pack_to_max_length = True
@@ -56,6 +56,7 @@
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
+ encode_special_tokens=True,
padding_side='left')
model = dict(
diff --git a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_oasst1_512_e3.py b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_oasst1_512_e3.py
index 3cc3cb1b5..3ae7381e9 100644
--- a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_oasst1_512_e3.py
+++ b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_oasst1_512_e3.py
@@ -25,7 +25,7 @@
# Data
data_path = 'timdettmers/openassistant-guanaco'
-prompt_template = PROMPT_TEMPLATE.default
+prompt_template = PROMPT_TEMPLATE.chatglm3
max_length = 512
pack_to_max_length = False
@@ -54,6 +54,7 @@
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
+ encode_special_tokens=True,
padding_side='left')
model = dict(
diff --git a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_oasst1_e3.py b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_oasst1_e3.py
index 31ecdfb33..3d778d348 100644
--- a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_oasst1_e3.py
+++ b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_oasst1_e3.py
@@ -25,7 +25,7 @@
# Data
data_path = 'timdettmers/openassistant-guanaco'
-prompt_template = PROMPT_TEMPLATE.default
+prompt_template = PROMPT_TEMPLATE.chatglm3
max_length = 2048
pack_to_max_length = True
@@ -54,6 +54,7 @@
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
+ encode_special_tokens=True,
padding_side='left')
model = dict(
diff --git a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_open_platypus_e3.py b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_open_platypus_e3.py
index 47519ba1d..039ea8ef5 100644
--- a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_open_platypus_e3.py
+++ b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_open_platypus_e3.py
@@ -25,7 +25,7 @@
# Data
data_path = 'garage-bAInd/Open-Platypus'
-prompt_template = PROMPT_TEMPLATE.default
+prompt_template = PROMPT_TEMPLATE.chatglm3
max_length = 2048
pack_to_max_length = True
@@ -54,6 +54,7 @@
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
+ encode_special_tokens=True,
padding_side='left')
model = dict(
diff --git a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_openorca_e1.py b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_openorca_e1.py
index 6385a6e71..42dc2e7b4 100644
--- a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_openorca_e1.py
+++ b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_openorca_e1.py
@@ -25,7 +25,7 @@
# Data
data_path = 'Open-Orca/OpenOrca'
-prompt_template = PROMPT_TEMPLATE.default
+prompt_template = PROMPT_TEMPLATE.chatglm3
max_length = 2048
pack_to_max_length = True
@@ -54,6 +54,7 @@
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
+ encode_special_tokens=True,
padding_side='left')
model = dict(
diff --git a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_sql_e3.py b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_sql_e3.py
index 8c0056363..7c826cc76 100644
--- a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_sql_e3.py
+++ b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_sql_e3.py
@@ -25,7 +25,7 @@
# Data
data_path = 'b-mc2/sql-create-context'
-prompt_template = PROMPT_TEMPLATE.default
+prompt_template = PROMPT_TEMPLATE.chatglm3
max_length = 2048
pack_to_max_length = True
@@ -58,6 +58,7 @@
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
+ encode_special_tokens=True,
padding_side='left')
model = dict(
diff --git a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_tiny_codes_e1.py b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_tiny_codes_e1.py
index e42be37fa..6ede6df61 100644
--- a/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_tiny_codes_e1.py
+++ b/xtuner/configs/chatglm/chatglm3_6b_base/chatglm3_6b_base_qlora_tiny_codes_e1.py
@@ -25,7 +25,7 @@
# Data
data_path = 'nampdn-ai/tiny-codes'
-prompt_template = PROMPT_TEMPLATE.default
+prompt_template = PROMPT_TEMPLATE.chatglm3
max_length = 2048
pack_to_max_length = True
@@ -58,6 +58,7 @@
type=AutoTokenizer.from_pretrained,
pretrained_model_name_or_path=pretrained_model_name_or_path,
trust_remote_code=True,
+ encode_special_tokens=True,
padding_side='left')
model = dict(
diff --git a/xtuner/dataset/utils.py b/xtuner/dataset/utils.py
index 6e1d33f80..a5e464eb1 100644
--- a/xtuner/dataset/utils.py
+++ b/xtuner/dataset/utils.py
@@ -40,7 +40,7 @@ def encode_fn(example, tokenizer, max_length, input_ids_with_output=True):
bos_token_id = []
eos_token_id = tokenizer.eos_token_id
elif tokenizer.__class__.__name__ == 'ChatGLMTokenizer':
- bos_token_id = []
+ bos_token_id = [64790, 64792]
eos_token_id = tokenizer.eos_token_id
else:
bos_token_id = tokenizer.bos_token_id
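The hard-coded `[64790, 64792]` prefix corresponds to ChatGLM3's `[gMASK]` and `sop` special tokens, which its tokenizer normally prepends to every encoded sequence; spelling them out here keeps samples built by `encode_fn` consistent with inference-time encoding. A quick sanity check against the actual tokenizer (this assumes the `get_prefix_tokens()` helper exposed by the THUDM/chatglm3-6b remote tokenizer code):

```python
from transformers import AutoTokenizer

# Assumption: the remote ChatGLM3 tokenizer provides get_prefix_tokens().
tok = AutoTokenizer.from_pretrained(
    'THUDM/chatglm3-6b', trust_remote_code=True, encode_special_tokens=True)
print(tok.get_prefix_tokens())  # expected: [64790, 64792]
```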
diff --git a/xtuner/tools/chat.py b/xtuner/tools/chat.py
index 9d87818b8..0dfb6cecf 100644
--- a/xtuner/tools/chat.py
+++ b/xtuner/tools/chat.py
@@ -200,7 +200,9 @@ def main():
model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path,
**model_kwargs)
tokenizer = AutoTokenizer.from_pretrained(
- args.model_name_or_path, trust_remote_code=True)
+ args.model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True)
if args.adapter is not None:
model = PeftModel.from_pretrained(
model, args.adapter, offload_folder=args.offload_folder)
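The `encode_special_tokens=True` threaded through every tokenizer construction in this diff pairs with the `chatglm3` prompt template added to `xtuner/utils/templates.py` below: the template emits `<|system|>`/`<|user|>`/`<|assistant|>` markers, and without the flag the ChatGLM3 tokenizer would encode them as ordinary text rather than single special tokens. For illustration, the string one turn expands to:

```python
# Expanding the chatglm3 template entries (added below) for one turn.
template = dict(
    SYSTEM='<|system|>\n{system}',
    INSTRUCTION='<|user|>\n{input}<|assistant|>')

prompt = (template['SYSTEM'].format(system='You are a helpful assistant.') +
          template['INSTRUCTION'].format(input='Hi!'))
print(prompt)
# <|system|>
# You are a helpful assistant.<|user|>
# Hi!<|assistant|>
```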
diff --git a/xtuner/tools/model_converters/merge.py b/xtuner/tools/model_converters/merge.py
index cd0a58970..4b4503eb0 100644
--- a/xtuner/tools/model_converters/merge.py
+++ b/xtuner/tools/model_converters/merge.py
@@ -38,7 +38,9 @@ def main():
offload_folder=args.offload_folder,
trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(
- args.model_name_or_path, trust_remote_code=True)
+ args.model_name_or_path,
+ trust_remote_code=True,
+ encode_special_tokens=True)
model_unmerged = PeftModel.from_pretrained(
model,
args.adapter_name_or_path,
diff --git a/xtuner/utils/templates.py b/xtuner/utils/templates.py
index b8111b024..5dcbc851b 100644
--- a/xtuner/utils/templates.py
+++ b/xtuner/utils/templates.py
@@ -26,6 +26,9 @@
chatglm2=dict(
SYSTEM='{system}\n',
INSTRUCTION='\n\n[Round {round}]\n\n问:{input}\n\n答:'),
+ chatglm3=dict(
+ SYSTEM='<|system|>\n{system}',
+ INSTRUCTION='<|user|>\n{input}<|assistant|>'),
qwen_chat=dict(
SYSTEM=('\n<|im_start|>system\n{system}<|im_end|>'),
INSTRUCTION=(