Skip to content

Commit

Permalink
[Fix] Fix MMLU evaluation (#208)
Browse files Browse the repository at this point in the history
* add default_map_fn

* fix bug

* add mmlu cfg

* update

* remove prompt_template

* update hook

* fix test bug

* Update runtime.txt
  • Loading branch information
LZHgrla authored Nov 14, 2023
1 parent 7e0f966 commit 51ae023
Show file tree
Hide file tree
Showing 7 changed files with 286 additions and 6 deletions.
1 change: 1 addition & 0 deletions requirements/runtime.txt
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
bitsandbytes>=0.40.0
datasets
einops
fsspec<=2023.6.0
lagent>=0.1.2
mmengine>=0.9.0
modelscope
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,237 @@
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from bitsandbytes.optim import PagedAdamW32bit
from datasets import load_dataset
from mmengine.dataset import DefaultSampler
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
LoggerHook, ParamSchedulerHook)
from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
from peft import LoraConfig
from transformers import (AutoModelForCausalLM, AutoTokenizer,
BitsAndBytesConfig)

from xtuner.dataset import process_hf_dataset
from xtuner.dataset.collate_fns import default_collate_fn, mmlu_collate_fn
from xtuner.dataset.map_fns import (default_map_fn, oasst1_map_fn,
template_map_fn_factory)
from xtuner.engine import DatasetInfoHook, EvaluateChatHook
from xtuner.evaluation import MMLUMetric
from xtuner.model import SupervisedFinetune
from xtuner.utils import PROMPT_TEMPLATE

#######################################################################
#                          PART 1  Settings                           #
#######################################################################
# Model
# Base model to finetune, loaded via HuggingFace `from_pretrained`.
pretrained_model_name_or_path = 'internlm/internlm-7b'

# Data
# Training dataset name on the HuggingFace Hub.
data_path = 'timdettmers/openassistant-guanaco'
prompt_template = PROMPT_TEMPLATE.internlm_chat
# Maximum token length per training sample (shorter samples are packed).
max_length = 2048
pack_to_max_length = True

# Val/Test data
# Download from https://github.com/artidoro/qlora/tree/main/data/mmlu
# Local directory holding the five-shot MMLU JSON files.
mmlu_data_root = './data/mmlu/'

# Scheduler & Optimizer
batch_size = 1  # per_device
# Gradient accumulation steps; effective batch = batch_size * this * world.
accumulative_counts = 16
dataloader_num_workers = 0
max_epochs = 3
optim_type = PagedAdamW32bit
lr = 2e-4
betas = (0.9, 0.999)
weight_decay = 0
max_norm = 1  # grad clip

# Evaluate the generation performance during the training
# Run EvaluateChatHook every `evaluation_freq` iterations.
evaluation_freq = 500
SYSTEM = ''
evaluation_inputs = [
'请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
]

#######################################################################
#                      PART 2  Model & Tokenizer                      #
#######################################################################
# Tokenizer is built lazily from the same checkpoint as the model;
# right padding keeps labels aligned for causal-LM training.
tokenizer = dict(
    type=AutoTokenizer.from_pretrained,
    pretrained_model_name_or_path=pretrained_model_name_or_path,
    trust_remote_code=True,
    padding_side='right')

model = dict(
    type=SupervisedFinetune,
    llm=dict(
        type=AutoModelForCausalLM.from_pretrained,
        pretrained_model_name_or_path=pretrained_model_name_or_path,
        trust_remote_code=True,
        torch_dtype=torch.float16,
        # QLoRA-style 4-bit NF4 quantization with double quantization and
        # fp16 compute for memory-efficient finetuning.
        quantization_config=dict(
            type=BitsAndBytesConfig,
            load_in_4bit=True,
            load_in_8bit=False,
            llm_int8_threshold=6.0,
            llm_int8_has_fp16_weight=False,
            bnb_4bit_compute_dtype=torch.float16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type='nf4')),
    # LoRA adapter applied on top of the frozen quantized base model.
    lora=dict(
        type=LoraConfig,
        r=64,
        lora_alpha=16,
        lora_dropout=0.1,
        bias='none',
        task_type='CAUSAL_LM'))

#######################################################################
#                   PART 3  Dataset & Dataloader                      #
#######################################################################
# oasst1-formatted training data, tokenized and packed to max_length.
train_dataset = dict(
    type=process_hf_dataset,
    dataset=dict(type=load_dataset, path=data_path),
    tokenizer=tokenizer,
    max_length=max_length,
    dataset_map_fn=oasst1_map_fn,
    template_map_fn=dict(
        type=template_map_fn_factory, template=prompt_template),
    remove_unused_columns=True,
    shuffle_before_pack=True,
    pack_to_max_length=pack_to_max_length)

train_dataloader = dict(
    batch_size=batch_size,
    num_workers=dataloader_num_workers,
    dataset=train_dataset,
    sampler=dict(type=DefaultSampler, shuffle=True),
    collate_fn=dict(type=default_collate_fn))

# Five-shot MMLU data, loaded from the local JSON files referenced in
# PART 1 (`mmlu_data_root`).
mmlu_fs_dataset = dict(
    type=load_dataset,
    path='json',
    data_files=dict(
        val=mmlu_data_root + 'five_shot_mmlu_val.json',
        test=mmlu_data_root + 'five_shot_mmlu_test.json'))

val_mmlu_fs = dict(
    type=process_hf_dataset,
    dataset=mmlu_fs_dataset,
    tokenizer=tokenizer,
    dataset_map_fn=default_map_fn,
    max_length=max_length,
    # Exclude answers from input_ids so the model must generate them.
    input_ids_with_output=False,
    pack_to_max_length=False,
    split='val')

val_dataloader = dict(
    batch_size=1,
    num_workers=0,
    dataset=val_mmlu_fs,
    sampler=dict(type=DefaultSampler, shuffle=False),
    collate_fn=dict(type=mmlu_collate_fn))

val_evaluator = dict(
    type=MMLUMetric, tokenizer=tokenizer, prefix='mmlu_fs_val')

# Test split mirrors the validation pipeline above, differing only in
# the dataset split and the metric prefix.
test_mmlu_fs = dict(
    type=process_hf_dataset,
    dataset=mmlu_fs_dataset,
    tokenizer=tokenizer,
    dataset_map_fn=default_map_fn,
    max_length=max_length,
    input_ids_with_output=False,
    pack_to_max_length=False,
    split='test')

test_dataloader = dict(
    batch_size=1,
    num_workers=0,
    dataset=test_mmlu_fs,
    sampler=dict(type=DefaultSampler, shuffle=False),
    collate_fn=dict(type=mmlu_collate_fn))

test_evaluator = dict(
    type=MMLUMetric, tokenizer=tokenizer, prefix='mmlu_fs_test')

#######################################################################
#                  PART 4  Scheduler & Optimizer                      #
#######################################################################
# optimizer: AMP wrapper with dynamic loss scaling around paged AdamW.
optim_wrapper = dict(
    type=AmpOptimWrapper,
    optimizer=dict(
        type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
    clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
    accumulative_counts=accumulative_counts,
    loss_scale='dynamic',
    dtype='float16')

# learning policy
# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
param_scheduler = dict(
    type=CosineAnnealingLR,
    eta_min=lr * 0.1,
    by_epoch=True,
    T_max=max_epochs,
    convert_to_iter_based=True)

# train, val, test setting: validate once per epoch.
train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

#######################################################################
#                           PART 5  Runtime                           #
#######################################################################
# Log the dialogue periodically during the training process, optional
custom_hooks = [
    dict(type=DatasetInfoHook, tokenizer=tokenizer),
    dict(
        type=EvaluateChatHook,
        tokenizer=tokenizer,
        every_n_iters=evaluation_freq,
        evaluation_inputs=evaluation_inputs,
        system=SYSTEM,
        prompt_template=prompt_template)
]

# configure default hooks
default_hooks = dict(
    # record the time of every iteration.
    timer=dict(type=IterTimerHook),
    # print log every 10 iterations.
    logger=dict(type=LoggerHook, interval=10),
    # enable the parameter scheduler.
    param_scheduler=dict(type=ParamSchedulerHook),
    # save checkpoint per epoch.
    checkpoint=dict(type=CheckpointHook, interval=1),
    # set sampler seed in distributed environment.
    sampler_seed=dict(type=DistSamplerSeedHook),
)

# configure environment
env_cfg = dict(
    # whether to enable cudnn benchmark
    cudnn_benchmark=False,
    # multi-process settings for dataloader workers
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    # distributed backend
    dist_cfg=dict(backend='nccl'),
)

# set visualizer (disabled)
visualizer = None

# set log level
log_level = 'INFO'

# load from which checkpoint (None = train from scratch)
load_from = None

# whether to resume training from the loaded checkpoint
resume = False

# Defaults to use random seed and disable `deterministic`
randomness = dict(seed=None, deterministic=False)
2 changes: 1 addition & 1 deletion xtuner/dataset/collate_fns/mmlu_collate_fn.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,4 +36,4 @@ def mmlu_collate_fn(instances: Sequence[Dict],
if return_hf_format:
return data_dict
else:
return {'data': data_dict, 'data_samples': None}
return {'data': data_dict, 'data_samples': data_samples}
3 changes: 2 additions & 1 deletion xtuner/dataset/map_fns/dataset_map_fns/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
from .code_alpaca_map_fn import code_alpaca_map_fn
from .colors_map_fn import colors_map_fn
from .crime_kg_assitant_map_fn import crime_kg_assitant_map_fn
from .default_map_fn import default_map_fn
from .law_reference_map_fn import law_reference_map_fn
from .medical_map_fn import medical_map_fn
from .msagent_map_fn import msagent_react_map_fn
Expand All @@ -23,5 +24,5 @@
'tiny_codes_map_fn', 'colors_map_fn', 'law_reference_map_fn',
'crime_kg_assitant_map_fn', 'sql_map_fn', 'openai_map_fn',
'wizardlm_map_fn', 'stack_exchange_map_fn', 'msagent_react_map_fn',
'pretrain_map_fn'
'default_map_fn', 'pretrain_map_fn'
]
8 changes: 8 additions & 0 deletions xtuner/dataset/map_fns/dataset_map_fns/default_map_fn.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Copyright (c) OpenMMLab. All rights reserved.
def default_map_fn(example):
    """Wrap a raw ``input``/``output`` pair into the standard single-turn
    ``conversation`` structure used by the dataset pipeline."""
    turn = {'input': example['input'], 'output': example['output']}
    return {'conversation': [turn]}
10 changes: 9 additions & 1 deletion xtuner/engine/hooks/dataset_info_hook.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ def log(self, runner, dataset, mode='train'):
runner.logger.info(f'{mode} example:')
runner.logger.info(self.tokenizer.decode(dataset[0]['input_ids']))

def before_run(self, runner) -> None:
def before_train(self, runner) -> None:
do_train = runner.train_loop is not None
do_eval = runner.val_loop is not None
do_test = runner.test_loop is not None
Expand All @@ -27,3 +27,11 @@ def before_run(self, runner) -> None:
if do_test:
test_dataset = runner.test_dataloader.dataset
self.log(runner, test_dataset, mode='test')

def before_val(self, runner) -> None:
    """Log one decoded sample of the validation set before validation runs."""
    self.log(runner, runner.val_dataloader.dataset, mode='eval')

def before_test(self, runner) -> None:
    """Log one decoded sample of the test set before testing runs."""
    self.log(runner, runner.test_dataloader.dataset, mode='test')
31 changes: 28 additions & 3 deletions xtuner/tools/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import os.path as osp
from types import FunctionType

import torch
from mmengine.config import Config, DictAction
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
Expand All @@ -12,6 +13,29 @@
from xtuner.registry import MAP_FUNC


def guess_load_checkpoint(pth_model):
    """Load a model state dict from either a plain ``.pth`` file or a
    DeepSpeed (ZeRO) checkpoint directory.

    Args:
        pth_model (str): Path to a checkpoint. A regular file is loaded with
            ``torch.load``; a directory is treated as a DeepSpeed checkpoint
            and its fp32 weights are reconstructed.

    Returns:
        dict: The model state dict, ready for ``load_state_dict``.

    Raises:
        FileNotFoundError: If ``pth_model`` is neither a file nor a directory.
        ImportError: If a DeepSpeed checkpoint is given but DeepSpeed is not
            installed.
    """
    # Use the file-level `osp` alias for consistency with the rest of the
    # module.
    if osp.isfile(pth_model):
        state_dict = torch.load(pth_model, map_location='cpu')
        # Runner checkpoints wrap the weights under a 'state_dict' key.
        if 'state_dict' in state_dict:
            state_dict = state_dict['state_dict']
    elif osp.isdir(pth_model):
        # DeepSpeed saves checkpoints as directories; reconstruct the fp32
        # weights from the sharded ZeRO states.
        try:
            from deepspeed.utils.zero_to_fp32 import \
                get_fp32_state_dict_from_zero_checkpoint
        except ImportError as e:
            # Chain the original error so the real import failure is visible.
            raise ImportError(
                'The provided PTH model appears to be a DeepSpeed checkpoint. '
                'However, DeepSpeed library is not detected in current '
                'environment. This suggests that DeepSpeed may not be '
                'installed or is incorrectly configured. Please verify your '
                'setup.') from e
        state_dict = get_fp32_state_dict_from_zero_checkpoint(
            osp.dirname(pth_model), osp.basename(pth_model))
    else:
        raise FileNotFoundError(f'Cannot find {pth_model}')
    return state_dict


def parse_args():
parser = argparse.ArgumentParser(description='Test model')
parser.add_argument('config', help='config file name or path.')
Expand Down Expand Up @@ -85,9 +109,6 @@ def main():
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])

if args.checkpoint is not None:
cfg.load_from = args.checkpoint

# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
Expand All @@ -97,6 +118,10 @@ def main():
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)

state_dict = guess_load_checkpoint(args.checkpoint)
runner.model.load_state_dict(state_dict, strict=False)
runner.logger.info(f'Load checkpoint from {args.checkpoint}')

# start testing
runner.test()

Expand Down

0 comments on commit 51ae023

Please sign in to comment.