update mistral settings (#368)
* update mistral settings

* add vllm rolling batch strategy

* remove vllm dependencies
lanking520 committed Oct 9, 2023
1 parent 9309d68 commit 4761d1b
Showing 4 changed files with 37 additions and 38 deletions.
File 1 of 4 (Jupyter notebook)
@@ -72,19 +72,6 @@
     "option.dtype=fp16"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "70210d4c",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%%writefile requirements.txt\n",
-    "vllm==0.1.7\n",
-    "pandas\n",
-    "pyarrow"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -95,7 +82,6 @@
     "%%sh\n",
     "mkdir mymodel\n",
     "mv serving.properties mymodel/\n",
-    "mv requirements.txt mymodel/\n",
     "tar czvf mymodel.tar.gz mymodel/\n",
     "rm -rf mymodel"
    ]
@@ -129,7 +115,7 @@
     "image_uri = image_uris.retrieve(\n",
     "    framework=\"djl-deepspeed\",\n",
     "    region=sess.boto_session.region_name,\n",
-    "    version=\"0.23.0\"\n",
+    "    version=\"0.24.0\"\n",
     "    )"
    ]
   },
File 2 of 4 (Jupyter notebook)
@@ -72,19 +72,6 @@
     "option.device_map=auto"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "de59119f",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%%writefile requirements.txt\n",
-    "vllm==0.2.0\n",
-    "git+https://github.com/huggingface/transformers\n",
-    "accelerate==0.23.0"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -95,7 +82,6 @@
     "%%sh\n",
     "mkdir mymodel\n",
     "mv serving.properties mymodel/\n",
-    "mv requirements.txt mymodel/\n",
     "tar czvf mymodel.tar.gz mymodel/\n",
     "rm -rf mymodel"
    ]
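Both notebooks drop the cell that wrote a requirements.txt pinning vllm (0.1.7 and 0.2.0 respectively), and the first also moves the container tag from 0.23.0 to 0.24.0. This matches the "remove vllm dependencies" line in the commit message: the 0.24.0 image is expected to provide those packages itself. Below is a minimal deployment sketch for the repackaged tarball; the S3 path, role, and instance type are placeholders, not values from this commit.

import sagemaker
from sagemaker import image_uris
from sagemaker.model import Model

sess = sagemaker.Session()
role = sagemaker.get_execution_role()

# Retrieve the 0.24.0 djl-deepspeed (LMI) image referenced in the notebook.
image_uri = image_uris.retrieve(
    framework="djl-deepspeed",
    region=sess.boto_session.region_name,
    version="0.24.0",
)

# Placeholder S3 URI: upload the mymodel.tar.gz built by the %%sh cell first.
model = Model(
    image_uri=image_uri,
    model_data="s3://my-bucket/mymodel.tar.gz",
    role=role,
)
predictor = model.deploy(initial_instance_count=1, instance_type="ml.g5.2xlarge")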
File 3 of 4 (Python)
@@ -86,7 +86,10 @@
     "GPTJForCausalLM": 'gptj',
     "OPTForCausalLM": 'opt',
     "BloomForCausalLM": 'bloom',
-    "ChatGLMModel": 'chatglm'
+    "ChatGLMModel": 'chatglm',
+    "BaiChuanForCausalLM": "baichuan",
+    "InternLMForCausalLM": "internlm",
+    "MistralForCausalLM": "mistral"
 }
 
 neuron_supported_arch = {
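The architecture map gains entries for Baichuan, InternLM, and Mistral (ChatGLM picks up a trailing comma to accommodate them). As a sketch of how such a map is typically consulted, the architecture name can be read off a model's Hugging Face config; the helper below is illustrative and not code from this repository (even the map's variable name falls outside the hunk).

from typing import Optional

from transformers import AutoConfig

# Hypothetical mirror of the mapping in the diff above.
SUPPORTED_ARCH = {
    "LlamaForCausalLM": "llama",
    "GPTJForCausalLM": "gptj",
    "OPTForCausalLM": "opt",
    "BloomForCausalLM": "bloom",
    "ChatGLMModel": "chatglm",
    "BaiChuanForCausalLM": "baichuan",
    "InternLMForCausalLM": "internlm",
    "MistralForCausalLM": "mistral",
}


def detect_category(model_id: str) -> Optional[str]:
    """Return the mapped category for a Hugging Face model id, or None."""
    config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
    for arch in getattr(config, "architectures", None) or []:
        if arch in SUPPORTED_ARCH:
            return SUPPORTED_ARCH[arch]
    return None


# detect_category("mistralai/Mistral-7B-v0.1") -> "mistral"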
File 4 of 4 (Python)
@@ -33,10 +33,10 @@
     }
 
 tnx_rolling_batch = {
-    'strategy': {0.1: {"option.max_rolling_batch_size": 4},
-                 0.3: {"option.max_rolling_batch_size": 8},
-                 0.5: {"option.max_rolling_batch_size": 32},
-                 0.7: {"option.max_rolling_batch_size": 64}
+    'strategy': {0.1: {"option.max_rolling_batch_size": 4, "option.batch_size": 4},
+                 0.3: {"option.max_rolling_batch_size": 8, "option.batch_size": 8},
+                 0.5: {"option.max_rolling_batch_size": 32, "option.batch_size": 32},
+                 0.7: {"option.max_rolling_batch_size": 64, "option.batch_size": 64}
                  },
     'fp16': {
         "engine": "Python",
@@ -60,7 +60,7 @@
                'model_category': ['llama-13b', 'llama-7b', 'falcon-7b', 'falcon-40b', 'flan-t5', 'gptneox', 'mpt', 'bigcode'],
                'instance': ['g4', 'g5', 'p4', 'p5']},
     'flash2': {'model_category': {'llama', 'falcon', 'flan-t5', 'gptneox', 'mpt', 'bigcode'},
-               'instance': ['g5', 'p4']},
+               'instance': ['g5', 'p4', 'p5']},
     'strategy': {0.1: {"option.max_rolling_batch_size": 4,
                        "option.max_rolling_batch_prefill_tokens": 1560},
                  0.3: {"option.max_rolling_batch_size": 8,
@@ -84,6 +84,23 @@
     }
 }
 
+vllm_rolling_batch = {
+    'vllm': {
+        'model_category': {'llama', 'falcon', 'flan-t5', 'gptneox', 'mpt', 'bigcode', 'chatglm', 'baichuan', 'internlm',
+                           'mistral'},
+        'instance': ['g5', 'p4', 'p5']},
+    'strategy': {0.1: {"option.max_rolling_batch_size": 4},
+                 0.3: {"option.max_rolling_batch_size": 8},
+                 0.5: {"option.max_rolling_batch_size": 32},
+                 0.7: {"option.max_rolling_batch_size": 64},
+                 0.85: {"option.max_rolling_batch_size": 128}},
+    'fp16': {
+        "engine": "Python",
+        "option.dtype": "fp16",
+        "option.rolling_batch": "vllm"
+    }
+}
+
 
 def category_checker(target, category):
     found = False
@@ -94,7 +111,7 @@ def category_checker(target, category):
     return found
 
 
-def rolling_batch_chooser(model_category: str, instance: dict):
+def rolling_batch_chooser(model_category: str, instance: dict, dtype: str):
     instance_name = instance['name']
     if instance_name.startswith('inf2') or instance_name.startswith('trn1'):
         return 'neuronx'
@@ -103,6 +120,11 @@ def rolling_batch_chooser(model_category: str, instance: dict):
     if category_checker(model_category, flash2_checker['model_category']):
         if category_checker(instance['name'], flash2_checker['instance']):
             return 'flash2'
+    # find vllm supported model
+    vllm_checker = vllm_rolling_batch['vllm']
+    if category_checker(model_category, vllm_checker['model_category']):
+        if category_checker(instance['name'], vllm_checker['instance']) and dtype == 'fp16':
+            return 'vllm'
     # find Flash 1 supported model
     flash1_checker = lmi_dist_rolling_batch['flash1']
     if category_checker(model_category, flash1_checker['model_category']):
@@ -115,7 +137,7 @@ def memory_prealloc_chooser(model_size, instance_info):
     tp_sizes = [1, 2, 4, 8]
     if instance_info['name'].startswith('inf2'):
         # https://awsdocs-neuron.readthedocs-hosted.com/en/latest/libraries/transformers-neuronx/transformers-neuronx-developer-guide.html#tensor-parallelism-support
-        tp_sizes = [1, 2, 4, 8, 24]
+        tp_sizes = [1, 2, 4, 8, 12, 24]
     elif instance_info['name'].startswith('trn1'):
         tp_sizes = [1, 2, 8, 16, 32]
     tp_sizes = filter(lambda x: x <= instance_info['num_acc'], tp_sizes)
@@ -132,12 +154,14 @@ def lmi_config_recommender(instance_recommendation: dict):
     dtype = instance_recommendation['dtype']
     for instance in instance_recommendation['instances']:
         result = []
-        strategy = rolling_batch_chooser(instance_recommendation['category'], instance)
+        strategy = rolling_batch_chooser(instance_recommendation['category'], instance, dtype)
         tp_memories = memory_prealloc_chooser(instance_recommendation['size'], instance)
         if 'flash2' == strategy or 'flash1' == strategy:
             rolling_batch = lmi_dist_rolling_batch
         elif 'neuronx' == strategy:
             rolling_batch = tnx_rolling_batch
+        elif 'vllm' == strategy:
+            rolling_batch = vllm_rolling_batch
         else:
             rolling_batch = rolling_batch_fallback
         remained_memory_ratios = sorted(rolling_batch['strategy'].keys(), reverse=True)
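Net effect of the new dtype parameter: on supported GPU instances, an fp16 workload whose model is not already claimed by the flash2 check can now route to vLLM, while any other dtype skips that branch. A quick illustration; the instance dict is made up, and only 'name' is read by the chooser ('num_acc' is used later by memory_prealloc_chooser).

instance = {"name": "g5.12xlarge", "num_acc": 4}

# Mistral is absent from the flash2 and flash1 model lists, so in fp16
# it falls through to the new vLLM branch:
rolling_batch_chooser("mistral", instance, "fp16")  # -> 'vllm'

# A non-fp16 dtype fails the vLLM condition and continues past it,
# to flash1 and then whatever the collapsed tail of the function returns:
rolling_batch_chooser("mistral", instance, "bf16")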
