
Commit 524e065
Merge pull request #25 from anandhu-eng/mlperf-inference
Bug Fix - Model downloading in LON (client)
arjunsuresh authored May 24, 2024
2 parents 2719254 + 96f47dc commit 524e065
Showing 2 changed files with 18 additions and 7 deletions.
script/app-mlperf-inference-mlcommons-python/_cm.yaml (4 additions, 0 deletions)
@@ -347,6 +347,10 @@ deps:
       CM_MODEL:
       - gptj-99
       - gptj-99.9
+    skip_if_env:
+      NETWORK:
+      - lon
+
 
 
   ## RetinaNet (PyTorch weights, FP32)
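For context: with this skip_if_env entry, CM should skip the gpt-j model-download dependency whenever the NETWORK variable is set to "lon", i.e. on the LoadGen over Network (LON) client, where the model is not needed locally. A minimal sketch of how such a condition can be read (illustration only, not CM's actual resolver; should_skip_dep and the "sut" value are made up for the example):

    # Illustration only: evaluate a skip_if_env-style condition against an env dict.
    # A dependency is skipped when a listed variable holds one of the listed values.
    def should_skip_dep(skip_if_env, env):
        return any(env.get(var, '') in values
                   for var, values in skip_if_env.items())

    skip_cond = {'NETWORK': ['lon']}
    print(should_skip_dep(skip_cond, {'NETWORK': 'lon'}))  # True  -> model download skipped
    print(should_skip_dep(skip_cond, {'NETWORK': 'sut'}))  # False -> model is downloaded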

script/app-mlperf-inference-mlcommons-python/customize.py (14 additions, 7 deletions)
@@ -75,9 +75,10 @@ def preprocess(i):
     else:
         env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf "+ x + env['CM_MLPERF_CONF'] + x
 
-    env['MODEL_DIR'] = env.get('CM_ML_MODEL_PATH')
-    if not env['MODEL_DIR']:
-        env['MODEL_DIR'] = os.path.dirname(env.get('CM_MLPERF_CUSTOM_MODEL_PATH', env.get('CM_ML_MODEL_FILE_WITH_PATH')))
+    if env.get('CM_NETWORK_LOADGEN', '') != "lon":
+        env['MODEL_DIR'] = env.get('CM_ML_MODEL_PATH')
+        if not env['MODEL_DIR']:
+            env['MODEL_DIR'] = os.path.dirname(env.get('CM_MLPERF_CUSTOM_MODEL_PATH', env.get('CM_ML_MODEL_FILE_WITH_PATH')))
 
     RUN_CMD = ""
     state['RUN'] = {}
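Why the guard matters: on the LON client the model stays on the server side, so CM_ML_MODEL_PATH and CM_ML_MODEL_FILE_WITH_PATH are typically unset there (the download dependency is now skipped via the _cm.yaml change above), and the old unconditional lookup would trip over the missing path. A small standalone sketch of the before/after behaviour with a dummy env dict (values are hypothetical):

    import os

    # Hypothetical LON-client environment: no local model paths are present.
    env = {'CM_NETWORK_LOADGEN': 'lon'}

    # Old behaviour: env.get(...) yields None, and os.path.dirname(None) raises TypeError.
    try:
        model_dir = env.get('CM_ML_MODEL_PATH')
        if not model_dir:
            model_dir = os.path.dirname(
                env.get('CM_MLPERF_CUSTOM_MODEL_PATH', env.get('CM_ML_MODEL_FILE_WITH_PATH')))
    except TypeError as exc:
        print("old code fails on the client:", exc)

    # New behaviour: MODEL_DIR is only derived when not running as the LON client.
    if env.get('CM_NETWORK_LOADGEN', '') != "lon":
        env['MODEL_DIR'] = env.get('CM_ML_MODEL_PATH')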

@@ -176,10 +177,16 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, mode_extra_optio
     if env['CM_MODEL'] in [ "gptj-99", "gptj-99.9" ]:
 
         env['RUN_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j")
-        cmd = env['CM_PYTHON_BIN_WITH_PATH'] + \
-            " main.py --model-path=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['CM_DATASET_EVAL_PATH'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
-            ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \
-            scenario_extra_options + mode_extra_options + dataset_options
+        if env.get('CM_NETWORK_LOADGEN', '') != "lon":
+            cmd = env['CM_PYTHON_BIN_WITH_PATH'] + \
+                " main.py --model-path=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['CM_DATASET_EVAL_PATH'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+                ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \
+                scenario_extra_options + mode_extra_options + dataset_options
+        else:
+            cmd = env['CM_PYTHON_BIN_WITH_PATH'] + \
+                " main.py" + ' --dataset-path=' + env['CM_DATASET_EVAL_PATH'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
+                ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \
+                scenario_extra_options + mode_extra_options + dataset_options
         cmd = cmd.replace("--count", "--max_examples")
         if env['CM_MLPERF_DEVICE'] == "gpu":
             gpu_options = " --gpu"
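To make the branch above concrete, these are roughly the two command shapes it produces: SUT-side runs keep --model-path, while LON-client runs drop it because the checkpoint is served remotely. All paths and option values below are illustrative placeholders, not values emitted by this script:

    # Illustrative placeholders only; real values come from the CM environment.
    python_bin = "/usr/bin/python3"
    dataset = "/data/cnn_dailymail_eval.json"
    common = " --scenario Offline --dtype bfloat16"

    # Non-network (or SUT) run: the local checkpoint is passed via --model-path.
    cmd_local = python_bin + " main.py --model-path=/models/gpt-j" + \
        " --dataset-path=" + dataset + common

    # LON client run: no --model-path; the model lives on the server side.
    cmd_lon = python_bin + " main.py --dataset-path=" + dataset + common

    print(cmd_local)
    print(cmd_lon)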
