From 0bbb7a7025e86010cc65cf80b4cafbad72d57693 Mon Sep 17 00:00:00 2001
From: Po-Han Huang
Date: Tue, 1 Sep 2020 13:30:16 -0700
Subject: [PATCH] Populate user.conf with user-configurable LoadGen settings.
 Update benchmark names

---
 .../TEST01/{resnet => resnet50}/audit.config  | 0
 .../{ssd-small => ssd-mobilenet}/audit.config | 0
 .../{ssd-large => ssd-resnet34}/audit.config  | 0
 language/bert/user.conf                       | 7 ++++++-
 mlperf.conf                                   | 20 +++++++++++++-------
 recommendation/dlrm/pytorch/user.conf         | 6 ++++++
 recommendation/dlrm/user.conf                 | 0
 speech_recognition/rnnt/user.conf             | 6 ++++++
 vision/classification_and_detection/user.conf | 6 ++++++
 vision/medical_imaging/3d-unet/user.conf      | 7 ++++++-
 10 files changed, 43 insertions(+), 9 deletions(-)
 rename compliance/nvidia/TEST01/{resnet => resnet50}/audit.config (100%)
 rename compliance/nvidia/TEST01/{ssd-small => ssd-mobilenet}/audit.config (100%)
 rename compliance/nvidia/TEST01/{ssd-large => ssd-resnet34}/audit.config (100%)
 create mode 100644 recommendation/dlrm/pytorch/user.conf
 delete mode 100644 recommendation/dlrm/user.conf

diff --git a/compliance/nvidia/TEST01/resnet/audit.config b/compliance/nvidia/TEST01/resnet50/audit.config
similarity index 100%
rename from compliance/nvidia/TEST01/resnet/audit.config
rename to compliance/nvidia/TEST01/resnet50/audit.config
diff --git a/compliance/nvidia/TEST01/ssd-small/audit.config b/compliance/nvidia/TEST01/ssd-mobilenet/audit.config
similarity index 100%
rename from compliance/nvidia/TEST01/ssd-small/audit.config
rename to compliance/nvidia/TEST01/ssd-mobilenet/audit.config
diff --git a/compliance/nvidia/TEST01/ssd-large/audit.config b/compliance/nvidia/TEST01/ssd-resnet34/audit.config
similarity index 100%
rename from compliance/nvidia/TEST01/ssd-large/audit.config
rename to compliance/nvidia/TEST01/ssd-resnet34/audit.config
diff --git a/language/bert/user.conf b/language/bert/user.conf
index 14df8111c..394bc092f 100644
--- a/language/bert/user.conf
+++ b/language/bert/user.conf
@@ -1 +1,6 @@
-bert.Offline.target_qps = 32.0
+# Please set these fields depending on the performance of your system to
+# override default LoadGen settings.
+*.SingleStream.target_latency = 10
+*.Server.target_qps = 1.0
+*.Offline.target_qps = 32.0
+*.MultiStream.samples_per_query = 4
\ No newline at end of file
diff --git a/mlperf.conf b/mlperf.conf
index 0815469d5..3ee227120 100644
--- a/mlperf.conf
+++ b/mlperf.conf
@@ -3,7 +3,8 @@
 # Model maybe '*' as wildcard. In that case the value applies to all models.
 # All times are in milli seconds
 
-# set performance_sample_count for each model
+# Set performance_sample_count for each model.
+# User can optionally set this to higher values in user.conf.
 mobilenet.*.performance_sample_count_override = 1024
 gnmt.*.performance_sample_count_override = 3903900
 resnet50.*.performance_sample_count_override = 1024
@@ -14,19 +15,17 @@ dlrm.*.performance_sample_count_override = 204800
 rnnt.*.performance_sample_count_override = 2513
 3d-unet.*.performance_sample_count_override = 16
 
-# set seeds
+# Set seeds. The seeds will be distributed two weeks before the submission.
 *.*.qsl_rng_seed = 3133965575612453542
 *.*.sample_index_rng_seed = 665484352860916858
 *.*.schedule_rng_seed = 3622009729038561421
 
-*.SingleStream.target_latency = 10
 *.SingleStream.target_latency_percentile = 90
 *.SingleStream.min_duration = 60000
 *.SingleStream.min_query_count = 1024
 
 *.MultiStream.target_qps = 20
 *.MultiStream.target_latency_percentile = 99
-*.MultiStream.samples_per_query = 4
 *.MultiStream.max_async_queries = 1
 *.MultiStream.target_latency = 50
 *.MultiStream.min_duration = 60000
@@ -38,7 +37,6 @@ gnmt.MultiStream.target_latency = 100
 gnmt.MultiStream.target_qps = 10
 gnmt.MultiStream.target_latency_percentile = 97
 
-*.Server.target_qps = 1.0
 *.Server.target_latency = 10
 *.Server.target_latency_percentile = 99
 *.Server.target_duration = 0
@@ -53,7 +51,15 @@ bert.Server.target_latency = 130
 dlrm.Server.target_latency = 30
 rnnt.Server.target_latency = 1000
 
-*.Offline.target_qps = 1.0
 *.Offline.target_latency_percentile = 90
 *.Offline.min_duration = 60000
-*.Offline.min_query_count = 1
\ No newline at end of file
+# In Offline scenario, we always have one query. But LoadGen maps this to
+# min_sample_count internally in Offline scenario, so set this to 24576 since
+# the rule requires that Offline scenario run for at least 24576 samples.
+*.Offline.min_query_count = 24576
+
+# These fields should be defined and overridden by user.conf.
+*.SingleStream.target_latency = 10
+*.Server.target_qps = 1.0
+*.Offline.target_qps = 1.0
+*.MultiStream.samples_per_query = 4
diff --git a/recommendation/dlrm/pytorch/user.conf b/recommendation/dlrm/pytorch/user.conf
new file mode 100644
index 000000000..545569c1a
--- /dev/null
+++ b/recommendation/dlrm/pytorch/user.conf
@@ -0,0 +1,6 @@
+# Please set these fields depending on the performance of your system to
+# override default LoadGen settings.
+*.SingleStream.target_latency = 10
+*.Server.target_qps = 1.0
+*.Offline.target_qps = 1.0
+*.MultiStream.samples_per_query = 4
\ No newline at end of file
diff --git a/recommendation/dlrm/user.conf b/recommendation/dlrm/user.conf
deleted file mode 100644
index e69de29bb..000000000
diff --git a/speech_recognition/rnnt/user.conf b/speech_recognition/rnnt/user.conf
index e69de29bb..545569c1a 100644
--- a/speech_recognition/rnnt/user.conf
+++ b/speech_recognition/rnnt/user.conf
@@ -0,0 +1,6 @@
+# Please set these fields depending on the performance of your system to
+# override default LoadGen settings.
+*.SingleStream.target_latency = 10
+*.Server.target_qps = 1.0
+*.Offline.target_qps = 1.0
+*.MultiStream.samples_per_query = 4
\ No newline at end of file
diff --git a/vision/classification_and_detection/user.conf b/vision/classification_and_detection/user.conf
index e69de29bb..545569c1a 100644
--- a/vision/classification_and_detection/user.conf
+++ b/vision/classification_and_detection/user.conf
@@ -0,0 +1,6 @@
+# Please set these fields depending on the performance of your system to
+# override default LoadGen settings.
+*.SingleStream.target_latency = 10
+*.Server.target_qps = 1.0
+*.Offline.target_qps = 1.0
+*.MultiStream.samples_per_query = 4
\ No newline at end of file
diff --git a/vision/medical_imaging/3d-unet/user.conf b/vision/medical_imaging/3d-unet/user.conf
index 16c8551e1..545569c1a 100644
--- a/vision/medical_imaging/3d-unet/user.conf
+++ b/vision/medical_imaging/3d-unet/user.conf
@@ -1 +1,6 @@
-3d-unet.Offline.target_qps = 1.0
+# Please set these fields depending on the performance of your system to
+# override default LoadGen settings.
+*.SingleStream.target_latency = 10
+*.Server.target_qps = 1.0
+*.Offline.target_qps = 1.0
+*.MultiStream.samples_per_query = 4
\ No newline at end of file
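
Note (not part of the patch): as context for how the two config layers interact at run time, below is a minimal sketch of the pattern the reference harnesses typically use with the LoadGen Python bindings (mlperf_loadgen). mlperf.conf is loaded first to pick up the rule-mandated defaults, then the benchmark's user.conf overrides the performance-dependent fields populated above. The file paths, model name, and scenario string here are illustrative placeholders.

import mlperf_loadgen as lg

settings = lg.TestSettings()
settings.scenario = lg.TestScenario.Offline
settings.mode = lg.TestMode.PerformanceOnly

# Load the rule-mandated defaults (seeds, latency percentiles, min_query_count,
# etc.) from mlperf.conf, then let the per-system user.conf override the
# performance-dependent fields (target_qps, target_latency, samples_per_query).
# Paths and the "bert"/"Offline" arguments are placeholders for this sketch.
settings.FromConfig("mlperf.conf", "bert", "Offline")
settings.FromConfig("language/bert/user.conf", "bert", "Offline")

Because user.conf is read second, any field it sets takes precedence over the value in mlperf.conf; this is why mlperf.conf now carries only placeholder values for target_latency, target_qps, and samples_per_query, while each benchmark's user.conf holds the system-specific numbers.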