# mlperf.conf — default MLPerf Inference LoadGen settings
# (forked from mlcommons/inference)
# The format of this config file is 'key = value'.
# The key has the format 'model.scenario.key'. Value is mostly int64_t.
# Model may be '*' as a wildcard. In that case the value applies to all models.
# All times are in milliseconds.
# Set performance_sample_count for each model.
# User can optionally set this to higher values in user.conf.
resnet50.*.performance_sample_count_override = 1024
ssd-mobilenet.*.performance_sample_count_override = 256
retinanet.*.performance_sample_count_override = 64
bert.*.performance_sample_count_override = 10833
dlrm.*.performance_sample_count_override = 204800
dlrm-v2.*.performance_sample_count_override = 204800
rnnt.*.performance_sample_count_override = 2513
# Set to 0 to let the entire sample set be used as performance samples.
3d-unet.*.performance_sample_count_override = 0
# Set seeds. The seeds will be distributed two weeks before the submission.
*.*.qsl_rng_seed = 148687905518835231
*.*.sample_index_rng_seed = 520418551913322573
*.*.schedule_rng_seed = 811580660758947900
# Set seeds for TEST_05. The seeds will be distributed two weeks before the submission.
*.*.test05_qsl_rng_seed = 793197339507417767
*.*.test05_sample_index_rng_seed = 255610748586851044
*.*.test05_schedule_rng_seed = 352213341366340113
*.SingleStream.target_latency_percentile = 90
*.SingleStream.min_duration = 600000
#*.SingleStream.min_query_count = 1024
*.MultiStream.target_latency_percentile = 99
*.MultiStream.samples_per_query = 8
*.MultiStream.min_duration = 600000
#*.MultiStream.min_query_count = 270336
*.MultiStream.min_query_count = 662
retinanet.MultiStream.target_latency = 528
# 3D-UNet uses equal issue mode
3d-unet.*.sample_concatenate_permutation = 1
# GPT-J uses equal issue mode for Single-Stream
gptj.SingleStream.sample_concatenate_permutation = 1
*.Server.target_latency = 10
*.Server.target_latency_percentile = 99
*.Server.target_duration = 0
*.Server.min_duration = 600000
#*.Server.min_query_count = 270336
resnet50.Server.target_latency = 15
retinanet.Server.target_latency = 100
bert.Server.target_latency = 130
dlrm.Server.target_latency = 60
dlrm-v2.Server.target_latency = 60
rnnt.Server.target_latency = 1000
gptj.Server.target_latency = 20000
*.Offline.target_latency_percentile = 90
*.Offline.min_duration = 600000
# In Offline scenario, we always have one query. But LoadGen maps this to
# min_sample_count internally in Offline scenario, so set this to 24576 since
# the rule requires that Offline scenario run for at least 24576 samples.
*.Offline.min_query_count = 24576
# These fields should be defined and overridden by user.conf.
*.SingleStream.target_latency = 10
*.MultiStream.target_latency = 80
*.Server.target_qps = 1.0
*.Offline.target_qps = 1.0