graph TD
    app-mlperf-inference,d775cac873ee4231_(_nvidia,_resnet50,_tensorrt,_cuda,_valid,_r5.0-dev_default,_multistream_) --> detect,os
    app-mlperf-inference,d775cac873ee4231_(_nvidia,_resnet50,_tensorrt,_cuda,_valid,_r5.0-dev_default,_multistream_) --> get,sys-utils-cm
    app-mlperf-inference,d775cac873ee4231_(_nvidia,_resnet50,_tensorrt,_cuda,_valid,_r5.0-dev_default,_multistream_) --> get,python
    app-mlperf-inference,d775cac873ee4231_(_nvidia,_resnet50,_tensorrt,_cuda,_valid,_r5.0-dev_default,_multistream_) --> get,mlcommons,inference,src
    pull-git-repo,c23132ed65c4421d --> detect,os
    app-mlperf-inference,d775cac873ee4231_(_nvidia,_resnet50,_tensorrt,_cuda,_valid,_r5.0-dev_default,_multistream_) --> pull,git,repo
    get-mlperf-inference-utils,e341e5f86d8342e5 --> get,mlperf,inference,src
    app-mlperf-inference,d775cac873ee4231_(_nvidia,_resnet50,_tensorrt,_cuda,_valid,_r5.0-dev_default,_multistream_) --> get,mlperf,inference,utils
    app-mlperf-inference,d775cac873ee4231_(_nvidia,_resnet50,_tensorrt,_cuda,_valid,_r5.0-dev_default,_multistream_) --> get,dataset-aux,imagenet-aux
    get-cuda-devices,7a3ede4d3558427a_(_with-pycuda_) --> get,cuda,_toolkit
    get-cuda-devices,7a3ede4d3558427a_(_with-pycuda_) --> get,python3
    get-generic-python-lib,94b62a682bc44791_(_package.pycuda_) --> get,python3
    get-cuda-devices,7a3ede4d3558427a_(_with-pycuda_) --> get,generic-python-lib,_package.pycuda
    get-generic-python-lib,94b62a682bc44791_(_package.numpy_) --> get,python3
    get-cuda-devices,7a3ede4d3558427a_(_with-pycuda_) --> get,generic-python-lib,_package.numpy
    app-mlperf-inference,d775cac873ee4231_(_nvidia,_resnet50,_tensorrt,_cuda,_valid,_r5.0-dev_default,_multistream_) --> get,cuda-devices,_with-pycuda
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> detect,os
    detect-cpu,586c8a43320142f7 --> detect,os
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> detect,cpu
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> get,sys-utils-cm
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> get,mlperf,inference,nvidia,scratch,space,_version.5.0-dev
    get-generic-python-lib,94b62a682bc44791_(_mlperf_logging_) --> get,python3
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> get,generic-python-lib,_mlperf_logging
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> get,dataset,original,imagenet,_full
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> get,ml-model,resnet50,_fp32,_onnx,_opset-8
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> get,mlcommons,inference,src
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> get,nvidia,mlperf,inference,common-code,_mlcommons
    pull-git-repo,c23132ed65c4421d --> detect,os
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> pull,git,repo
    generate-mlperf-inference-user-conf,3af4475745964b93 --> detect,os
    detect-cpu,586c8a43320142f7 --> detect,os
    generate-mlperf-inference-user-conf,3af4475745964b93 --> detect,cpu
    generate-mlperf-inference-user-conf,3af4475745964b93 --> get,python
    generate-mlperf-inference-user-conf,3af4475745964b93 --> get,mlcommons,inference,src
    get-mlperf-inference-sut-configs,c2fbf72009e2445b --> get,cache,dir,_name.mlperf-inference-sut-configs
    generate-mlperf-inference-user-conf,3af4475745964b93 --> get,sut,configs
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> generate,user-conf,mlperf,inference
    get-generic-python-lib,94b62a682bc44791_(_package.pycuda_) --> get,python3
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> get,generic-python-lib,_package.pycuda
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> get,cuda,_cudnn
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> get,tensorrt
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> build,nvidia,inference,server,_mlcommons
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> detect,os
    detect-cpu,586c8a43320142f7 --> detect,os
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> detect,cpu
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> get,sys-utils-cm
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> get,mlperf,inference,nvidia,scratch,space,_version.5.0-dev
    get-generic-python-lib,94b62a682bc44791_(_mlperf_logging_) --> get,python3
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> get,generic-python-lib,_mlperf_logging
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> get,dataset,original,imagenet,_full
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> get,ml-model,resnet50,_fp32,_onnx,_opset-8
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> get,mlcommons,inference,src
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> get,nvidia,mlperf,inference,common-code,_mlcommons
    pull-git-repo,c23132ed65c4421d --> detect,os
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> pull,git,repo
    get-generic-python-lib,94b62a682bc44791_(_package.pycuda_) --> get,python3
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> get,generic-python-lib,_package.pycuda
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> get,cuda,_cudnn
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> get,tensorrt
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> build,nvidia,inference,server,_mlcommons
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> reproduce,mlperf,inference,nvidia,harness,_preprocess_data,_resnet50,_cuda,_tensorrt,_v4.1-dev
    get-generic-python-lib,94b62a682bc44791_(_onnx-graphsurgeon_) --> get,python3
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> get,generic-python-lib,_onnx-graphsurgeon
    get-generic-python-lib,94b62a682bc44791_(_package.onnx_) --> get,python3
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> get,generic-python-lib,_package.onnx
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8_) --> save,mlperf,inference,state
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> reproduce,mlperf,inference,nvidia,harness,_build_engine,_resnet50,_cuda,_multistream,_tensorrt,_v4.1-dev,_batch_size.8
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> reproduce,mlperf,inference,nvidia,harness,_preprocess_data,_resnet50,_cuda,_tensorrt,_v4.1-dev
    get-generic-python-lib,94b62a682bc44791_(_onnx-graphsurgeon_) --> get,python3
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> get,generic-python-lib,_onnx-graphsurgeon
    get-generic-python-lib,94b62a682bc44791_(_package.onnx_) --> get,python3
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> get,generic-python-lib,_package.onnx
    detect-cpu,586c8a43320142f7 --> detect,os
    benchmark-program,19f369ef47084895 --> detect,cpu
    benchmark-program-mlperf,cfff0132a8aa4018 --> benchmark-program,program
    app-mlperf-inference-nvidia,bc3b17fb430f4732_(_run_harness,_resnet50,_cuda,_multistream,_tensorrt,_rtx_4090_) --> benchmark-mlperf