 device_name = str(torch.cuda.get_device_name(0))
 # Training settings
 parser = argparse.ArgumentParser(description="PyTorch Benchmarking")
-parser.add_argument("--WARM_UP", "-w", type=int, default=5, required=False, help="Num of warm up")
-parser.add_argument("--NUM_TEST", "-n", type=int, default=50, required=False, help="Num of Test")
+parser.add_argument(
+    "--WARM_UP", "-w", type=int, default=5, required=False, help="Num of warm up"
+)
+parser.add_argument(
+    "--NUM_TEST", "-n", type=int, default=50, required=False, help="Num of Test"
+)
 parser.add_argument(
     "--BATCH_SIZE", "-b", type=int, default=12, required=False, help="Num of batch size"
 )
 parser.add_argument(
     "--NUM_CLASSES", "-c", type=int, default=1000, required=False, help="Num of class"
 )
-parser.add_argument("--NUM_GPU", "-g", type=int, default=1, required=False, help="Num of gpus")
 parser.add_argument(
-    "--folder", "-f", type=str, default="result", required=False, help="folder to save results"
+    "--NUM_GPU", "-g", type=int, default=1, required=False, help="Num of gpus"
+)
+parser.add_argument(
+    "--folder",
+    "-f",
+    type=str,
+    default="result",
+    required=False,
+    help="folder to save results",
 )
 args = parser.parse_args()
 args.BATCH_SIZE *= args.NUM_GPU
@@ -97,7 +108,9 @@ def train(precision="single"):
                 end = time.time()
                 if step >= args.WARM_UP:
                     durations.append((end - start) * 1000)
-            print(f"{model_name} model average train time : {sum(durations)/len(durations)}ms")
+            print(
+                f"{model_name} model average train time : {sum(durations)/len(durations)}ms"
+            )
             del model
             benchmark[model_name] = durations
     return benchmark
@@ -115,7 +128,9 @@ def inference(precision="float"):
                 model = model.to("cuda")
                 model.eval()
                 durations = []
-                print(f"Benchmarking Inference {precision} precision type {model_name}")
+                print(
+                    f"Benchmarking Inference {precision} precision type {model_name}"
+                )
                 for step, img in enumerate(rand_loader):
                     img = getattr(img, precision)()
                     torch.cuda.synchronize()
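
For context on what these hunks reformat: both functions use the same timing pattern, i.e. skip the first WARM_UP iterations, bracket the timed region with torch.cuda.synchronize(), and record durations in milliseconds. Below is a minimal standalone sketch of that pattern, assuming a CUDA device is available; the resnet18 model and the random DataLoader here are placeholder stand-ins for illustration, not the script's own MODEL_LIST / rand_loader setup.

# Minimal sketch of the synchronize-and-time loop used by train()/inference().
# Assumes CUDA is available; model and loader are hypothetical stand-ins.
import time

import torch
import torchvision.models as models
from torch.utils.data import DataLoader, TensorDataset

model = models.resnet18().to("cuda")
loader = DataLoader(TensorDataset(torch.randn(64, 3, 224, 224)), batch_size=8)

WARM_UP = 5
durations = []
model.eval()
with torch.no_grad():
    for step, (img,) in enumerate(loader):
        torch.cuda.synchronize()  # make sure earlier GPU work has finished
        start = time.time()
        model(img.to("cuda"))
        torch.cuda.synchronize()  # wait for this forward pass to complete
        end = time.time()
        if step >= WARM_UP:  # discard warm-up iterations
            durations.append((end - start) * 1000)  # milliseconds
print(f"average inference time : {sum(durations)/len(durations)}ms")

Without the synchronize() calls, time.time() would only measure kernel launch overhead, since CUDA work is asynchronous; that is why the script's timed regions are bracketed this way.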