2 files changed, +23 -21 lines changed

@@ -59,7 +59,7 @@ def _run_one_operator(
 ):
     if tb_args.op in skip_tests:
         # If the op itself is in the skip list, skip all tests
-        if skip_tests[tb_args.op] is None:
+        if not skip_tests[tb_args.op]:
             return
         tb_args.skip = ",".join(skip_tests[tb_args.op])
     Operator = load_opbench_by_name(tb_args.op)
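The practical effect of the loosened check: with the mapping-style skip file (second hunk below), a bare key loads as None, and an op entry could also carry an empty list; `not skip_tests[tb_args.op]` treats both as "skip every test for this operator", whereas the old `is None` check only caught the bare-key case and would have produced `tb_args.skip = ""` for an empty list. A minimal sketch of that behavior (not part of the PR; `should_skip_entire_op` is a hypothetical helper for illustration):

```python
# Sketch only: `skip_tests` is assumed to be the dict loaded from the skip YAML,
# where a bare key ("gemm:") loads as None and an op with impls loads as a list.
def should_skip_entire_op(skip_value):
    # Old check: `skip_value is None` -> only a bare key skipped the whole op;
    # an empty list fell through and produced an empty tb_args.skip string.
    # New check: None and an empty list both mean "skip every test for this op".
    return not skip_value

assert should_skip_entire_op(None)                 # gemm:
assert should_skip_entire_op([])                   # op listed with no impls
assert not should_skip_entire_op(["bf16xint16"])   # only skip the named impl
```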

@@ -2,23 +2,25 @@
 # This file is regarding to the Triton version bundled with pytorch
 # Use <op-name> to skip an entire operator
 # Use <op-name/impl-name> to skip an impl
-- bf16xint16_gemm/bf16xint16
-- fp8_attention/colfax_fmha
-- fp8_fused_quant_gemm_rowwise
-- fp8_gemm/triton_persistent_fp8_gemm
-- fp8_gemm/triton_tma_persistent_fp8_gemm
-- fp8_gemm_rowwise
-- gemm
-- grouped_gemm
-- int4_gemm
-- jagged_layer_norm
-- jagged_mean
-- jagged_softmax
-- jagged_sum
-- layer_norm
-- low_mem_dropout
-- rms_norm
-- rope
-- swiglu
-- template_attention
-- test_op
+bf16xint16_gemm:
+  - bf16xint16
+fp8_attention:
+  - colfax_fmha
+fp8_fused_quant_gemm_rowwise:
+fp8_gemm:
+  - triton_persistent_fp8_gemm
+  - triton_tma_persistent_fp8_gemm
+fp8_gemm_rowwise:
+gemm:
+grouped_gemm:
+int4_gemm:
+jagged_layer_norm:
+jagged_mean:
+jagged_softmax:
+jagged_sum:
+layer_norm:
+low_mem_dropout:
+rms_norm:
+rope:
+template_attention:
+test_op:
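For reference, a small sketch (not part of the PR) of how the new mapping format loads, assuming the runner reads the file with PyYAML's `yaml.safe_load`: a bare key parses to None, and a key with nested entries parses to a list of impl names that the runner joins into `tb_args.skip`. The YAML text below is abridged from the hunk above.

```python
import yaml  # PyYAML, assumed to be how the runner parses the skip file

config = """
fp8_gemm:
  - triton_persistent_fp8_gemm
  - triton_tma_persistent_fp8_gemm
gemm:
"""

skip_tests = yaml.safe_load(config)
print(skip_tests["fp8_gemm"])   # ['triton_persistent_fp8_gemm', 'triton_tma_persistent_fp8_gemm']
print(skip_tests["gemm"])       # None -> falsy, so the whole operator is skipped
print(",".join(skip_tests["fp8_gemm"]))  # value that would land in tb_args.skip
```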