
Commit d59ed4e

Skip impls
1 parent b8cb1a4 commit d59ed4e


2 files changed: +23 -21 lines changed


test/test_gpu/main.py

+1 -1
@@ -59,7 +59,7 @@ def _run_one_operator(
 ):
     if tb_args.op in skip_tests:
         # If the op itself is in the skip list, skip all tests
-        if skip_tests[tb_args.op] is None:
+        if not skip_tests[tb_args.op]:
             return
         tb_args.skip = ",".join(skip_tests[tb_args.op])
     Operator = load_opbench_by_name(tb_args.op)
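
For context, a minimal sketch of what the new check buys: with the mapping format in the YAML below, an operator key with an empty value means "skip the whole operator", while a key carrying a list of impl names only feeds those impls into the skip string, as tb_args.skip does above. The helper name resolve_skip and the sample dict are illustrative only, not code from this repository.

def resolve_skip(skip_tests, op):
    # Illustrative helper, not the benchmark driver's actual code.
    if op not in skip_tests:
        return None                      # nothing to skip for this operator
    if not skip_tests[op]:               # empty/None value: skip the whole op
        return "ALL"
    return ",".join(skip_tests[op])      # skip only the listed impls

skip_tests = {"gemm": None, "fp8_gemm": ["triton_persistent_fp8_gemm"]}
assert resolve_skip(skip_tests, "gemm") == "ALL"
assert resolve_skip(skip_tests, "fp8_gemm") == "triton_persistent_fp8_gemm"
assert resolve_skip(skip_tests, "softmax") is None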

test/test_gpu/skip_tests_h100_pytorch.yaml

+22 -20
@@ -2,23 +2,25 @@
 # This file is regarding to the Triton version bundled with pytorch
 # Use <op-name> to skip an entire operator
 # Use <op-name/impl-name> to skip an impl
-- bf16xint16_gemm/bf16xint16
-- fp8_attention/colfax_fmha
-- fp8_fused_quant_gemm_rowwise
-- fp8_gemm/triton_persistent_fp8_gemm
-- fp8_gemm/triton_tma_persistent_fp8_gemm
-- fp8_gemm_rowwise
-- gemm
-- grouped_gemm
-- int4_gemm
-- jagged_layer_norm
-- jagged_mean
-- jagged_softmax
-- jagged_sum
-- layer_norm
-- low_mem_dropout
-- rms_norm
-- rope
-- swiglu
-- template_attention
-- test_op
+bf16xint16_gemm:
+  - bf16xint16
+fp8_attention:
+  - colfax_fmha
+fp8_fused_quant_gemm_rowwise:
+fp8_gemm:
+  - triton_persistent_fp8_gemm
+  - triton_tma_persistent_fp8_gemm
+fp8_gemm_rowwise:
+gemm:
+grouped_gemm:
+int4_gemm:
+jagged_layer_norm:
+jagged_mean:
+jagged_softmax:
+jagged_sum:
+layer_norm:
+low_mem_dropout:
+rms_norm:
+rope:
+template_attention:
+test_op:
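
A quick note on why the main.py change works with this format: PyYAML loads a bare key such as gemm: as None, so the expression not skip_tests[tb_args.op] is true both for None and for an empty list. A small sanity check, sketched under the assumption that PyYAML is installed; the snippet is illustrative and not part of the test harness.

import yaml  # PyYAML

text = """
fp8_gemm:
  - triton_persistent_fp8_gemm
  - triton_tma_persistent_fp8_gemm
gemm:
"""
skips = yaml.safe_load(text)
assert skips["gemm"] is None          # bare key: skip the whole operator
assert not skips["gemm"]              # falsy, so the new check catches it
assert len(skips["fp8_gemm"]) == 2    # only the listed impls are skipped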
