add tunableop for gemm
Summary: per request

Reviewed By: xuzhao9

Differential Revision: D66183878

fbshipit-source-id: 053165599c1171bdcf6cc41472a8c129ecf8b37d
nmacchioni authored and facebook-github-bot committed Nov 19, 2024
1 parent d9633be commit 0b8e36c
Showing 1 changed file with 19 additions and 0 deletions.
tritonbench/operators/gemm/operator.py (19 additions, 0 deletions)
@@ -194,6 +194,25 @@ def aten_matmul(self, a, b, bias) -> Callable:
         else:
             return lambda: torch.matmul(a, b)
 
+    @register_benchmark()
+    def aten_tunableop_matmul(self, a, b, bias) -> Callable:
+        is_enabled = torch.cuda.tunable.is_enabled()
+
+        def op():
+            torch.cuda.tunable.enable(True)
+            output = (
+                torch.matmul(a, b) + bias if bias is not None else torch.matmul(a, b)
+            )
+            torch.cuda.tunable.enable(is_enabled)
+            return output
+
+        torch.cuda.tunable.enable(True)
+
+        # trigger tuning
+        op()
+
+        return op
+
     @register_benchmark(enabled=HAS_HAMMER)
     def hstu_triton_matmul(self, a, b, bias) -> Callable:
         if not bias == None:
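For context: torch.cuda.tunable is PyTorch's TunableOp interface. When enabled, it benchmarks the available GEMM backends for each matmul shape it encounters and caches the fastest choice. The new aten_tunableop_matmul entry wraps torch.matmul between enable(True) and a restore of the previous state, and calls the op once up front so the tuning cost is paid before measurement. Below is a minimal standalone sketch of the same pattern, not part of this commit; it assumes a CUDA/ROCm PyTorch build with TunableOp available, and the tensor shapes and dtype are arbitrary.

    import torch

    def tunableop_matmul(a, b, bias=None):
        # Remember the prior TunableOp state so it can be restored afterwards.
        was_enabled = torch.cuda.tunable.is_enabled()
        torch.cuda.tunable.enable(True)
        out = torch.matmul(a, b) + bias if bias is not None else torch.matmul(a, b)
        torch.cuda.tunable.enable(was_enabled)
        return out

    if __name__ == "__main__":
        a = torch.randn(4096, 4096, device="cuda", dtype=torch.float16)
        b = torch.randn(4096, 4096, device="cuda", dtype=torch.float16)
        tunableop_matmul(a, b)  # first call triggers tuning for this shape (slow)
        tunableop_matmul(a, b)  # later calls reuse the kernel selected during tuning

To exercise the new entry through tritonbench itself, an invocation along the lines of `python run.py --op gemm --only aten_tunableop_matmul` should work, assuming the runner's usual --op/--only flags.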
