From 8a199308a71aa77199e5d7f4c91e6c9d10e327ef Mon Sep 17 00:00:00 2001
From: Xu Zhao
Date: Mon, 18 Nov 2024 18:52:10 -0500
Subject: [PATCH] Fix hstu

---
 tritonbench/operators/ragged_attention/hstu.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tritonbench/operators/ragged_attention/hstu.py b/tritonbench/operators/ragged_attention/hstu.py
index 9a050298..59224ece 100644
--- a/tritonbench/operators/ragged_attention/hstu.py
+++ b/tritonbench/operators/ragged_attention/hstu.py
@@ -19,8 +19,8 @@
     _ragged_hstu_attn_fwd_persistent = (
         triton_ragged_hstu_attention._ragged_hstu_attn_fwd_persistent
     )
-    _RaggedAttentionRelativeBiasFunction = (
-        triton_ragged_hstu_attention._RaggedAttentionRelativeBiasFunction
+    RaggedAttentionRelativeBiasFunction = (
+        triton_ragged_hstu_attention.RaggedAttentionRelativeBiasFunction
     )
 
 @torch.fx.wrap
@@ -150,7 +150,7 @@ def forward(
             grid = (1216,)
             _ragged_hstu_attn_fwd_persistent[grid](**kwargs)
         else:
-            out = _RaggedAttentionRelativeBiasFunction.apply(
+            out = RaggedAttentionRelativeBiasFunction.apply(
                 self.max_seq_len,  # N
                 kwargs["alpha"],
                 q,
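
The patch tracks an upstream rename: _RaggedAttentionRelativeBiasFunction became the
public RaggedAttentionRelativeBiasFunction in the triton_ragged_hstu_attention module.
Below is a minimal compatibility sketch, not part of the patch itself; it assumes
triton_ragged_hstu_attention is imported from generative_recommenders.ops.triton as in
hstu.py, and the getattr fallback and error message are illustrative.

    # Sketch: accept either the post-rename public name or the old private one,
    # so the benchmark keeps working against both versions of the upstream module.
    from generative_recommenders.ops.triton import triton_ragged_hstu_attention

    RaggedAttentionRelativeBiasFunction = getattr(
        triton_ragged_hstu_attention,
        "RaggedAttentionRelativeBiasFunction",  # current upstream name
        getattr(
            triton_ragged_hstu_attention,
            "_RaggedAttentionRelativeBiasFunction",  # pre-rename private name
            None,
        ),
    )
    if RaggedAttentionRelativeBiasFunction is None:
        raise ImportError(
            "triton_ragged_hstu_attention exposes neither the new nor the old "
            "RaggedAttentionRelativeBiasFunction name"
        )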