373 changes: 195 additions & 178 deletions Cargo.lock

Large diffs are not rendered by default.

12 changes: 6 additions & 6 deletions Cargo.toml
@@ -180,13 +180,13 @@ portable-atomic = { version = "1.11.1" }
portable-atomic-util = { version = "0.2.4", features = ["alloc"] }

### For the main burn branch. ###
-cubecl = { git = "https://github.com/tracel-ai/cubecl", default-features = false, rev = "48ff83f19952d053b80ab5762baf387f451e5c63" }
-cubecl-common = { git = "https://github.com/tracel-ai/cubecl", default-features = false, rev = "48ff83f19952d053b80ab5762baf387f451e5c63" }
-cubek = { git = "https://github.com/tracel-ai/cubek", default-features = false, rev = "0d9a635229d3cabfa8297ddc967ff4e783be348c" }
+# cubecl = { git = "https://github.com/tracel-ai/cubecl", default-features = false, rev = "48ff83f19952d053b80ab5762baf387f451e5c63" }
+# cubecl-common = { git = "https://github.com/tracel-ai/cubecl", default-features = false, rev = "48ff83f19952d053b80ab5762baf387f451e5c63" }
+# cubek = { git = "https://github.com/tracel-ai/cubek", default-features = false, rev = "0d9a635229d3cabfa8297ddc967ff4e783be348c" }
### For local development. ###
-# cubecl = { path = "../../cubecl/crates/cubecl", default-features = false }
-# cubecl-common = { path = "../../cubecl/crates/cubecl-common", default-features = false }
-# cubek = { path = "../../cubek/crates/cubek", default-features = false }
+cubecl = { path = "../cubecl/crates/cubecl", default-features = false }
+cubecl-common = { path = "../cubecl/crates/cubecl-common", default-features = false }
+cubek = { path = "../cubek/crates/cubek", default-features = false }
### For the release. ###
# cubecl = { version = "=0.9.0-pre.6", default-features = false }
# cubecl-common = { version = "=0.9.0-pre.6", default-features = false }
2 changes: 1 addition & 1 deletion crates/burn-backend-tests/tests/common/autodiff.rs
@@ -30,5 +30,5 @@ test_float_elem_variant!(
bf16,
burn_tensor::bf16,
"../autodiff/mod.rs",
["vulkan", "metal"] // ["cuda", "rocm"] TODO
["metal"] // ["cuda", "rocm"] TODO, ["vulkan"] only supports bf16 for matmul
Member:

Whoops, I added bf16 for vulkan but clearly the tests don't pass 😅 thanks for fixing.

[Unrelated to this PR]

I vaguely remember adding it when refactoring the tests since it was a supported global type for vulkan in cubecl. Also reflected by B::supports_dtype:

#[test]
fn should_support_dtypes() {
    type B = Wgpu;
    let device = Default::default();
    assert!(B::supports_dtype(&device, DType::F32));
    assert!(B::supports_dtype(&device, DType::I64));
    assert!(B::supports_dtype(&device, DType::I32));
    assert!(B::supports_dtype(&device, DType::U64));
    assert!(B::supports_dtype(&device, DType::U32));
    assert!(B::supports_dtype(
        &device,
        DType::QFloat(CubeTensor::<WgpuRuntime>::default_scheme())
    ));
    // Registered as supported type but we don't actually use it?
    assert!(B::supports_dtype(&device, DType::Bool));
    #[cfg(feature = "vulkan")]
    {
        assert!(B::supports_dtype(&device, DType::F16));
        assert!(B::supports_dtype(&device, DType::BF16));
        assert!(B::supports_dtype(&device, DType::I16));
        assert!(B::supports_dtype(&device, DType::I8));
        assert!(B::supports_dtype(&device, DType::U16));
        assert!(B::supports_dtype(&device, DType::U8));
        assert!(!B::supports_dtype(&device, DType::F64));
        assert!(!B::supports_dtype(&device, DType::Flex32));
    }
}

maybe we should have a better way to represent the actual supported types?

fn supports_dtype(device: &Self::Device, dtype: DType) -> bool {
    let client = R::client(device);
    let ty: StorageType = dtype.into();
    client.properties().supports_type(ty.elem_type())
}

That way, tested dtypes can actually reflect supported dtypes.

@wingertge (Contributor, Author), Jan 5, 2026:

The supports_type method on the CubeCL side only checks if a type is supported in any way (in this case, it's supported for conversion, as a type for buffers, and for dot product on Intel, along with tensor core instructions). It's kinda tough though because there's no good way to express that in just a single boolean (hence why the TypeUsage enum set exists in CubeCL).

This is how it's registered for Vulkan

if let Some(bfloat16) = ext_feat.bfloat16 {
    if bfloat16.shader_b_float16_type == TRUE {
        register(
            ElemType::Float(FloatKind::BF16).into(),
            TypeUsage::Conversion | TypeUsage::Buffer,
        );
    }
    if bfloat16.shader_b_float16_dot_product == TRUE {
        register(
            ElemType::Float(FloatKind::BF16).into(),
            TypeUsage::DotProduct.into(),
        );
    }
}
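
To make that concrete, here is a minimal, self-contained sketch of the flag-set idea; the `Usage` enum and its variants below are hypothetical stand-ins for illustration, not CubeCL's actual `TypeUsage` definitions:

// Illustrative only: a simplified stand-in for a per-type usage set.
use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Usage {
    Conversion,
    Buffer,
    DotProduct,
    Arithmetic,
}

// A single "supported in any way" boolean collapses the whole set and loses the
// difference between "storable/castable" and "usable in ordinary kernels".
fn supported_in_any_way(usages: &HashSet<Usage>) -> bool {
    !usages.is_empty()
}

fn main() {
    // Roughly what the Vulkan registration above yields for bf16 when both
    // extension features are present: conversion + buffer + dot product,
    // but no general arithmetic.
    let bf16: HashSet<Usage> = [Usage::Conversion, Usage::Buffer, Usage::DotProduct]
        .into_iter()
        .collect();

    assert!(supported_in_any_way(&bf16)); // what a plain supports_type-style check reports
    assert!(!bf16.contains(&Usage::Arithmetic)); // why the bf16 element-wise tests still fail
}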

@wingertge (Contributor, Author):

So it's supported for matmul and casting, but none of the other ops.

Member:

> The supports_type method on the CubeCL side only checks if a type is supported in any way

Yeah and for the first draft I simply mirrored that for the backends, but I think it should be refined.

> It's kinda tough though because there's no good way to express that in just a single boolean (hence why the TypeUsage enum set exists in CubeCL).

That's a good point. It's still useful to query backend supported types for burn, so maybe we should also define an enum similar to TypeUsage? (without atomics, and perhaps consolidate conversion / buffer into "storage" variant or similar).
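
As a rough illustration of that suggestion, a burn-side capability enum and query could look something like the sketch below; `DTypeUsage`, `DtypeCapabilities`, and everything else here is hypothetical (not an existing burn or CubeCL API), and the variant set is only a guess:

// Hypothetical sketch only; none of these items exist in burn today.
use std::collections::BTreeSet;

// Analogue of CubeCL's TypeUsage without atomics, with conversion/buffer
// consolidated into a single Storage variant as suggested above.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum DTypeUsage {
    Storage,
    Arithmetic,
    Matmul,
}

// Placeholder for burn's real DType, only to keep the sketch self-contained.
#[derive(Clone, Copy, Debug)]
enum DTypeSketch {
    F32,
    BF16,
}

// The shape a refined backend query could take next to the existing supports_dtype.
trait DtypeCapabilities {
    type Device;

    // All usages a dtype supports on the given device.
    fn dtype_usages(device: &Self::Device, dtype: DTypeSketch) -> BTreeSet<DTypeUsage>;

    // Tests can then assert on the usage they actually exercise.
    fn supports_dtype_for(device: &Self::Device, dtype: DTypeSketch, usage: DTypeUsage) -> bool {
        Self::dtype_usages(device, dtype).contains(&usage)
    }
}

// Dummy backend just to exercise the sketch.
struct DummyBackend;

impl DtypeCapabilities for DummyBackend {
    type Device = ();

    fn dtype_usages(_device: &Self::Device, dtype: DTypeSketch) -> BTreeSet<DTypeUsage> {
        match dtype {
            DTypeSketch::F32 => [DTypeUsage::Storage, DTypeUsage::Arithmetic, DTypeUsage::Matmul]
                .into_iter()
                .collect(),
            DTypeSketch::BF16 => [DTypeUsage::Storage, DTypeUsage::Matmul].into_iter().collect(),
        }
    }
}

fn main() {
    let device = ();
    assert!(DummyBackend::supports_dtype_for(&device, DTypeSketch::F32, DTypeUsage::Arithmetic));
    assert!(!DummyBackend::supports_dtype_for(&device, DTypeSketch::BF16, DTypeUsage::Arithmetic));
}

How finely the variants are split (for example, whether accelerated matmul deserves its own variant) would need to track CubeCL's TypeUsage closely; this sketch only shows the overall shape of the query.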

);
2 changes: 1 addition & 1 deletion crates/burn-backend-tests/tests/common/tensor.rs
@@ -34,5 +34,5 @@ test_float_elem_variant!(
bf16,
burn_tensor::bf16,
"../tensor/float/mod.rs",
["vulkan", "metal"] // ["cuda", "rocm"] TODO
["metal"] // ["cuda", "rocm"] TODO, ["vulkan"] only supports bf16 for matmul
);
2 changes: 1 addition & 1 deletion crates/burn-backend-tests/tests/cubecl/mask_fill.rs
@@ -2,7 +2,7 @@ use super::*;
use burn_cubecl::kernel::{MaskFillStrategy, mask_fill};
use burn_tensor::Tolerance;
use burn_tensor::{Bool, Distribution, Element, Tensor, TensorPrimitive, backend::Backend};
-use cubecl::std::scalar::InputScalar;
+use cubecl::prelude::InputScalar;

#[test]
fn mask_fill_should_match_reference_backend() {
13 changes: 10 additions & 3 deletions crates/burn-cubecl-fusion/src/base.rs
@@ -1,12 +1,15 @@
use burn_fusion::stream::Context;
use burn_std::{DType, quantization::QParamTensor};
-use cubecl::quant::scheme::{QuantParam, QuantScheme};
use cubecl::{
CubeElement, Runtime,
client::ComputeClient,
ir::ElemType,
prelude::{TensorArg, TensorHandleRef},
};
+use cubecl::{
+ir::LineSize,
+quant::scheme::{QuantParam, QuantScheme},
+};
use std::marker::PhantomData;

/// Defines a fallback operation when fusion isn't possible.
@@ -73,15 +76,19 @@ impl<R: Runtime> CubeFusionHandle<R> {
}
}
/// Return the reference to a tensor argument.
-pub fn as_tensor_arg<'a>(&'a self, shape: &'a [usize], vectorisation: u8) -> TensorArg<'a, R> {
+pub fn as_tensor_arg<'a>(
+&'a self,
+shape: &'a [usize],
+line_size: LineSize,
+) -> TensorArg<'a, R> {
let handle: TensorHandleRef<'a, R> = self.as_handle_ref(shape);

unsafe {
TensorArg::from_raw_parts_and_size(
handle.handle,
handle.strides,
handle.shape,
-vectorisation,
+line_size,
self.dtype.size(),
)
}