diff --git a/test/test_hardshrink.py b/test/test_hardshrink.py
new file mode 100644
index 0000000..4541986
--- /dev/null
+++ b/test/test_hardshrink.py
@@ -0,0 +1,118 @@
+import numpy as np
+import mindspore
+from mindspore import Tensor, dtype as mstype, ops
+import mindspore.mint.nn.functional as F_ms
+import torch
+import torch.nn.functional as F_torch
+
+def test_hardshrink_random_dtype_support():
+    """
+    (1a) Test dtype support with random inputs of different dtypes
+    """
+    print("===== Hardshrink random dtype support test =====")
+    dtypes_to_test = [mstype.float16, mstype.float32, mstype.int32]
+    for dt in dtypes_to_test:
+        x_np = np.random.randn(3, 3).astype(mindspore.dtype_to_nptype(dt) if dt != mstype.int32 else np.int32)
+        print(f"Testing MindSpore Hardshrink with dtype={dt}")
+        try:
+            out_ms = F_ms.hardshrink(Tensor(x_np, dt), lambd=0.5)
+            print(" MindSpore output shape:", out_ms.shape)
+        except Exception as e:
+            print(" MindSpore error:", e)
+
+        # Corresponding PyTorch call
+        torch_dt = torch.float16 if dt == mstype.float16 else torch.float32
+        # int32 maps to torch.int32, but F.hardshrink needs a float input => it may raise an error or require promotion
+        x_torch = torch.tensor(x_np, dtype=torch_dt)
+        print(f"Testing PyTorch Hardshrink with dtype={torch_dt}")
+        try:
+            out_torch = F_torch.hardshrink(x_torch, lambd=0.5)
+            print(" PyTorch output shape:", out_torch.shape)
+        except Exception as e:
+            print(" PyTorch error:", e)
+        print("------------------------------------------------")
+
+
+def test_hardshrink_fixed_dtype_output_equality():
+    """
+    (1b) Fix dtype=float32, use random input values, and check that both frameworks' outputs match (error < 1e-3)
+    """
+    print("===== Hardshrink fixed dtype output equality test =====")
+    x_np = np.random.randn(4, 4).astype(np.float32)
+    ms_in = Tensor(x_np, mstype.float32)
+    torch_in = torch.tensor(x_np, dtype=torch.float32)
+
+    out_ms = F_ms.hardshrink(ms_in, lambd=0.5).asnumpy()
+    out_torch = F_torch.hardshrink(torch_in, lambd=0.5).detach().numpy()
+
+    diff = np.abs(out_ms - out_torch).max()
+    print("Max diff:", diff)
+    assert diff < 1e-3, f"Hardshrink outputs differ too much: {diff}"
+
+
+def test_hardshrink_fixed_shape_diff_params():
+    """
+    (1c) Fixed shape + fixed input values, different parameter types
+    """
+    print("===== Hardshrink fixed shape diff params test =====")
+    x = Tensor(np.array([-1.0, -0.3, 0.3, 1.0]), mstype.float32)
+
+    # 1) lambd default=0.5
+    out_default = F_ms.hardshrink(x)
+    # 2) lambd as float
+    out_float = F_ms.hardshrink(x, lambd=1.0)
+    # 3) lambd as int
+    out_int = F_ms.hardshrink(x, lambd=1)
+    # 4) lambd as bool => the bool is converted to float 1.0 or 0.0
+    out_bool = F_ms.hardshrink(x, lambd=True)
+
+    print(" out_default:", out_default.asnumpy())
+    print(" out_float:", out_float.asnumpy())
+    print(" out_int:", out_int.asnumpy())
+    print(" out_bool:", out_bool.asnumpy())
+
+
+def test_hardshrink_error_messages():
+    """
+    (1d) Test random malformed inputs and the accuracy of the error messages
+    """
+    print("===== Hardshrink error messages test =====")
+    # Non-tensor input
+    try:
+        F_ms.hardshrink([1, -1, 2])
+    except Exception as e:
+        print(" Non-tensor input error:", e)
+
+    # Unsupported dtype (e.g. int32)
+    try:
+        F_ms.hardshrink(Tensor(np.array([1, -1], np.int32)))
+    except Exception as e:
+        print(" Int32 input error:", e)
+
+
+def test_hardshrink_network_forward_backward():
+    """
+    (2b, 2c) Use Hardshrink in a network and test forward outputs and gradients
+    """
+    print("===== Hardshrink network forward/backward test =====")
+    x_np = np.random.randn(3,).astype(np.float32)
+
+    # PyTorch
+    x_torch = torch.tensor(x_np, requires_grad=True)
+    y_torch = F_torch.hardshrink(x_torch, lambd=0.5)
+    y_torch_sum = y_torch.sum()
+    y_torch_sum.backward()
+    grad_torch = x_torch.grad.detach().numpy()
+
+    # MindSpore
+    x_ms = Tensor(x_np, mstype.float32)
+    x_ms.requires_grad = True
+
+    def ms_forward(inp):
+        return F_ms.hardshrink(inp, lambd=0.5).sum()
+
+    grad_fn = ops.grad(ms_forward, grad_position=0)
+    grad_ms = grad_fn(x_ms).asnumpy()
+
+    print("PyTorch grad:", grad_torch)
+    print("MindSpore grad:", grad_ms)
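
Note (not part of the patch): the equality assertions above rely on both frameworks computing Hardshrink(x) = x when |x| > lambd and 0 otherwise. A minimal NumPy oracle along those lines (the helper name is illustrative) could make the expected values independent of either framework:

    import numpy as np

    def hardshrink_ref(x: np.ndarray, lambd: float = 0.5) -> np.ndarray:
        # Keep entries whose magnitude exceeds lambd, zero out the rest.
        return np.where(np.abs(x) > lambd, x, 0.0).astype(x.dtype)

    x = np.array([-1.0, -0.3, 0.3, 1.0], dtype=np.float32)
    print(hardshrink_ref(x, lambd=0.5))  # expected: [-1.  0.  0.  1.]
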
diff --git a/test/test_hardsigmoid.py b/test/test_hardsigmoid.py
new file mode 100644
index 0000000..437a481
--- /dev/null
+++ b/test/test_hardsigmoid.py
@@ -0,0 +1,114 @@
+import numpy as np
+import mindspore
+from mindspore import Tensor, dtype as mstype, ops
+import mindspore.mint.nn.functional as F_ms
+import torch
+import torch.nn.functional as F_torch
+
+
+def test_hardsigmoid_random_dtype_support():
+    """
+    (1a) Test dtype support with random inputs of different dtypes
+    """
+    print("===== Hardsigmoid random dtype support test =====")
+    for dt in [mstype.float16, mstype.float32, mstype.int32]:
+        x_np = np.random.randn(3,3)
+        if dt == mstype.int32:
+            x_np = x_np.astype(np.int32)
+        else:
+            x_np = x_np.astype(mindspore.dtype_to_nptype(dt))
+
+        print(f"Testing MindSpore Hardsigmoid with dtype={dt}")
+        try:
+            out_ms = F_ms.hardsigmoid(Tensor(x_np, dt))
+            print(" MindSpore output shape:", out_ms.shape)
+        except Exception as e:
+            print(" MindSpore error:", e)
+
+        # PyTorch
+        torch_dt = torch.float16 if dt == mstype.float16 else torch.float32
+        x_torch = torch.tensor(x_np, dtype=torch_dt)
+        print(f"Testing PyTorch Hardsigmoid with dtype={torch_dt}")
+        try:
+            out_torch = F_torch.hardsigmoid(x_torch)
+            print(" PyTorch output shape:", out_torch.shape)
+        except Exception as e:
+            print(" PyTorch error:", e)
+        print("------------------------------------------------")
+
+
+def test_hardsigmoid_fixed_dtype_output_equality():
+    """
+    (1b) Fix dtype=float32, random inputs, compare the two frameworks' outputs (error < 1e-3)
+    """
+    print("===== Hardsigmoid fixed dtype output equality test =====")
+    x_np = np.random.uniform(-5,5,size=(4,4)).astype(np.float32)
+    ms_in = Tensor(x_np, mstype.float32)
+    torch_in = torch.tensor(x_np, dtype=torch.float32)
+
+    out_ms = F_ms.hardsigmoid(ms_in).asnumpy()
+    out_torch = F_torch.hardsigmoid(torch_in).numpy()
+
+    diff = np.abs(out_ms - out_torch).max()
+    print("Max diff:", diff)
+    assert diff < 1e-3, f"Hardsigmoid output mismatch, diff={diff}"
+
+
+def test_hardsigmoid_fixed_shape_diff_params():
+    """
+    (1c) Hardsigmoid only takes the input and has no other parameters, so test different shapes instead
+    """
+    print("===== Hardsigmoid fixed shape diff params test =====")
+    # Different shapes
+    arr1 = Tensor(np.array([-4.0,0.0,4.0], np.float32))
+    arr2 = Tensor(np.array([[-2.0,0.0],[2.0,4.0]], np.float32))
+
+    out1 = F_ms.hardsigmoid(arr1)
+    out2 = F_ms.hardsigmoid(arr2)
+
+    print("arr1 hardsigmoid:", out1.asnumpy())
+    print("arr2 hardsigmoid:", out2.asnumpy())
+
+    # Pass a non-tensor / unsupported dtype
+    try:
+        F_ms.hardsigmoid("not a tensor")
+    except Exception as e:
+        print("Error with str input:", e)
+
+
+def test_hardsigmoid_error_messages():
+    """
+    (1d) Test random invalid inputs and the error messages
+    """
+    print("===== Hardsigmoid error messages test =====")
+    try:
+        F_ms.hardsigmoid(Tensor(np.ones((2,2), np.int64)))  # MindSpore may not support int64
+    except Exception as e:
+        print("int64 input error:", e)
+
+
+def test_hardsigmoid_network_forward_backward():
+    """
+    (2b,2c) Compare Hardsigmoid forward outputs and gradients
+    """
+    print("===== Hardsigmoid forward/backward test =====")
+    x_np = np.array([-4., -2., 0., 2., 4.], np.float32)
+
+    # PyTorch
+    x_pt = torch.tensor(x_np, requires_grad=True)
+    y_pt = F_torch.hardsigmoid(x_pt)
+    y_pt.sum().backward()
+    grad_pt = x_pt.grad.numpy()
+
+    # MindSpore
+    x_ms = Tensor(x_np, mstype.float32)
+    x_ms.requires_grad = True
+    def ms_forward(x):
+        return F_ms.hardsigmoid(x).sum()
+    grad_fn = ops.grad(ms_forward, grad_position=0)
+    grad_ms = grad_fn(x_ms).asnumpy()
+
+    print("PyTorch grad:", grad_pt)
+    print("MindSpore grad:", grad_ms)
+
+
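
Note (not part of the patch): PyTorch documents Hardsigmoid as clip(x / 6 + 1/2, 0, 1), and the equality test above implicitly assumes mint's hardsigmoid follows the same definition. A small NumPy sketch of that formula for cross-checking (helper name is illustrative):

    import numpy as np

    def hardsigmoid_ref(x: np.ndarray) -> np.ndarray:
        # 0 for x <= -3, 1 for x >= 3, linear ramp x/6 + 1/2 in between
        return np.clip(x / 6.0 + 0.5, 0.0, 1.0).astype(x.dtype)

    x = np.array([-4.0, -2.0, 0.0, 2.0, 4.0], dtype=np.float32)
    print(hardsigmoid_ref(x))  # approx. [0.0, 0.1667, 0.5, 0.8333, 1.0]
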
diff --git a/test/test_hardswish.py b/test/test_hardswish.py
new file mode 100644
index 0000000..b7bb322
--- /dev/null
+++ b/test/test_hardswish.py
@@ -0,0 +1,101 @@
+import numpy as np
+import mindspore
+from mindspore import Tensor, dtype as mstype, ops
+import mindspore.mint.nn.functional as F_ms
+import torch
+import torch.nn.functional as F_torch
+
+
+def test_hardswish_random_dtype_support():
+    """
+    (1a) Test different dtypes
+    """
+    print("===== Hardswish random dtype support test =====")
+    for dt in [mstype.float16, mstype.float32, mstype.int32]:
+        x_np = np.random.uniform(-5,5,size=(3,3))
+        if dt == mstype.int32:
+            x_np = x_np.astype(np.int32)
+        else:
+            x_np = x_np.astype(mindspore.dtype_to_nptype(dt))
+        try:
+            out_ms = F_ms.hardswish(Tensor(x_np, dt))
+            print(f"MindSpore dtype={dt}, output shape={out_ms.shape}")
+        except Exception as e:
+            print(f"MindSpore error with dtype={dt}:", e)
+
+        torch_dt = torch.float16 if dt == mstype.float16 else torch.float32
+        try:
+            out_torch = F_torch.hardswish(torch.tensor(x_np, dtype=torch_dt))
+            print(f"PyTorch dtype={torch_dt}, output shape={out_torch.shape}")
+        except Exception as e:
+            print(f"PyTorch error with dtype={torch_dt}:", e)
+        print("--------------------------------------------")
+
+
+def test_hardswish_fixed_dtype_output_equality():
+    """
+    (1b) Fix dtype to float32 and compare outputs
+    """
+    print("===== Hardswish fixed dtype output equality test =====")
+    x_np = np.random.uniform(-5,5,(4,4)).astype(np.float32)
+    ms_in = Tensor(x_np, mstype.float32)
+    torch_in = torch.tensor(x_np, dtype=torch.float32)
+
+    out_ms = F_ms.hardswish(ms_in).asnumpy()
+    out_torch = F_torch.hardswish(torch_in).numpy()
+
+    diff = np.abs(out_ms - out_torch).max()
+    print("Max diff:", diff)
+    assert diff < 1e-3
+
+
+def test_hardswish_fixed_shape_diff_params():
+    """
+    (1c) Hardswish has no extra parameters; only test different shapes
+    """
+    print("===== Hardswish fixed shape diff params test =====")
+    arr1 = Tensor(np.array([-2., -1., 0., 1., 2.], np.float32))
+    out1 = F_ms.hardswish(arr1)
+    print("arr1 shape:", arr1.shape, "out:", out1.asnumpy())
+
+    arr2 = Tensor(np.array([[-3,3],[4,-5]], np.float32))
+    out2 = F_ms.hardswish(arr2)
+    print("arr2 shape:", arr2.shape, "out:\n", out2.asnumpy())
+
+
+def test_hardswish_error_messages():
+    """
+    (1d) Non-tensor input or unsupported dtype
+    """
+    print("===== Hardswish error messages test =====")
+    try:
+        F_ms.hardswish("not a tensor")
+    except Exception as e:
+        print("String input error:", e)
+
+    try:
+        F_ms.hardswish(Tensor([0,1,2], mstype.int64))
+    except Exception as e:
+        print("Int64 input error:", e)
+
+
+def test_hardswish_network_forward_backward():
+    """
+    (2b,2c) Forward outputs and gradients
+    """
+    print("===== Hardswish network forward/backward test =====")
+    # PyTorch
+    x_torch = torch.tensor([-4.,-2.,0.,2.,4.], requires_grad=True)
+    y_torch = F_torch.hardswish(x_torch)
+    y_torch.sum().backward()
+    grad_torch = x_torch.grad.numpy()
+    print("PyTorch grad:", grad_torch)
+
+    # MindSpore
+    x_ms = Tensor(np.array([-4.,-2.,0.,2.,4.], np.float32))
+    x_ms.requires_grad = True
+    def net(inp):
+        return F_ms.hardswish(inp).sum()
+    grad_fn = ops.grad(net, grad_position=0)
+    grad_ms = grad_fn(x_ms).asnumpy()
+    print("MindSpore grad:", grad_ms)
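
Note (not part of the patch): PyTorch's Hardswish is x * relu6(x + 3) / 6, i.e. 0 below x = -3 and the identity above x = 3; the comparison above assumes mint's hardswish follows the same formula. An illustrative NumPy reference:

    import numpy as np

    def hardswish_ref(x: np.ndarray) -> np.ndarray:
        # x * clip(x + 3, 0, 6) / 6
        return (x * np.clip(x + 3.0, 0.0, 6.0) / 6.0).astype(x.dtype)

    x = np.array([-4.0, -2.0, 0.0, 2.0, 4.0], dtype=np.float32)
    print(hardswish_ref(x))  # approx. [0.0, -0.3333, 0.0, 1.6667, 4.0]
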
diff --git a/test/test_layer_norm.py b/test/test_layer_norm.py
new file mode 100644
index 0000000..7664d6c
--- /dev/null
+++ b/test/test_layer_norm.py
@@ -0,0 +1,122 @@
+import numpy as np
+import mindspore
+from mindspore import Tensor, dtype as mstype, ops
+import mindspore.mint.nn.functional as F_ms
+import torch
+import torch.nn.functional as F_torch
+
+def test_layernorm_random_dtype_support():
+    """
+    (1a) Test random inputs with different dtypes
+    MindSpore LayerNorm generally supports float16 and float32
+    """
+    print("===== LayerNorm random dtype support test =====")
+    dtypes_to_test = [mstype.float16, mstype.float32, mstype.int32]
+    for dt in dtypes_to_test:
+        x_np = np.random.randn(2,4).astype(np.float32 if dt != mstype.int32 else np.int32)
+        try:
+            out_ms = F_ms.layer_norm(Tensor(x_np, dt), normalized_shape=(4,))
+            print(f"MindSpore dtype={dt}, shape={out_ms.shape}")
+        except Exception as e:
+            print(f"MindSpore error with dtype={dt}:", e)
+
+        # PyTorch
+        torch_dt = torch.float16 if dt == mstype.float16 else torch.float32
+        x_torch = torch.tensor(x_np, dtype=torch_dt)
+        try:
+            out_pt = F_torch.layer_norm(x_torch, normalized_shape=(4,))
+            print(f"PyTorch dtype={torch_dt}, shape={out_pt.shape}")
+        except Exception as e:
+            print(f"PyTorch error with dtype={torch_dt}:", e)
+        print("--------------------------------------")
+
+
+def test_layernorm_fixed_dtype_output_equality():
+    """
+    (1b) Fix dtype=float32, random inputs, compare outputs
+    """
+    print("===== LayerNorm fixed dtype output equality test =====")
+    x_np = np.random.randn(2,3,4).astype(np.float32)
+    w_np = np.random.randn(4).astype(np.float32)
+    b_np = np.random.randn(4).astype(np.float32)
+    ms_in = Tensor(x_np, mstype.float32)
+    ms_w = Tensor(w_np, mstype.float32)
+    ms_b = Tensor(b_np, mstype.float32)
+
+    out_ms = F_ms.layer_norm(ms_in, normalized_shape=(4,), weight=ms_w, bias=ms_b, eps=1e-5).asnumpy()
+
+    x_torch = torch.tensor(x_np, dtype=torch.float32)
+    w_torch = torch.tensor(w_np, dtype=torch.float32)
+    b_torch = torch.tensor(b_np, dtype=torch.float32)
+    out_pt = F_torch.layer_norm(x_torch, normalized_shape=(4,), weight=w_torch, bias=b_torch, eps=1e-5).numpy()
+
+    diff = np.abs(out_ms - out_pt).max()
+    print("Max diff:", diff)
+    assert diff < 1e-3, f"LayerNorm diff too large: {diff}"
+
+
+def test_layernorm_fixed_shape_diff_params():
+    """
+    (1c) Test normalized_shape given as int or tuple; weight/bias can be omitted
+    """
+    print("===== LayerNorm fixed shape diff params test =====")
+    x = Tensor(np.random.randn(2,4).astype(np.float32))
+    # normalized_shape int vs tuple
+    out1 = F_ms.layer_norm(x, 4)     # int
+    out2 = F_ms.layer_norm(x, (4,))  # tuple
+    diff = np.abs(out1.asnumpy() - out2.asnumpy()).max()
+    print("normalized_shape int vs tuple diff:", diff)
+
+    # weight/bias omitted
+    out3 = F_ms.layer_norm(x, normalized_shape=4)
+    print("No weight/bias out shape:", out3.shape)
+
+
+def test_layernorm_error_messages():
+    """
+    (1d) Invalid inputs
+    """
+    print("===== LayerNorm error messages test =====")
+    # normalized_shape does not match the input shape
+    try:
+        F_ms.layer_norm(Tensor(np.random.randn(2,3), mstype.float32), normalized_shape=(4,))
+    except Exception as e:
+        print("shape not match error:", e)
+
+    # weight dimension mismatch
+    try:
+        F_ms.layer_norm(Tensor(np.random.randn(2,4), mstype.float32), normalized_shape=(4,),
+                        weight=Tensor(np.random.randn(5).astype(np.float32)))
+    except Exception as e:
+        print("weight mismatch error:", e)
+
+    # invalid eps
+    try:
+        F_ms.layer_norm(Tensor(np.random.randn(2,4), mstype.float32), normalized_shape=(4,), eps="1e-5")
+    except Exception as e:
+        print("invalid eps error:", e)
+
+
+def test_layernorm_network_forward_backward():
+    """
+    (2b,2c) Test LayerNorm forward inference and backward gradients in a network
+    """
+    print("===== LayerNorm forward/backward test =====")
+    # PyTorch
+    x_pt = torch.randn(2,3,4, requires_grad=True)
+    out_pt = F_torch.layer_norm(x_pt, normalized_shape=(4,))
+    loss_pt = out_pt.sum()
+    loss_pt.backward()
+    grad_pt = x_pt.grad.numpy()
+
+    # MindSpore
+    x_ms = Tensor(x_pt.detach().numpy(), mstype.float32)
+    x_ms.requires_grad = True
+    def forward_fn(x):
+        return F_ms.layer_norm(x, normalized_shape=(4,)).sum()
+    grad_fn = ops.grad(forward_fn, grad_position=0)
+    grad_ms = grad_fn(x_ms).asnumpy()
+
+    diff = np.abs(grad_pt - grad_ms).max()
+    print("Max grad diff:", diff)
+    assert diff < 1e-3
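
Note (not part of the patch): layer_norm normalizes over the trailing normalized_shape dimensions with the biased variance, y = (x - mean) / sqrt(var + eps) * weight + bias, which is the behaviour the 1e-3 tolerance above relies on. A NumPy sketch for the last-axis case used in these tests (helper name is illustrative):

    import numpy as np

    def layer_norm_ref(x, weight=None, bias=None, eps=1e-5):
        # Normalize over the last axis using the biased (ddof=0) variance.
        mean = x.mean(axis=-1, keepdims=True)
        var = x.var(axis=-1, keepdims=True)
        y = (x - mean) / np.sqrt(var + eps)
        if weight is not None:
            y = y * weight
        if bias is not None:
            y = y + bias
        return y.astype(x.dtype)

    x = np.random.randn(2, 3, 4).astype(np.float32)
    out = layer_norm_ref(x)  # per-row mean ~0 and std ~1 over the last axis
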
diff --git a/test/test_leaky_relu.py b/test/test_leaky_relu.py
new file mode 100644
index 0000000..af0594f
--- /dev/null
+++ b/test/test_leaky_relu.py
@@ -0,0 +1,109 @@
+import numpy as np
+import mindspore
+from mindspore import Tensor, dtype as mstype, ops
+import mindspore.mint.nn.functional as F_ms
+import torch
+import torch.nn.functional as F_torch
+
+def test_leaky_relu_random_dtype_support():
+    """
+    (1a) Test random inputs with different dtypes
+    """
+    print("===== LeakyReLU random dtype support test =====")
+    for dt in [mstype.float16, mstype.float32, mstype.int32]:
+        x_np = np.random.randn(3,3)
+        if dt == mstype.int32:
+            x_np = x_np.astype(np.int32)
+        else:
+            x_np = x_np.astype(mindspore.dtype_to_nptype(dt))
+        try:
+            out_ms = F_ms.leaky_relu(Tensor(x_np, dt), negative_slope=0.1)
+            print(f"MindSpore dtype={dt}, shape={out_ms.shape}")
+        except Exception as e:
+            print("MindSpore error:", e)
+
+        torch_dt = torch.float16 if dt == mstype.float16 else torch.float32
+        x_torch = torch.tensor(x_np, dtype=torch_dt)
+        try:
+            out_pt = F_torch.leaky_relu(x_torch, negative_slope=0.1)
+            print(f"PyTorch dtype={torch_dt}, shape={out_pt.shape}")
+        except Exception as e:
+            print("PyTorch error:", e)
+        print("------------------------------------")
+
+
+def test_leaky_relu_fixed_dtype_output_equality():
+    """
+    (1b) Fix dtype=float32, random inputs, compare outputs
+    """
+    print("===== LeakyReLU fixed dtype output equality test =====")
+    x_np = np.random.randn(4,4).astype(np.float32)
+    ms_in = Tensor(x_np, mstype.float32)
+    torch_in = torch.tensor(x_np, dtype=torch.float32)
+
+    out_ms = F_ms.leaky_relu(ms_in, 0.1).asnumpy()
+    out_pt = F_torch.leaky_relu(torch_in, 0.1).numpy()
+
+    diff = np.abs(out_ms - out_pt).max()
+    print("Max diff:", diff)
+    assert diff < 1e-3
+
+
+def test_leaky_relu_fixed_shape_diff_params():
+    """
+    (1c) Test different types for the negative_slope parameter
+    """
+    print("===== LeakyReLU fixed shape diff params test =====")
+    x = Tensor(np.array([-2., -1., 0., 1., 2.], np.float32))
+
+    out_slope_default = F_ms.leaky_relu(x)  # default=0.01
+    out_slope_float = F_ms.leaky_relu(x, 0.2)
+    out_slope_int = F_ms.leaky_relu(x, 1)
+    out_slope_bool = F_ms.leaky_relu(x, True)  # True => 1
+
+    print("default=0.01:", out_slope_default.asnumpy())
+    print("0.2:", out_slope_float.asnumpy())
+    print("1:", out_slope_int.asnumpy())
+    print("True(=1):", out_slope_bool.asnumpy())
+
+    # string slope => error
+    try:
+        F_ms.leaky_relu(x, negative_slope="0.1")
+    except Exception as e:
+        print("slope=string error:", e)
+
+
+def test_leaky_relu_error_messages():
+    """
+    (1d) Test random invalid inputs
+    """
+    print("===== LeakyReLU error messages test =====")
+    # Non-tensor input
+    try:
+        F_ms.leaky_relu([-1,0,1], negative_slope=0.1)
+    except Exception as e:
+        print("non-tensor input error:", e)
+
+
+def test_leaky_relu_network_forward_backward():
+    """
+    (2b,2c) Use LeakyReLU to verify forward outputs & backward gradients
+    """
+    print("===== LeakyReLU forward/backward test =====")
+
+    # PyTorch
+    x_pt = torch.tensor([-1.,0.,1.], requires_grad=True)
+    out_pt = F_torch.leaky_relu(x_pt, 0.2)
+    out_pt.sum().backward()
+    grad_pt = x_pt.grad.numpy()
+
+    # MindSpore
+    x_ms = Tensor(np.array([-1.,0.,1.], np.float32))
+    x_ms.requires_grad = True
+    def forward_fn(inp):
+        return F_ms.leaky_relu(inp, 0.2).sum()
+    grad_fn = ops.grad(forward_fn, grad_position=0)
+    grad_ms = grad_fn(x_ms).asnumpy()
+
+    print("PyTorch grad:", grad_pt)
+    print("MindSpore grad:", grad_ms)
\ No newline at end of file
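
Note (not part of the patch): LeakyReLU is x for x >= 0 and negative_slope * x otherwise, so the gradient of sum(leaky_relu(x)) is 1 on the positive side and negative_slope on the negative side (the convention at exactly x == 0 may differ between frameworks). A NumPy sketch of the forward value and that gradient (helper names are illustrative):

    import numpy as np

    def leaky_relu_ref(x: np.ndarray, negative_slope: float = 0.01) -> np.ndarray:
        # Identity for non-negative inputs, scaled by negative_slope otherwise.
        return np.where(x >= 0, x, negative_slope * x).astype(x.dtype)

    def leaky_relu_grad_ref(x: np.ndarray, negative_slope: float = 0.01) -> np.ndarray:
        # d/dx of sum(leaky_relu(x)); the x == 0 entry follows the >= 0 branch here.
        return np.where(x >= 0, 1.0, negative_slope).astype(x.dtype)

    x = np.array([-2.0, -1.0, 1.5], dtype=np.float32)
    print(leaky_relu_ref(x, 0.2))       # [-0.4, -0.2, 1.5]
    print(leaky_relu_grad_ref(x, 0.2))  # [0.2, 0.2, 1.0]
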