diff --git a/test/test_eq.py b/test/test_eq.py
new file mode 100644
index 0000000..80c86ce
--- /dev/null
+++ b/test/test_eq.py
@@ -0,0 +1,176 @@
+import torch
+import mindspore
+import numpy as np
+import pytest
+
+'''
+    Tests:
+    mindspore.mint.eq(input, other)
+    Compares two input Tensors element-wise for equality.
+    The second input can be a Number, or a Tensor whose shape is broadcastable to the first input, and vice versa.
+
+    input (Union[Tensor, Number]) - The first input can be a Number or a Tensor whose dtype is numeric.
+    other (Union[Tensor, Number]) - When the first input is a Tensor, the second input can be a Number or a Tensor
+        whose numeric dtype matches the first input. When the first input is a Number, the second input must be a Tensor.
+'''
+
+@pytest.mark.parametrize("dtype", [
+    np.bool_,
+    np.int_,
+    np.intc,
+    np.intp,
+    np.int8,
+    np.int16,
+    np.int32,
+    np.int64,
+    np.uint8,
+    np.uint16,
+    np.uint32,
+    np.uint64,
+    np.float_,
+    np.float16,
+    np.float32,
+    np.float64,
+    np.complex_,
+    np.complex64,
+    np.complex128
+])
+def test_eq_random_input_dtype(dtype):
+    """
+    Test random inputs across different dtypes; compare dtype support between MindSpore and PyTorch.
+    """
+    flag1 = True
+    flag2 = True
+    shape = (4, 4)
+    try:
+        # MindSpore
+        input_ms = mindspore.Tensor(np.random.random(size=shape).astype(dtype))
+        result_ms = mindspore.mint.eq(input_ms, input_ms)
+        print(result_ms)
+        assert isinstance(result_ms, mindspore.Tensor)
+    except Exception as e:
+        print(f"MindSpore raised an error: {e}")
+        flag1 = False
+
+    try:
+        # PyTorch
+        input_pt = torch.from_numpy(np.random.random(size=shape).astype(dtype))
+        result_pt = torch.eq(input_pt, input_pt)
+        assert isinstance(result_pt, torch.Tensor)
+    except Exception as e:
+        print(f"PyTorch raised an error: {e}")
+        flag2 = False
+
+    if not flag1 and flag2:
+        pytest.fail("MindSpore does not support: " + str(dtype))
+    if flag1 and not flag2:
+        pytest.fail("PyTorch does not support: " + str(dtype))
+    if not flag1 and not flag2:
+        pytest.fail("Neither MindSpore nor PyTorch supports: " + str(dtype))
+
+
+@pytest.mark.parametrize("input", [
+    {"input": np.random.random(size=(4, 4)), "other": np.random.random(size=(4, 4))},
+    {"input": np.random.random(size=(4, 4)), "other": np.random.random(size=(1,))},
+    {"input": np.random.random(size=(1,)), "other": np.random.random(size=(4, 4))},
+    {"input": np.random.random(size=(4, 4)), "other": np.random.random(size=(4, 4, 4))},
+    {"input": np.random.random(size=(4, 4, 4)), "other": np.random.random(size=(4, 4))},
+])
+def test_eq_fixed_dtype_random_value(input):
+    """
+    Test with a fixed dtype and random input values; compare the outputs of the two frameworks (tolerance below 1e-3).
+    """
+    # MindSpore side
+    input_ms = mindspore.Tensor(input["input"])
+    other_ms = mindspore.Tensor(input["other"])
+    result_ms = mindspore.mint.eq(input_ms, other_ms)
+
+    # PyTorch side
+    input_pt = torch.from_numpy(input["input"])
+    other_pt = torch.from_numpy(input["other"])
+    result_pt = torch.eq(input_pt, other_pt)
+
+    assert np.allclose(result_ms.asnumpy(), result_pt.numpy(), atol=1e-3)
+
+
+@pytest.mark.parametrize("input_param", [
+
+])
+def test_eq_fixed_shape_fixed_value_different_params(input_param):
+    """
+    Test with a fixed shape and fixed input values across different parameters; compare support between the two frameworks.
+    """
+    pass
+
+
+@pytest.mark.parametrize("random_messy_input", [
+    {"input": np.random.random(size=[4, 4]), "other": np.random.random(size=[4]), "error": TypeError},
+    {"input": mindspore.Tensor(np.random.random(size=[4, 4])), "other": mindspore.Tensor(np.random.random(size=(3,))), "error": ValueError}
+])
+def test_eq_random_messy_input_error_info(random_messy_input):
+    """
+    Test malformed random inputs and check the accuracy of the reported errors.
+    TypeError - neither input nor other is a Tensor.
+    ValueError - the shapes of input and other cannot be broadcast together.
+    """
+    flag = False
+    input_ms = random_messy_input["input"]
+    other_ms = random_messy_input["other"]
+    try:
+        result_ms = mindspore.mint.eq(input_ms, other_ms)
+        print(result_ms)
+    except Exception as e_ms:
+        assert isinstance(e_ms, random_messy_input["error"])
+        flag = True
+    if not flag:
+        pytest.fail("Expected an exception to be raised, but none was caught; the test fails.")
+
+
+def test_eq_in_neural_network():
+    """
+    Test a simple network containing the eq operation.
+    """
+    input_value = np.random.random(size=(4,))
+
+    class SimpleNet_pt(torch.nn.Module):
+        def __init__(self):
+            super(SimpleNet_pt, self).__init__()
+
+        def forward(self, x, y):
+            r = torch.eq(x, y)
+            return r.sum()
+
+    class SimpleNet_ms(mindspore.nn.Cell):
+        def __init__(self):
+            super(SimpleNet_ms, self).__init__()
+
+        def construct(self, x, y):
+            r = mindspore.mint.eq(x, y)
+            return r.sum()
+
+    input_ms = mindspore.Tensor(input_value)
+    net_ms = SimpleNet_ms()
+    result_ms = net_ms(input_ms, input_ms)
+
+    input_pt = torch.from_numpy(input_value)
+    net_pt = SimpleNet_pt()
+    result_pt = net_pt(input_pt, input_pt)
+
+    assert np.allclose(result_ms.asnumpy(), result_pt.detach().numpy(), atol=1e-3)
+
+
+def test_eq_backward():
+    """
+    Test the backward pass of the function.
+    """
+    pass
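Note: a minimal sketch of the broadcasting behavior described in the module docstring above, assuming mindspore.mint is available as in these tests; the values in the comments are expected results, not captured output.

    import numpy as np
    import mindspore

    x = mindspore.Tensor(np.array([[1.0, 2.0], [3.0, 4.0]]))
    # Number as `other`: compared against every element -> [[False, True], [False, False]]
    print(mindspore.mint.eq(x, 2.0))
    # (2,) Tensor as `other`: broadcast across rows -> [[True, False], [False, True]]
    print(mindspore.mint.eq(x, mindspore.Tensor(np.array([1.0, 4.0]))))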
pytest.fail("在预期应捕获异常的情况下,未捕获到任何异常,测试不通过") + + + + + +def test_eq_in_neural_network(): + """ + 测试包含eq操作的简单网络示例 + """ + input_value = np.random.random(size=(4)) + + class SimpleNet_pt(torch.nn.Module): + def __init__(self): + super(SimpleNet_pt, self).__init__() + def forward(self, x, y): + r = torch.eq(x,y) + return r.sum() + + class SimpleNet_ms(mindspore.nn.Cell): + def __init__(self): + super(SimpleNet_ms, self).__init__() + def construct(self, x, y): + r = mindspore.mint.eq(x,y) + return r.sum() + + input_ms = mindspore.Tensor(input_value) + net_ms = SimpleNet_ms() + result_ms = net_ms(input_ms,input_ms) + + + input_pt = torch.from_numpy(input_value) + net_pt = SimpleNet_pt() + result_pt = net_pt(input_pt,input_pt) + + assert np.allclose(result_ms.asnumpy(), result_pt.detach().numpy(), atol=1e-3) + + +def test_eq_backward(): + """ + 测试函数反向 + """ + pass + + + + diff --git a/test/test_greater.py b/test/test_greater.py new file mode 100644 index 0000000..a2f8ef7 --- /dev/null +++ b/test/test_greater.py @@ -0,0 +1,125 @@ +import torch +import mindspore +import numpy as np +import pytest + +''' + 测试: + mindspore.mint.greater(input, other) + 逐元素比较两个输入Tensor,返回一个布尔型Tensor,表示input中的元素是否大于other中的对应元素。 + 第二个输入可以是一个shape可以广播成第一个输入的Number或Tensor,反之亦然。 + + input (Union[Tensor, Number]) - 第一个输入可以是数值型,也可以是数据类型为数值型的Tensor。 + other (Union[Tensor, Number]) - 当第一个输入是Tensor时,第二个输入是数值型或数据类型为数值型的Tensor,数据类型与第一个输入相同。 + 当第一个输入是数值型时,第二个输入应为Tensor。 +''' + +@pytest.mark.parametrize("dtype", [ + np.bool_, + np.int_, + np.intc, + np.intp, + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + np.float_, + np.float16, + np.float32, + np.float64, + # 复数类型不支持比较操作 + # np.complex_, + # np.complex64, + # np.complex128 +]) +def test_greater_random_input_dtype(dtype): + """ + 测试random输入不同dtype,对比MindSpore和Pytorch的支持度 + """ + flag1 = True + flag2 = True + shape = (4, 4) + try: + # MindSpore + input_ms = mindspore.Tensor(np.random.random(size=shape).astype(dtype)) + other_ms = mindspore.Tensor(np.random.random(size=shape).astype(dtype)) + result_ms = mindspore.mint.greater(input_ms, other_ms) + assert isinstance(result_ms, mindspore.Tensor) and result_ms.dtype == mindspore.bool_ + except Exception as e: + print(f"MindSpore出现报错: {e}") + flag1 = False + + try: + # Pytorch + input_pt = torch.from_numpy(np.random.random(size=shape).astype(dtype)) + other_pt = torch.from_numpy(np.random.random(size=shape).astype(dtype)) + result_pt = torch.gt(input_pt, other_pt) + assert isinstance(result_pt, torch.Tensor) and result_pt.dtype == torch.bool + except Exception as e: + print(f"Pytorch出现报错: {e}") + flag2 = False + + if not flag1 and flag2: + pytest.fail("mindspore不支持:"+str(dtype)) + if flag1 and not flag2: + pytest.fail("pytorch不支持:"+str(dtype)) + if not flag1 and not flag2: + pytest.fail("both mindspore 和 pytorch不支持:"+str(dtype)) + + +@pytest.mark.parametrize("input", [ + {"input":np.random.random(size=(4,4)), "other":np.random.random(size=(4,4))}, + {"input":np.random.random(size=(4,4)), "other":np.random.random(size=(4,))}, + {"input":np.random.random(size=(4,)), "other":np.random.random(size=(4,4))}, +]) +def test_greater_fixed_dtype_random_value(input): + """ + 测试固定dtype,random输入值,对比两个框架输出(误差范围不适用,这里主要比较结果的结构和类型) + """ + # MindSpore部分 + input_ms = mindspore.Tensor(input["input"]) + other_ms = mindspore.Tensor(input["other"]) + result_ms = mindspore.mint.greater(input_ms, other_ms) + + # Pytorch部分 + input_pt = torch.from_numpy(input["input"]) + other_pt = 
torch.from_numpy(input["other"]) + result_pt = torch.gt(input_pt, other_pt) + + assert np.allclose(result_ms.asnumpy(), result_pt.numpy()) + + +@pytest.mark.parametrize("random_messy_input", [ + {"input":np.random.random(size=[4,4]), "other":np.random.random(size=[4]), "error":TypeError}, + {"input":mindspore.Tensor(np.random.random(size=[4,4])), "other":mindspore.Tensor(np.random.random(size=(3))), "error":ValueError} +]) +def test_greater_random_messy_input_error_info(random_messy_input): + """ + 测试随机混乱输入,报错信息的准确性 + TypeError - input 和 other 都不是Tensor。 + ValueError - input 和 other 的形状不兼容。 + """ + flag = False + try: + input_ms = random_messy_input["input"] + other_ms = random_messy_input["other"] + result_ms = mindspore.mint.greater(input_ms, other_ms) + print(result_ms) + except Exception as e_ms: + assert isinstance(e_ms, random_messy_input["error"]) + flag = True + if not flag: + pytest.fail("在预期应捕获异常的情况下,未捕获到任何异常,测试不通过") + + +def test_greater_backward(): + """ + 测试包含gt操作的网络的反向传播 + """ + pass + + diff --git a/test/test_prod.py b/test/test_prod.py new file mode 100644 index 0000000..e8d0ec9 --- /dev/null +++ b/test/test_prod.py @@ -0,0 +1,230 @@ +import torch +import mindspore +import numpy as np +import pytest + +''' + 测试: + mindspore.mint.prod(input, dim=None, keepdim=False, *, dtype=None) + 默认情况下,使用指定维度的所有元素的乘积代替该维度的其他元素,以移除该维度。也可仅缩小该维度大小至1。 keepdim 控制输出和输入的维度是否相同。 +''' + +@pytest.mark.parametrize("dtype", [ + np.bool_, + np.int_, + np.intc, + np.intp, + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + np.float_, + np.float16, + np.float32, + np.float64, + np.complex_, + np.complex64, + np.complex128 +]) +def test_prod_random_input_dtype(dtype): + """ + 测试random输入不同dtype,对比MindSpore和Pytorch的支持度 + """ + flag1 = True + flag2 = True + shape = (4, 4) + try: + # MindSpore + input_ms = mindspore.Tensor(np.random.random(size=shape).astype(dtype)) + result_ms = mindspore.mint.prod(input_ms) + print(result_ms) + assert isinstance(result_ms, mindspore.Tensor) and result_ms.ndim == 0 + except Exception as e: + print(f"MindSpore出现报错: {e}") + flag1 = False + + try: + # Pytorch + input_pt = torch.from_numpy(np.random.random(size=shape).astype(dtype)) + result_pt = torch.prod(input_pt) + assert isinstance(result_pt, torch.Tensor) and result_pt.ndim == 0 + + except Exception as e: + print(f"Pytorch出现报错: {e}") + flag2 = False + + if not flag1 and flag2: + pytest.fail("mindspore不支持:"+str(dtype)) + if flag1 and not flag2: + pytest.fail("pytorch不支持:"+str(dtype)) + if not flag1 and not flag2: + pytest.fail("both mindspore 和 pytorch不支持:"+str(dtype)) + + + +@pytest.mark.parametrize("input", [ + {"shape":[4,4], "dim":None, "keepdim":False}, + {"shape":[4,4], "dim":0, "keepdim":False}, + {"shape":[4,4], "dim":1, "keepdim":False}, + {"shape":[4,4,4,8], "dim":None, "keepdim":False}, + {"shape":[4,4], "dim":1, "keepdim":True} +]) +def test_prod_fixed_dtype_random_value(input): + """ + 测试固定dtype,random输入值,对比两个框架输出(误差范围小于1e-3) + """ + input_value = np.random.random(size=input["shape"]) + + # MindSpore部分 + input_ms = mindspore.Tensor(input_value) + result_ms = mindspore.mint.prod(input_ms,dim=input["dim"],keepdim=input["keepdim"]) + + # Pytorch部分 + input_pt = torch.from_numpy(input_value) + if input["dim"] is not None: + result_pt = torch.prod(input_pt,dim=input["dim"],keepdim=input["keepdim"]) + else: + result_pt = torch.prod(input_pt) + + + assert np.allclose(result_ms.asnumpy(), result_pt.numpy(), atol=1e-3) + +@pytest.mark.parametrize("input_param", [ + 0 +]) 
diff --git a/test/test_sum.py b/test/test_sum.py
new file mode 100644
index 0000000..0144cd4
--- /dev/null
+++ b/test/test_sum.py
@@ -0,0 +1,231 @@
+import torch
+import mindspore
+import numpy as np
+import pytest
+
+'''
+    Tests:
+    mindspore.mint.sum(input, dim=None, keepdim=False, *, dtype=None)
+    Computes the sum of Tensor elements over the specified dimension(s).
+'''
+
+@pytest.mark.parametrize("dtype", [
+    np.bool_,
+    np.int_,
+    np.intc,
+    np.intp,
+    np.int8,
+    np.int16,
+    np.int32,
+    np.int64,
+    np.uint8,
+    np.uint16,
+    np.uint32,
+    np.uint64,
+    np.float_,
+    np.float16,
+    np.float32,
+    np.float64,
+    np.complex_,
+    np.complex64,
+    np.complex128
+])
+def test_sum_random_input_dtype(dtype):
+    """
+    Test random inputs across different dtypes; compare dtype support between MindSpore and PyTorch.
+    """
+    flag1 = True
+    flag2 = True
+    shape = (4, 4)
+    try:
+        # MindSpore
+        input_ms = mindspore.Tensor(np.random.random(size=shape).astype(dtype))
+        result_ms = mindspore.mint.sum(input_ms)
+        print(result_ms)
+        assert isinstance(result_ms, mindspore.Tensor) and result_ms.ndim == 0
+    except Exception as e:
+        print(f"MindSpore raised an error: {e}")
+        flag1 = False
+
+    try:
+        # PyTorch
+        input_pt = torch.from_numpy(np.random.random(size=shape).astype(dtype))
+        result_pt = torch.sum(input_pt)
+        assert isinstance(result_pt, torch.Tensor) and result_pt.ndim == 0
+    except Exception as e:
+        print(f"PyTorch raised an error: {e}")
+        flag2 = False
+
+    if not flag1 and flag2:
+        pytest.fail("MindSpore does not support: " + str(dtype))
+    if flag1 and not flag2:
+        pytest.fail("PyTorch does not support: " + str(dtype))
+    if not flag1 and not flag2:
+        pytest.fail("Neither MindSpore nor PyTorch supports: " + str(dtype))
+
+
+@pytest.mark.parametrize("input", [
+    {"shape": [4, 4], "dim": None, "keepdim": False},
+    {"shape": [4, 4], "dim": 0, "keepdim": False},
+    {"shape": [4, 4], "dim": -1, "keepdim": False},
+    {"shape": [4, 4, 4, 8], "dim": [1, 2], "keepdim": False},
+    {"shape": [4, 4, 4, 8], "dim": (1, 2), "keepdim": False},
+    {"shape": [4, 4, 4, 8], "dim": (1, 2, 3), "keepdim": False},
+    {"shape": [4, 4], "dim": 1, "keepdim": True}
+])
+def test_sum_fixed_dtype_random_value(input):
+    """
+    Test with a fixed dtype and random input values; compare the outputs of the two frameworks (tolerance below 1e-3).
+    """
+    input_value = np.random.random(size=input["shape"])
+
+    # MindSpore side
+    input_ms = mindspore.Tensor(input_value)
+    result_ms = mindspore.mint.sum(input_ms, dim=input["dim"], keepdim=input["keepdim"])
+
+    # PyTorch side (call without dim when dim is None)
+    input_pt = torch.from_numpy(input_value)
+    if input["dim"] is not None:
+        result_pt = torch.sum(input_pt, dim=input["dim"], keepdim=input["keepdim"])
+    else:
+        result_pt = torch.sum(input_pt)
+
+    assert np.allclose(result_ms.asnumpy(), result_pt.numpy(), atol=1e-3)
+
+
+@pytest.mark.parametrize("input_param", [
+    (0, 1),
+    [0, 1]
+])
+def test_sum_fixed_shape_fixed_value_different_params(input_param):
+    """
+    Test with a fixed shape and fixed input values across different parameters; compare support between the two frameworks.
+    """
+    input_value = np.random.random(size=[4, 4])
+    flag = True
+    try:
+        # MindSpore side
+        input_ms = mindspore.Tensor(input_value)
+        result_ms = mindspore.mint.sum(input_ms, dim=input_param)
+    except Exception as e:
+        print(f"MindSpore raised an error: {e}")
+        flag = False
+    try:
+        # PyTorch side (named tensor via refine_names, probing named-dimension support)
+        input_pt = torch.from_numpy(input_value).refine_names('a', 'b')
+        result_pt = torch.sum(input_pt, dim=input_param)
+    except Exception as e:
+        print(f"PyTorch raised an error: {e}")
+        flag = False
+    assert flag
+
+
+@pytest.mark.parametrize("random_messy_input", [
+    (np.array([[1, 2], [3, 4]]), 0, False, TypeError),
+    (mindspore.tensor([[1, 2], [3, 4]]), "str", False, TypeError),
+    (mindspore.tensor([[1, 2], [3, 4]]), 0, 1, TypeError),
+    (mindspore.tensor([[1, 2], [3, 4]]), -3, False, ValueError),
+    (mindspore.tensor([[1, 2], [3, 4]]), [0, -3], False, ValueError),
+    (mindspore.tensor([[1, 2], [3, 4]]), 2, False, ValueError),
+    (mindspore.tensor([[1, 2], [3, 4]]), (0, 2), False, ValueError),
+])
+def test_sum_random_messy_input_error_info(random_messy_input):
+    """
+    Test malformed random inputs and check the accuracy of the reported errors.
+    TypeError - input is not a Tensor.
+    TypeError - dim is not an int, tuple(int), list(int), Tensor, or None.
+    ValueError - dim is out of range.
+    TypeError - keepdim is not a bool.
+    """
+    flag = False
+    input = random_messy_input[0]
+    dim = random_messy_input[1]
+    keepdim = random_messy_input[2]
+    try:
+        result_ms = mindspore.mint.sum(input, dim=dim, keepdim=keepdim)
+        print(result_ms)
+    except Exception as e_ms:
+        assert isinstance(e_ms, random_messy_input[-1])
+        flag = True
+    if not flag:
+        pytest.fail("Expected an exception to be raised, but none was caught; the test fails.")
+
+
+def test_sum_in_neural_network():
+    """
+    Test a network containing the sum operation.
+    """
+    input_value = np.random.random(size=(32,)).astype(np.float32)
+
+    class SimpleNet_pt(torch.nn.Module):
+        def __init__(self):
+            super(SimpleNet_pt, self).__init__()
+
+        def forward(self, x):
+            output = torch.sum(x)
+            return output
+
+    class SimpleNet_ms(mindspore.nn.Cell):
+        def __init__(self):
+            super(SimpleNet_ms, self).__init__()
+
+        def construct(self, x):
+            output = mindspore.mint.sum(x)
+            return output
+
+    input_ms = mindspore.Tensor(input_value)
+    net_ms = SimpleNet_ms()
+    result_ms = net_ms(input_ms)
+
+    input_pt = torch.from_numpy(input_value)
+    net_pt = SimpleNet_pt()
+    result_pt = net_pt(input_pt)
+
+    assert np.allclose(result_ms.asnumpy(), result_pt.detach().numpy(), atol=1e-3)
+
+
+def test_sum_backward():
+    """
+    Test the backward pass of the function.
+    """
+    class SimpleNet_pt(torch.nn.Module):
+        def __init__(self):
+            super(SimpleNet_pt, self).__init__()
+
+        def forward(self, x):
+            output = torch.sum(x)
+            return output
+
+    class SimpleNet_ms(mindspore.nn.Cell):
+        def __init__(self):
+            super(SimpleNet_ms, self).__init__()
+
+        def construct(self, x):
+            output = mindspore.mint.sum(x)
+            return output
+
+    input_value = np.random.random(size=(32,)).astype(np.float32)
+
+    input_ms = mindspore.Tensor(input_value, mindspore.float32)
+    grad_ms = mindspore.grad(SimpleNet_ms())(input_ms)
+
+    input_pt = torch.from_numpy(input_value)
+    input_pt.requires_grad = True
+    result_pt = SimpleNet_pt()(input_pt)
+    result_pt.backward()
+    grad_pt = input_pt.grad
+
+    assert np.allclose(grad_ms.asnumpy(), grad_pt.numpy(), atol=1e-3)
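Note: a small sketch of the multi-dimension reduction cases parametrized above, again with PyTorch as the reference; the tests assume mindspore.mint.sum accepts the same int/tuple/list forms of dim.

    import numpy as np
    import torch

    x = torch.from_numpy(np.ones((4, 4, 4, 8)))
    print(torch.sum(x).item())                      # 512.0 - sum over all elements
    print(torch.sum(x, dim=(1, 2)).shape)           # (4, 8) - dims 1 and 2 reduced away
    print(torch.sum(x, dim=1, keepdim=True).shape)  # (4, 1, 4, 8) - reduced dim kept as size 1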
diff --git a/test/test_unique.py b/test/test_unique.py
new file mode 100644
index 0000000..5ab9728
--- /dev/null
+++ b/test/test_unique.py
@@ -0,0 +1,187 @@
+import torch
+import mindspore
+import numpy as np
+import pytest
+
+'''
+    Tests:
+    mindspore.mint.unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None)
+    Removes duplicate elements from the input Tensor.
+    When return_inverse=True, also returns an index Tensor giving, for each element of the input Tensor, its index in the output Tensor;
+    when return_counts=True, also returns a Tensor giving the count of each output element in the input.
+
+    input (Tensor) - The input Tensor.
+    sorted (bool) - Whether to sort the output in ascending order. Default: True.
+    return_inverse (bool) - Whether to also return the indices of input elements in output. Default: False.
+    return_counts (bool) - Whether to also return the counts of the elements of output. Default: False.
+    dim (int) - The dimension along which to deduplicate. When None, the flattened input is deduplicated;
+        otherwise each slice along the given dimension is treated as a single element. Default: None.
+'''
+
+@pytest.mark.parametrize("dtype", [
+    np.bool_,
+    np.int_,
+    np.intc,
+    np.intp,
+    np.int8,
+    np.int16,
+    np.int32,
+    np.int64,
+    np.uint8,
+    np.uint16,
+    np.uint32,
+    np.uint64,
+    np.float_,
+    np.float16,
+    np.float32,
+    np.float64,
+    np.complex_,
+    np.complex64,
+    np.complex128
+])
+def test_unique_random_input_dtype(dtype):
+    """
+    Test random inputs across different dtypes; compare dtype support between MindSpore and PyTorch.
+    """
+    flag1 = True
+    flag2 = True
+    shape = (4, 4)
+    try:
+        # MindSpore
+        input_ms = mindspore.Tensor(np.random.random(size=shape).astype(dtype))
+        result_ms = mindspore.mint.unique(input_ms)
+        print(result_ms)
+        assert isinstance(result_ms, mindspore.Tensor)
+    except Exception as e:
+        print(f"MindSpore raised an error: {e}")
+        flag1 = False
+
+    try:
+        # PyTorch
+        input_pt = torch.from_numpy(np.random.random(size=shape).astype(dtype))
+        result_pt = torch.unique(input_pt)
+        assert isinstance(result_pt, torch.Tensor)
+    except Exception as e:
+        print(f"PyTorch raised an error: {e}")
+        flag2 = False
+
+    if not flag1 and flag2:
+        pytest.fail("MindSpore does not support: " + str(dtype))
+    if flag1 and not flag2:
+        pytest.fail("PyTorch does not support: " + str(dtype))
+    if not flag1 and not flag2:
+        pytest.fail("Neither MindSpore nor PyTorch supports: " + str(dtype))
+
+
+@pytest.mark.parametrize("input", [
+    {"shape": [4, 4], "sorted": False, "return_inverse": False, "return_counts": False, "dim": None},
+    {"shape": [4, 4], "sorted": True, "return_inverse": False, "return_counts": False, "dim": None},
+    {"shape": [4, 4], "sorted": False, "return_inverse": True, "return_counts": False, "dim": None},
+    {"shape": [4, 4], "sorted": False, "return_inverse": False, "return_counts": True, "dim": None},
+    {"shape": [4, 4], "sorted": False, "return_inverse": False, "return_counts": False, "dim": 0}
+])
+def test_unique_fixed_dtype_random_value(input):
+    """
+    Test with a fixed dtype and random input values; compare the outputs of the two frameworks (tolerance below 1e-3).
+    """
+    input_value = np.random.random(size=input["shape"])
+    sorted = input["sorted"]
+    return_inverse = input["return_inverse"]
+    return_counts = input["return_counts"]
+    dim = input["dim"]
+    # MindSpore side
+    input_ms = mindspore.Tensor(input_value)
+    result_ms = mindspore.mint.unique(input_ms, dim=dim, sorted=sorted, return_counts=return_counts, return_inverse=return_inverse)
+    print(result_ms)
+    # PyTorch side
+    input_pt = torch.from_numpy(input_value)
+    result_pt = torch.unique(input_pt, dim=dim, sorted=sorted, return_counts=return_counts, return_inverse=return_inverse)
+    print(result_pt)
+
+    # With return_inverse or return_counts set, the result is a tuple; compare element-wise.
+    if return_inverse or return_counts:
+        for result_ms_i, result_pt_i in zip(result_ms, result_pt):
+            assert np.allclose(result_ms_i.asnumpy(), result_pt_i.numpy(), atol=1e-3)
+    else:
+        assert np.allclose(result_ms.asnumpy(), result_pt.numpy(), atol=1e-3)
+
+
+@pytest.mark.parametrize("input_param", [
+
+])
+def test_unique_fixed_shape_fixed_value_different_params(input_param):
+    """
+    Test with a fixed shape and fixed input values across different parameters; compare support between the two frameworks.
+    """
+    pass
+
+
+@pytest.mark.parametrize("random_messy_input", [
+    {"input": np.random.random(), "error": TypeError},
+    {"input": np.random.random(size=[4, 4]), "error": TypeError}
+])
+def test_unique_random_messy_input_error_info(random_messy_input):
+    """
+    Test malformed random inputs and check the accuracy of the reported errors.
+    TypeError - input is not a Tensor.
+    """
+    flag = False
+    input = random_messy_input["input"]
+    try:
+        result_ms = mindspore.mint.unique(input)
+        print(result_ms)
+    except Exception as e_ms:
+        assert isinstance(e_ms, random_messy_input["error"])
+        flag = True
+    if not flag:
+        pytest.fail("Expected an exception to be raised, but none was caught; the test fails.")
+
+
+def test_unique_in_neural_network():
+    """
+    Test a network containing the unique operation.
+    """
+    input_value = np.random.random(size=(32,)).astype(np.float32)
+
+    class SimpleNet_pt(torch.nn.Module):
+        def __init__(self):
+            super(SimpleNet_pt, self).__init__()
+
+        def forward(self, x):
+            # With the default flags, torch.unique returns a single Tensor of unique elements.
+            unique_elements = torch.unique(x)
+            sum_output = torch.sum(unique_elements)
+            return sum_output
+
+    class SimpleNet_ms(mindspore.nn.Cell):
+        def __init__(self):
+            super(SimpleNet_ms, self).__init__()
+
+        def construct(self, x):
+            unique_elements = mindspore.mint.unique(x)
+            sum_output = unique_elements.sum()
+            return sum_output
+
+    input_ms = mindspore.Tensor(input_value)
+    net_ms = SimpleNet_ms()
+    result_ms = net_ms(input_ms)
+
+    input_pt = torch.from_numpy(input_value)
+    net_pt = SimpleNet_pt()
+    result_pt = net_pt(input_pt)
+
+    assert np.allclose(result_ms.asnumpy(), result_pt.detach().numpy(), atol=1e-3)
+
+
+def test_unique_backward():
+    """
+    Test the backward pass of the function.
+    """
+    pass
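Note: a minimal sketch of the return_inverse/return_counts outputs described in the module docstring of test_unique.py, using PyTorch as the reference; the tests assume mindspore.mint.unique returns the analogous tuple.

    import numpy as np
    import torch

    x = torch.from_numpy(np.array([1, 3, 2, 3, 1]))
    values, inverse, counts = torch.unique(x, sorted=True, return_inverse=True, return_counts=True)
    print(values)   # [1, 2, 3] - sorted unique elements
    print(inverse)  # [0, 2, 1, 2, 0] - index of each input element in values
    print(counts)   # [2, 1, 2] - occurrences of each unique element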