
Commit 204ad2a

Merge pull request #1 from confused666/master
Test the interfaces mint.any, mint.max, mint.mean, mint.median, and mint.min
2 parents 13e232b + 24770cd commit 204ad2a

File tree

5 files changed: +607 −0 lines changed


test/test_any.py

Lines changed: 122 additions & 0 deletions
import pytest
import numpy as np
import mindspore as ms
from mindspore import mint, Tensor, value_and_grad
import torch


dtype_ms = ms.float32
dtype_torch = torch.float32
input_data = [[1, 0], [0, 0], [1, 1]]
ms_tensor = Tensor(input_data, dtype_ms)
torch_tensor = torch.tensor(input_data, dtype=dtype_torch)


def is_same(input_data=[[1, 0], [0, 0], [1, 1]], shape=None, dtype_ms=ms.float32, dtype_torch=torch.float32, dim=-1, keepdim=False):
    if shape is not None:
        input_data = np.random.randn(*shape)

    ms_tensor = Tensor(input_data, dtype_ms)
    torch_tensor = torch.tensor(input_data, dtype=dtype_torch)

    ms_result = mint.any(ms_tensor, dim=dim, keepdim=keepdim).asnumpy()
    torch_result = torch.any(torch_tensor, dim=dim, keepdim=keepdim).numpy()

    return np.allclose(ms_result, torch_result)


@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_any_different_dtypes(mode):
    """Test random inputs across dtypes and compare the two frameworks' support."""
    ms.set_context(mode=mode)
    ms_dtypes = [ms.int8, ms.int16, ms.int32, ms.int64, ms.uint8, ms.uint16, ms.uint32, ms.uint64, ms.float16, ms.float32, ms.float64, ms.bfloat16, ms.bool_]
    torch_dtypes = [torch.int8, torch.int16, torch.int32, torch.int64, torch.uint8, torch.uint16, torch.uint32, torch.uint64, torch.float16, torch.float32, torch.float64, torch.bfloat16, torch.bool]

    for i in range(len(ms_dtypes)):
        dtype_ms = ms_dtypes[i]
        dtype_torch = torch_dtypes[i]

        ms_tensor = Tensor(input_data, dtype_ms)
        torch_tensor = torch.tensor(input_data, dtype=dtype_torch)

        err = False
        try:
            ms_result = mint.any(ms_tensor, dim=1).asnumpy()
        except Exception as e:
            err = True
            print(f"mint.any not supported for {dtype_ms}")

        try:
            torch_result = torch.any(torch_tensor, dim=1).numpy()
        except Exception as e:
            err = True
            print(f"torch.any not supported for {dtype_torch}")

        if not err:
            assert np.allclose(ms_result, torch_result)


@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_any_random_input_fixed_dtype(mode):
    """Test random input values with a fixed dtype and compare the output error."""
    ms.set_context(mode=mode)

    shapes = [[5], [5, 2], [5, 4, 3], [4, 6, 7, 8]]
    for i in range(len(shapes)):
        shape = shapes[i]
        result = is_same(shape=shape)
        assert result


@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_any_different_para(mode):
    """Test a fixed shape and fixed input values with different parameter types (string, bool, etc.) and compare the two frameworks' support."""
    ms.set_context(mode=mode)

    dims = [None, 0, 1]
    keepdims = [True, False]
    paras = [(dim, keepdim) for dim in dims for keepdim in keepdims]

    for dim, keepdim in paras:
        result = is_same(dim=dim, keepdim=keepdim)
        assert result


@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_any_wrong_input(mode):
    """Test random malformed inputs and check the accuracy of the error messages."""
    ms.set_context(mode=mode)

    try:
        ms_result = mint.any(ms_tensor, dim=1, keepdim=1).asnumpy()
    except Exception as e:
        print(f"Error message when keepdim is not a bool:\n{e}")

    try:
        ms_result = mint.any(input_data, dim=1).asnumpy()
    except Exception as e:
        print(f"Error message when input is not a Tensor:\n{e}")


@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_any_forward_back(mode):
    """With PyTorch and MindSpore, fix the inputs and weights, then test the forward inference results and backward gradients."""
    ms.set_context(mode=mode)
    torch_tensor = torch.tensor(input_data, dtype=dtype_torch, requires_grad=True)

    def forward_pt(x):
        return torch.any(x, dim=1, keepdim=False)

    def forward_ms(x):
        return mint.any(x, dim=1, keepdim=False)

    grad_fn = value_and_grad(forward_ms)
    output_ms, gradient_ms = grad_fn(ms_tensor)
    output_pt = forward_pt(torch_tensor)
    # torch.any returns a bool tensor, which autograd cannot backpropagate
    # through, so the PyTorch gradient comparison stays disabled.
    # output_pt.backward()
    # gradient_pt = torch_tensor.grad
    assert np.allclose(output_ms.asnumpy(), output_pt.detach().numpy(), atol=1e-3)
    # assert np.allclose(gradient_ms.asnumpy(), gradient_pt.numpy(), atol=1e-3)
    # print(output_ms, gradient_ms)
    # print(output_pt, gradient_pt)
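A note on the disabled gradient checks in test_any_forward_back: torch.any produces a boolean tensor that detaches from autograd, so calling backward() on it raises a RuntimeError, while MindSpore's value_and_grad still returns a gradient object for the boolean reduction, as exercised above. A minimal standalone sketch of the PyTorch side (illustrative values, any recent torch build):

import torch

x = torch.tensor([[1.0, 0.0], [0.0, 0.0]], requires_grad=True)
y = torch.any(x, dim=1)   # bool output: the autograd chain stops here
print(y.requires_grad)    # False, so there is nothing to differentiate
try:
    y.sum().backward()    # sum() of a bool tensor is integer-valued
except RuntimeError as e:
    print(f"backward() raises as expected: {e}")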

test/test_max.py

Lines changed: 120 additions & 0 deletions
import pytest
import numpy as np
import mindspore as ms
from mindspore import mint, Tensor, value_and_grad
import torch


dtype_ms = ms.float32
dtype_torch = torch.float32
input_data = [[1, 6, 2, 4], [7, 3, 8, 2], [2, 9, 11, 5]]
ms_tensor = Tensor(input_data, dtype_ms)
torch_tensor = torch.tensor(input_data, dtype=dtype_torch)


def is_same(input_data=[[1, 6, 2, 4], [7, 3, 8, 2], [2, 9, 11, 5]], shape=None, dtype_ms=ms.float32, dtype_torch=torch.float32, dim=-1, keepdim=False):
    if shape is not None:
        input_data = np.random.randn(*shape)

    ms_tensor = Tensor(input_data, dtype_ms)
    torch_tensor = torch.tensor(input_data, dtype=dtype_torch)

    if dim is None:
        ms_result = mint.max(ms_tensor)
        torch_result = torch.max(torch_tensor)
        return np.allclose(ms_result.asnumpy(), torch_result.numpy())
    else:
        ms_result, ms_index = mint.max(ms_tensor, dim=dim, keepdim=keepdim)
        torch_result, torch_index = torch.max(torch_tensor, dim=dim, keepdim=keepdim)
        return np.allclose(ms_result.asnumpy(), torch_result.numpy()) and np.allclose(ms_index.asnumpy(), torch_index.numpy())


@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_max_different_dtypes(mode):
    """Test random inputs across dtypes and compare the two frameworks' support."""
    ms.set_context(mode=mode)
    ms_dtypes = [ms.int8, ms.int16, ms.int32, ms.int64, ms.uint8, ms.uint16, ms.uint32, ms.uint64, ms.float16, ms.float32, ms.float64, ms.bfloat16, ms.bool_]
    torch_dtypes = [torch.int8, torch.int16, torch.int32, torch.int64, torch.uint8, torch.uint16, torch.uint32, torch.uint64, torch.float16, torch.float32, torch.float64, torch.bfloat16, torch.bool]

    for i in range(len(ms_dtypes)):
        dtype_ms = ms_dtypes[i]
        dtype_torch = torch_dtypes[i]

        ms_tensor = Tensor(input_data, dtype_ms)
        torch_tensor = torch.tensor(input_data, dtype=dtype_torch)

        err = False
        try:
            ms_result = mint.max(ms_tensor).asnumpy()
        except Exception as e:
            err = True
            print(f"mint.max not supported for {dtype_ms}")
            # print(e)

        try:
            torch_result = torch.max(torch_tensor).numpy()
        except Exception as e:
            err = True
            print(f"torch.max not supported for {dtype_torch}")
            # print(e)

        if not err:
            assert np.allclose(ms_result, torch_result)


@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_max_random_input_fixed_dtype(mode):
    """Test random input values with a fixed dtype and compare the output error."""
    ms.set_context(mode=mode)

    shapes = [[5], [5, 2], [5, 4, 3], [4, 6, 7, 8]]
    for i in range(len(shapes)):
        shape = shapes[i]
        result = is_same(shape=shape)
        assert result


@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_max_different_para(mode):
    """Test a fixed shape and fixed input values with different parameter types (string, bool, etc.) and compare the two frameworks' support."""
    ms.set_context(mode=mode)

    dims = [None, 0, 1]
    keepdims = [True, False]
    paras = [(dim, keepdim) for dim in dims for keepdim in keepdims]
    for dim, keepdim in paras:
        result = is_same(dim=dim, keepdim=keepdim)
        assert result


@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_max_wrong_input(mode):
    """Test random malformed inputs and check the accuracy of the error messages."""
    ms.set_context(mode=mode)

    try:
        ms_result = mint.max(ms_tensor, dim=None, keepdim=True).asnumpy()
    except Exception as e:
        print(f"Error message when dim is None and keepdim is not False:\n{e}")


@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_max_forward_back(mode):
    """With PyTorch and MindSpore, fix the inputs and weights, then test the forward inference results and backward gradients."""
    ms.set_context(mode=mode)
    torch_tensor = torch.tensor(input_data, dtype=dtype_torch, requires_grad=True)

    def forward_pt(x):
        return torch.max(x)

    def forward_ms(x):
        return mint.max(x, dim=None, keepdim=False)

    grad_fn = value_and_grad(forward_ms)
    output_ms, gradient_ms = grad_fn(ms_tensor)
    output_pt = forward_pt(torch_tensor)
    output_pt.backward()
    gradient_pt = torch_tensor.grad
    assert np.allclose(output_ms.asnumpy(), output_pt.detach().numpy(), atol=1e-3)
    assert np.allclose(gradient_ms.asnumpy(), gradient_pt.numpy(), atol=1e-3)
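The branch in is_same above reflects a convention both frameworks share: max over the whole tensor returns a single value, while max along a dim returns a (values, indices) pair. A quick illustration of the torch side with the same fixed input:

import torch

t = torch.tensor([[1., 6., 2., 4.], [7., 3., 8., 2.], [2., 9., 11., 5.]])
print(torch.max(t))                    # tensor(11.) -- full reduction, single value
values, indices = torch.max(t, dim=1)
print(values)                          # tensor([ 6.,  8., 11.]) -- row maxima
print(indices)                         # tensor([1, 2, 2]) -- argmax per row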

test/test_mean.py

Lines changed: 116 additions & 0 deletions
import pytest
import numpy as np
import mindspore as ms
from mindspore import mint, Tensor, value_and_grad
import torch


dtype_ms = ms.float32
dtype_torch = torch.float32
input_data = [[1, 6, 2, 4], [7, 3, 8, 2], [2, 9, 11, 5]]
ms_tensor = Tensor(input_data, dtype_ms)
torch_tensor = torch.tensor(input_data, dtype=dtype_torch)


def is_same(input_data=[[1, 6, 2, 4], [7, 3, 8, 2], [2, 9, 11, 5]], shape=None, dtype_ms=ms.float32, dtype_torch=torch.float32, dim=-1, keepdim=False):
    if shape is not None:
        input_data = np.random.randn(*shape)

    ms_tensor = Tensor(input_data, dtype_ms)
    torch_tensor = torch.tensor(input_data, dtype=dtype_torch)

    ms_result = mint.mean(ms_tensor, dim=dim, keepdim=keepdim).asnumpy()
    torch_result = torch.mean(torch_tensor, dim=dim, keepdim=keepdim).numpy()

    return np.allclose(ms_result, torch_result)


@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_mean_different_dtypes(mode):
    """Test random inputs across dtypes and compare the two frameworks' support."""
    ms.set_context(mode=mode)
    ms_dtypes = [ms.float16, ms.float32, ms.float64]
    torch_dtypes = [torch.float16, torch.float32, torch.float64]

    for i in range(len(ms_dtypes)):
        dtype_ms = ms_dtypes[i]
        dtype_torch = torch_dtypes[i]
        ms_tensor = Tensor(input_data, dtype_ms)
        torch_tensor = torch.tensor(input_data, dtype=dtype_torch)

        err = False
        try:
            ms_result = mint.mean(ms_tensor).asnumpy()
        except Exception as e:
            err = True
            print(f"mint.mean not supported for {dtype_ms}")
            # print(e)

        try:
            torch_result = torch.mean(torch_tensor).numpy()
        except Exception as e:
            err = True
            print(f"torch.mean not supported for {dtype_torch}")
            # print(e)
        if not err:
            assert np.allclose(ms_result, torch_result)


@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_mean_random_input_fixed_dtype(mode):
    """Test random input values with a fixed dtype and compare the output error."""
    ms.set_context(mode=mode)

    shapes = [[5], [5, 2], [5, 4, 3], [4, 6, 7, 8]]
    for i in range(len(shapes)):
        shape = shapes[i]
        result = is_same(shape=shape)
        assert result


@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_mean_different_para(mode):
    """Test a fixed shape and fixed input values with different parameter types (string, bool, etc.) and compare the two frameworks' support."""
    ms.set_context(mode=mode)

    dims = [None, 0, 1]
    keepdims = [True, False]
    paras = [(dim, keepdim) for dim in dims for keepdim in keepdims]

    for dim, keepdim in paras:
        result = is_same(dim=dim, keepdim=keepdim)
        assert result


@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_mean_wrong_input(mode):
    """Test random malformed inputs and check the accuracy of the error messages."""
    ms.set_context(mode=mode)

    try:
        ms_result = mint.mean(ms_tensor, dim=2, keepdim=True).asnumpy()
    except Exception as e:
        print(f"Error message when dim is out of range:\n{e}")


@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_mean_forward_back(mode):
    """With PyTorch and MindSpore, fix the inputs and weights, then test the forward inference results and backward gradients."""
    ms.set_context(mode=mode)

    torch_tensor = torch.tensor(input_data, dtype=dtype_torch, requires_grad=True)

    def forward_pt(x):
        return torch.mean(x)

    def forward_ms(x):
        return mint.mean(x)

    grad_fn = value_and_grad(forward_ms)
    output_ms, gradient_ms = grad_fn(ms_tensor)
    output_pt = forward_pt(torch_tensor)
    output_pt.backward()
    gradient_pt = torch_tensor.grad
    assert np.allclose(output_ms.asnumpy(), output_pt.detach().numpy(), atol=1e-3)
    assert np.allclose(gradient_ms.asnumpy(), gradient_pt.numpy(), atol=1e-3)
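The gradient assertion at the end of test_mean_forward_back has a simple closed form to sanity-check against: for y = mean(x) over N elements, dy/dx_i = 1/N, so every entry of the gradient for the 3x4 input above should be 1/12 ≈ 0.0833. A standalone check on the PyTorch side:

import torch

x = torch.tensor([[1., 6., 2., 4.], [7., 3., 8., 2.], [2., 9., 11., 5.]], requires_grad=True)
torch.mean(x).backward()
print(x.grad)   # every entry equals 1/12, the analytic d(mean)/dx_i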
