
Commit 6ee7c21

Generate opset 22 impl (#1923)
Dependent on #1924
1 parent f6bf6cf commit 6ee7c21

File tree: 12 files changed (+4759, -14 lines)

onnxscript/onnx_opset/__init__.py  (+21)

@@ -37,10 +37,13 @@
 from onnxscript.onnx_opset._impl.opset18 import Opset18
 from onnxscript.onnx_opset._impl.opset19 import Opset19
 from onnxscript.onnx_opset._impl.opset20 import Opset20
+from onnxscript.onnx_opset._impl.opset21 import Opset21
+from onnxscript.onnx_opset._impl.opset22 import Opset22
 from onnxscript.onnx_opset._impl.opset_ai_onnx_ml1 import Opset_ai_onnx_ml1
 from onnxscript.onnx_opset._impl.opset_ai_onnx_ml2 import Opset_ai_onnx_ml2
 from onnxscript.onnx_opset._impl.opset_ai_onnx_ml3 import Opset_ai_onnx_ml3
 from onnxscript.onnx_opset._impl.opset_ai_onnx_ml4 import Opset_ai_onnx_ml4
+from onnxscript.onnx_opset._impl.opset_ai_onnx_ml5 import Opset_ai_onnx_ml5
 from onnxscript.onnx_opset._impl.opset_ai_onnx_preview_training1 import (
     Opset_ai_onnx_preview_training1,
 )
@@ -68,10 +71,13 @@
     "opset18",
     "opset19",
     "opset20",
+    "opset21",
+    "opset22",
     "opset_ai_onnx_ml1",
     "opset_ai_onnx_ml2",
     "opset_ai_onnx_ml3",
     "opset_ai_onnx_ml4",
+    "opset_ai_onnx_ml5",
     "opset_ai_onnx_preview_training1",
 ]

@@ -102,10 +108,13 @@
 opset18 = Opset18()
 opset19 = Opset19()
 opset20 = Opset20()
+opset21 = Opset21()
+opset22 = Opset22()
 opset_ai_onnx_ml1 = Opset_ai_onnx_ml1()
 opset_ai_onnx_ml2 = Opset_ai_onnx_ml2()
 opset_ai_onnx_ml3 = Opset_ai_onnx_ml3()
 opset_ai_onnx_ml4 = Opset_ai_onnx_ml4()
+opset_ai_onnx_ml5 = Opset_ai_onnx_ml5()
 opset_ai_onnx_preview_training1 = Opset_ai_onnx_preview_training1()
 all_opsets: Mapping[Tuple[str, int], Opset] = {
     (
@@ -188,6 +197,14 @@
         "",
         20,
     ): opset20,
+    (
+        "",
+        21,
+    ): opset21,
+    (
+        "",
+        22,
+    ): opset22,
     (
         "ai.onnx.ml",
         1,
@@ -204,6 +221,10 @@
         "ai.onnx.ml",
         4,
     ): opset_ai_onnx_ml4,
+    (
+        "ai.onnx.ml",
+        5,
+    ): opset_ai_onnx_ml5,
     (
         "ai.onnx.preview.training",
         1,
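
With these registrations, opset 21/22 and ai.onnx.ml 5 resolve the same way as earlier versions. Below is a minimal usage sketch, assuming the public onnxscript API (the `script` decorator, shape-annotated `FLOAT`, `to_model_proto`) behaves as it does for earlier opsets; `gelu_graph` is a made-up example, not part of this change:

```python
# Sketch only: exercising the newly registered opsets.
from onnxscript import FLOAT, script
from onnxscript.onnx_opset import all_opsets, opset22

# The (domain, version) registry now resolves the new entries.
assert all_opsets[("", 22)] is opset22
assert ("ai.onnx.ml", 5) in all_opsets

@script()
def gelu_graph(x: FLOAT["N"]) -> FLOAT["N"]:
    # Gelu exists in the default domain since opset 20, so Opset22
    # inherits it through the generated class hierarchy.
    return opset22.Gelu(x)

model = gelu_graph.to_model_proto()
print(model.opset_import)  # expected to reference the "" domain at version 22
```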

onnxscript/onnx_opset/_impl/opset1.py  (+2, -2)

@@ -2171,7 +2171,7 @@ def MatMul(self, A: T_MatMul, B: T_MatMul) -> T_MatMul:
 r"""[🌐 MatMul(1)](https://onnx.ai/onnx/operators/onnx__MatMul.html#matmul-1 "Online Documentation")


-Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html
+Matrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html).


 Args:
@@ -3538,7 +3538,7 @@ def Slice(


 Produces a slice of the input tensor along multiple axes. Similar to numpy:
-https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
+https://numpy.org/doc/stable/reference/routines.indexing.html
 Slices uses `axes`, `starts` and `ends` attributes to specify the start and end
 dimension for each axis in the list of axes, it uses this information to
 slice the input `data` tensor. If a negative value is passed for any of the

onnxscript/onnx_opset/_impl/opset10.py  (+3, -3)

@@ -346,7 +346,7 @@ def MatMulInteger(
 r"""[🌐 MatMulInteger(10)](https://onnx.ai/onnx/operators/onnx__MatMulInteger.html#matmulinteger-10 "Online Documentation")


-Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html.
+Matrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html).
 The production MUST never overflow. The accumulation may overflow if and only if in 32 bits.


@@ -749,7 +749,7 @@ def QLinearMatMul(
 r"""[🌐 QLinearMatMul(10)](https://onnx.ai/onnx/operators/onnx__QLinearMatMul.html#qlinearmatmul-10 "Online Documentation")


-Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html.
+Matrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html).
 It consumes two quantized input tensors, their scales and zero points, scale and zero point of output,
 and computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point).
 For (x / y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details.
@@ -1067,7 +1067,7 @@ def Slice(


 Produces a slice of the input tensor along multiple axes. Similar to numpy:
-https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
+https://numpy.org/doc/stable/reference/routines.indexing.html
 Slices uses `starts`, `ends`, `axes` and `steps` inputs to specify the start and end
 dimension and step for each axis in the list of axes, it uses this information to
 slice the input `data` tensor. If a negative value is passed for any of the
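
As an aside on the QLinearMatMul docstring kept above: its formula y = saturate((x / y_scale) + y_zero_point) with round-to-nearest-ties-to-even can be sketched in NumPy as below. This illustrates only the scalar quantization step (uint8 saturation assumed), not the operator's kernel:

```python
import numpy as np

def quantize_uint8(x: np.ndarray, y_scale: float, y_zero_point: int) -> np.ndarray:
    """Illustrative y = saturate(round(x / y_scale) + y_zero_point) for a uint8 output.

    np.rint rounds to nearest, ties to even, matching the docstring's rounding rule.
    """
    y = np.rint(x / y_scale) + y_zero_point
    return np.clip(y, 0, 255).astype(np.uint8)

print(quantize_uint8(np.array([0.4, 0.5, 1.5, 300.0]), y_scale=1.0, y_zero_point=0))
# -> [0 0 2 255]  (0.5 and 1.5 both round to the nearest even integer, 300 saturates)
```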

onnxscript/onnx_opset/_impl/opset11.py  (+1, -1)

@@ -3481,7 +3481,7 @@ def Slice(


 Produces a slice of the input tensor along multiple axes. Similar to numpy:
-https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
+https://numpy.org/doc/stable/reference/routines.indexing.html
 Slices uses `starts`, `ends`, `axes` and `steps` inputs to specify the start and end
 dimension and step for each axis in the list of axes, it uses this information to
 slice the input `data` tensor. If a negative value is passed for any of the

onnxscript/onnx_opset/_impl/opset12.py  (+1, -1)

@@ -674,7 +674,7 @@ def MaxPool(
 ```
 output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1)
 ```
-if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. Sliding windows that would start in the right padded region are ignored.
+if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`.

 `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following when ceil_mode is enabled:
 ```
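
The ceil_mode output-shape formula retained in the MaxPool docstring above is easy to check numerically. A small sketch, with parameter names of my own choosing that mirror the docstring's terms:

```python
import math

def ceil_mode_output_size(input_size: int, pad_sum: int, dilation: int,
                          kernel_size: int, stride: int) -> int:
    """output_spatial_shape[i] for one axis, per the docstring's ceil_mode formula."""
    return math.ceil(
        (input_size + pad_sum - dilation * (kernel_size - 1) - 1) / stride + 1
    )

# e.g. a 7-wide axis, pads summing to 2, kernel 3, stride 2, no dilation -> 4
print(ceil_mode_output_size(input_size=7, pad_sum=2, dilation=1, kernel_size=3, stride=2))
```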

onnxscript/onnx_opset/_impl/opset13.py  (+1, -1)

@@ -1762,7 +1762,7 @@ def MatMul(self, A: T_MatMul, B: T_MatMul) -> T_MatMul:
 r"""[🌐 MatMul(13)](https://onnx.ai/onnx/operators/onnx__MatMul.html#matmul-13 "Online Documentation")


-Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html
+Matrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html).


 Args:

onnxscript/onnx_opset/_impl/opset19.py  (+5, -4)

@@ -80,7 +80,7 @@ def AveragePool(
 ```
 output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1)
 ```
-if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. Sliding windows that would start in the right padded region are ignored.
+if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`.

 `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following when ceil_mode is enabled:
 ```
@@ -566,9 +566,10 @@ def DequantizeLinear(
     It's optional. Zero point is 0 when it's not specified.

 axis: (Optional) The axis of the dequantizing dimension of the input tensor.
-    Ignored for per-tensor quantization. Negative value means counting
-    dimensions from the back. Accepted range is [-r, r-1] where r =
-    rank(input).
+    Used only for per-axis quantization. Negative value means counting
+    dimensions from the back. Accepted range is `[-r, r-1]` where `r =
+    rank(input)`. When the rank of the input is 1, per-tensor quantization
+    is applied, rendering the axis unnecessary in this scenario.
 """

 schema = get_schema("DequantizeLinear", 19, "")
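
To make the reworded axis semantics concrete: per-axis dequantization broadcasts a 1-D scale/zero-point along the chosen axis, while a rank-1 input degenerates to per-tensor. A NumPy sketch of the y = (x - x_zero_point) * x_scale computation (illustrative only, not the operator's implementation):

```python
import numpy as np

def dequantize_per_axis(x: np.ndarray, x_scale: np.ndarray,
                        x_zero_point: np.ndarray, axis: int = 1) -> np.ndarray:
    """Illustrative per-axis dequantization: y = (x - zero_point) * scale along `axis`."""
    # Broadcast the 1-D scale/zero_point across every dimension except `axis`.
    shape = [1] * x.ndim
    shape[axis] = -1
    scale = x_scale.reshape(shape)
    zp = x_zero_point.reshape(shape)
    return (x.astype(np.float32) - zp.astype(np.float32)) * scale

x = np.array([[0, 128, 255], [64, 128, 192]], dtype=np.uint8)
print(dequantize_per_axis(x, np.array([0.1, 0.2, 0.4], dtype=np.float32),
                          np.array([128, 128, 128], dtype=np.uint8), axis=1))
```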

onnxscript/onnx_opset/_impl/opset2.py  (+2, -1)

@@ -19,6 +19,7 @@

 from onnxscript.onnx_opset._impl.opset1 import Opset1
 from onnxscript.onnx_types import (
+    BFLOAT16,
     BOOL,
     COMPLEX64,
     COMPLEX128,
@@ -42,7 +43,7 @@ class Opset2(Opset1):
     def __new__(cls):
         return Opset.__new__(cls, "", 2)

-    T_GlobalLpPool = TypeVar("T_GlobalLpPool", DOUBLE, FLOAT, FLOAT16)
+    T_GlobalLpPool = TypeVar("T_GlobalLpPool", BFLOAT16, DOUBLE, FLOAT, FLOAT16)

     def GlobalLpPool(self, X: T_GlobalLpPool, *, p: int = 2) -> T_GlobalLpPool:
         r"""[🌐 GlobalLpPool(2)](https://onnx.ai/onnx/operators/onnx__GlobalLpPool.html#globallppool-2 "Online Documentation")