// level constants. This is a pragmatic choice which lets us have a lot
// of tests in this file, whereas the others tend to be more bespoke.

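+// The three onnx.QuantizeLinear tests below check the lowering to
+// torch.aten.quantize_per_tensor followed by torch.aten.int_repr; the
+// integer constant passed to quantize_per_tensor selects the quantized
+// dtype via PyTorch's ScalarType enum.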
+
+// CHECK-LABEL: @test_quantizelinear_si8
+func.func @test_quantizelinear_si8(%arg0: !torch.vtensor<[6],f32>, %arg1: !torch.vtensor<[],f32>, %arg2: !torch.vtensor<[],si8>) -> !torch.vtensor<[6],si8> attributes {torch.onnx_meta.ir_version = 9 : si64, torch.onnx_meta.opset_version = 19 : si64} {
+  %0 = torch.operator "onnx.QuantizeLinear"(%arg0, %arg1, %arg2) : (!torch.vtensor<[6],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si8>) -> !torch.vtensor<[6],si8>
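+  // 12 is the torch ScalarType value for qint8.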
+  // CHECK: %[[C12:.+]] = torch.constant.int 12
+  // CHECK: %[[SCALE:.+]] = torch.aten.item %arg1 : !torch.vtensor<[],f32> -> !torch.float
+  // CHECK: %[[ZP:.+]] = torch.aten.item %arg2 : !torch.vtensor<[],si8> -> !torch.int
+  // CHECK: %[[QUANT:.+]] = torch.aten.quantize_per_tensor %arg0, %[[SCALE]], %[[ZP]], %[[C12]]
+  // CHECK: %[[REPR:.+]] = torch.aten.int_repr %[[QUANT]]
+  // CHECK: return %[[REPR]]
+  return %0 : !torch.vtensor<[6],si8>
+}
+
+// -----
+
+// CHECK-LABEL: @test_quantizelinear_ui8
+func.func @test_quantizelinear_ui8(%arg0: !torch.vtensor<[6],f32>, %arg1: !torch.vtensor<[],f32>, %arg2: !torch.vtensor<[],ui8>) -> !torch.vtensor<[6],ui8> attributes {torch.onnx_meta.ir_version = 9 : si64, torch.onnx_meta.opset_version = 19 : si64} {
+  %0 = torch.operator "onnx.QuantizeLinear"(%arg0, %arg1, %arg2) : (!torch.vtensor<[6],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>) -> !torch.vtensor<[6],ui8>
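+  // 13 is the torch ScalarType value for quint8.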
+  // CHECK: %[[C13:.+]] = torch.constant.int 13
+  // CHECK: %[[SCALE:.+]] = torch.aten.item %arg1 : !torch.vtensor<[],f32> -> !torch.float
+  // CHECK: %[[ZP:.+]] = torch.aten.item %arg2 : !torch.vtensor<[],ui8> -> !torch.int
+  // CHECK: %[[QUANT:.+]] = torch.aten.quantize_per_tensor %arg0, %[[SCALE]], %[[ZP]], %[[C13]]
+  // CHECK: %[[REPR:.+]] = torch.aten.int_repr %[[QUANT]]
+  // CHECK: return %[[REPR]]
+  return %0 : !torch.vtensor<[6],ui8>
+}
+
+// -----
+
+// CHECK-LABEL: @test_quantizelinear_i32
+func.func @test_quantizelinear_i32(%arg0: !torch.vtensor<[6],f32>, %arg1: !torch.vtensor<[],f32>, %arg2: !torch.vtensor<[],si32>) -> !torch.vtensor<[6],si32> attributes {torch.onnx_meta.ir_version = 9 : si64, torch.onnx_meta.opset_version = 19 : si64} {
+  %0 = torch.operator "onnx.QuantizeLinear"(%arg0, %arg1, %arg2) : (!torch.vtensor<[6],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],si32>) -> !torch.vtensor<[6],si32>
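+  // 14 is the torch ScalarType value for qint32.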
+  // CHECK: %[[C14:.+]] = torch.constant.int 14
+  // CHECK: %[[SCALE:.+]] = torch.aten.item %arg1 : !torch.vtensor<[],f32> -> !torch.float
+  // CHECK: %[[ZP:.+]] = torch.aten.item %arg2 : !torch.vtensor<[],si32> -> !torch.int
+  // CHECK: %[[QUANT:.+]] = torch.aten.quantize_per_tensor %arg0, %[[SCALE]], %[[ZP]], %[[C14]]
+  // CHECK: %[[REPR:.+]] = torch.aten.int_repr %[[QUANT]]
+  // CHECK: return %[[REPR]]
+  return %0 : !torch.vtensor<[6],si32>
+}
+
+// -----
+
// CHECK-LABEL: func.func @test_reciprocal
func.func @test_reciprocal(%arg0: !torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 13 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
  // CHECK: torch.aten.reciprocal %arg0 : !torch.vtensor<[3,4,5],f32> -> !torch.vtensor<[3,4,5],f32>