
Commit aa933ee

christopherlmunoz, AlexandreEichenberger, and tungld authored
start of llvm/stablehlo bump (#3302)
Signed-off-by: Christopher Munoz <[email protected]>
Signed-off-by: Alexandre Eichenberger <[email protected]>
Signed-off-by: Tung D. Le <[email protected]>
Co-authored-by: Alexandre Eichenberger <[email protected]>
Co-authored-by: Tung D. Le <[email protected]>
1 parent 72b796c commit aa933ee

28 files changed (+296 −227 lines)

docs/BuildOnLinuxOSX.md

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ Firstly, install MLIR (as a part of LLVM-Project):
 ``` bash
 git clone -n https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX-MLIR.
-cd llvm-project && git checkout fc44a4fcd3c54be927c15ddd9211aca1501633e7 && cd ..
+cd llvm-project && git checkout 113f01aa82d055410f22a9d03b3468fa68600589 && cd ..
 ```
 
 [same-as-file]: <> ({"ref": "utils/build-mlir.sh", "skip-ref": 2})

docs/BuildOnWindows.md

Lines changed: 1 addition & 1 deletion
@@ -52,7 +52,7 @@ Install MLIR (as a part of LLVM-Project):
 ```shell
 git clone -n https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX-MLIR.
-cd llvm-project && git checkout fc44a4fcd3c54be927c15ddd9211aca1501633e7 && cd ..
+cd llvm-project && git checkout 113f01aa82d055410f22a9d03b3468fa68600589 && cd ..
 ```
 
 [same-as-file]: <> ({"ref": "utils/build-mlir.cmd", "skip-ref": 2})

src/Accelerators/NNPA/Transform/ZHigh/ZHighConstPropagation.cpp

Lines changed: 1 addition & 0 deletions
@@ -227,6 +227,7 @@ ZHighStickifiedConstantOp createQuantizedConstantForStick(
   init_ztensor(&pre_tfrmd_desc, &tfrmd_desc, &ztensor);
   status = allochelper_ztensor_alloc(&ztensor);
   assert(status == ZDNN_OK);
+  memset(ztensor.buffer, 0, ztensor.buffer_size);
   status = quantized_stickify(&ztensor, rawData.data());
   assert(status == ZDNN_OK);
   // Emit a constant global in ZHigh dialect.
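Note on the one-line change above: the buffer returned by allochelper_ztensor_alloc is now zeroed before quantized_stickify fills it, presumably so that any bytes the stickification does not write hold a known value. Below is a minimal, self-contained C++ sketch of that allocate-then-zero pattern; the names (bufSize, fillPayload) are hypothetical and are not part of the zDNN API.

```cpp
#include <cassert>
#include <cstdlib>
#include <cstring>

int main() {
  // Hypothetical stand-in for allochelper_ztensor_alloc: grab a raw buffer.
  const size_t bufSize = 4096;
  void *buf = std::malloc(bufSize);
  assert(buf != nullptr);
  // Mirrors the added line: memset(ztensor.buffer, 0, ztensor.buffer_size);
  // Zero the whole allocation so any region a later partial fill skips
  // (e.g. padding in a stickified layout) is deterministic.
  std::memset(buf, 0, bufSize);
  // ... a fillPayload(buf)-style call would then write only the used portion ...
  std::free(buf);
  return 0;
}
```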

src/Compiler/CompilerPasses.cpp

Lines changed: 4 additions & 0 deletions
@@ -281,12 +281,16 @@ void addKrnlToLLVMPasses(
   // pm.addNestedPass<func::FuncOp>(krnl::createConvertSeqToMemrefPass());
 
   pm.addPass(mlir::memref::createFoldMemRefAliasOpsPass());
+  // This pass is required on s390x targets to ensure all vector operations
+  // are properly lowered to LLVM dialect. (e.g., vector.to_elements)
+  pm.addPass(mlir::createConvertVectorToLLVMPass());
 
   if (profileIR)
     pm.addNestedPass<func::FuncOp>(onnx_mlir::createInstrumentCleanupPass());
 
   if (enableBoundCheck)
     pm.addPass(mlir::createGenerateRuntimeVerificationPass());
+
   pm.addPass(krnl::createConvertKrnlToLLVMPass(verifyInputTensors,
       /*useLRODATA=*/(modelSize == ModelSize::large),
       /*storeConstantsToFile=*/storeConstantsToFile,
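For orientation, here is a minimal standalone sketch of a pipeline with the same ordering as the lines added above. The two pass constructors are the ones named in the diff; the driver scaffolding (headers, module handling) is assumed and may differ from the onnx-mlir sources.

```cpp
#include "mlir/Conversion/Passes.h"                // assumed location of createConvertVectorToLLVMPass
#include "mlir/Dialect/MemRef/Transforms/Passes.h" // assumed location of createFoldMemRefAliasOpsPass
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"

// Sketch only: fold memref alias ops, then lower leftover vector ops
// (e.g. vector.to_elements) to the LLVM dialect, as the newly added pass does.
mlir::LogicalResult runLoweringSketch(mlir::ModuleOp module) {
  mlir::PassManager pm(module.getContext());
  pm.addPass(mlir::memref::createFoldMemRefAliasOpsPass());
  pm.addPass(mlir::createConvertVectorToLLVMPass());
  return pm.run(module);
}
```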

src/Conversion/ONNXToTOSA/Tensor/Resize.cpp

Lines changed: 5 additions & 2 deletions
@@ -291,7 +291,9 @@ class ONNXResizeOpLoweringToTOSA : public ConversionPattern {
     bool isBilinear = mode == "linear";
     bool isNearest = mode == "nearest";
     bool isNearestModeFloor = nearestMode == "floor";
-    StringRef resizeMode = isBilinear ? "BILINEAR" : "NEAREST_NEIGHBOR";
+    mlir::tosa::ResizeMode resizeMode =
+        isBilinear ? mlir::tosa::ResizeMode::BILINEAR
+                   : mlir::tosa::ResizeMode::NEAREST_NEIGHBOR;
 
     if (halfPixelSymmetric)
       return rewriter.notifyMatchFailure(op,
@@ -317,7 +319,8 @@ class ONNXResizeOpLoweringToTOSA : public ConversionPattern {
     Value border = mlir::tosa::getTosaConstShape(
         rewriter, loc, {yDimension.border, xDimension.border});
 
-    auto resizeModeAttr = rewriter.getStringAttr(resizeMode);
+    auto resizeModeAttr =
+        mlir::tosa::ResizeModeAttr::get(rewriter.getContext(), resizeMode);
     Type newOutputType =
         RankedTensorType::get(llvm::SmallVector<int64_t, 4>(
             inputType.getRank(), ShapedType::kDynamic),
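The TOSA resize mode is now carried as a typed enum attribute rather than a string. Below is a small self-contained helper performing the same construction; the helper name and include path are assumptions, while the mlir::tosa identifiers come from the patch.

```cpp
#include "mlir/Dialect/Tosa/IR/TosaOps.h" // assumed header exposing ResizeMode / ResizeModeAttr

// Hypothetical helper mirroring the change above: choose the enum value and
// wrap it in a ResizeModeAttr instead of building a StringAttr.
static mlir::tosa::ResizeModeAttr makeResizeModeAttr(
    mlir::MLIRContext *ctx, bool isBilinear) {
  mlir::tosa::ResizeMode mode = isBilinear
                                    ? mlir::tosa::ResizeMode::BILINEAR
                                    : mlir::tosa::ResizeMode::NEAREST_NEIGHBOR;
  return mlir::tosa::ResizeModeAttr::get(ctx, mode);
}
```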

src/Tools/onnx-mlir-opt/onnx-mlir-opt.cpp

Lines changed: 1 addition & 1 deletion
@@ -158,7 +158,7 @@ int main(int argc, char **argv) {
   if (!parseCustomEnvFlagsCommandLineOption(argc, argv, &llvm::errs()) ||
       !llvm::cl::ParseCommandLineOptions(argc, argv,
           getVendorName() + " - A modular optimizer driver\n", &llvm::errs(),
-          customEnvFlags.c_str())) {
+          nullptr, customEnvFlags.c_str())) {
     llvm::errs() << "Failed to parse options\n";
     return 1;
   }

src/onnx-mlir.cpp

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@ int main(int argc, char *argv[]) {
   if (!parseCustomEnvFlagsCommandLineOption(argc, argv, &llvm::errs()) ||
       !llvm::cl::ParseCommandLineOptions(argc, argv,
           getVendorName() + " - A modular optimizer driver\n", &llvm::errs(),
-          customEnvFlags.c_str())) {
+          nullptr, customEnvFlags.c_str())) {
     llvm::errs() << "Failed to parse options\n";
     return 1;
   }

test/backend-cpp/TestCategoryMapper.cpp

Lines changed: 1 addition & 1 deletion
@@ -66,7 +66,7 @@ int main(int argc, char *argv[]) {
   setCompilerOption(onnx_mlir::OptionKind::CompilerOptLevel, "3");
   mlir::registerPassManagerCLOptions();
   llvm::cl::ParseCommandLineOptions(
-      argc, argv, "TestCategoryMapper\n", nullptr, "TEST_ARGS");
+      argc, argv, "TestCategoryMapper\n", nullptr, nullptr, "TEST_ARGS");
   onnx_mlir::initCompilerConfig();
   std::cout << "Target options: \""
             << onnx_mlir::getCompilerOption(onnx_mlir::OptionKind::TargetAccel)
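The same one-argument adjustment appears in onnx-mlir.cpp, onnx-mlir-opt.cpp, and this test driver: the bumped LLVM adds a parameter to llvm::cl::ParseCommandLineOptions between the error stream and the environment-variable name, and all three call sites pass nullptr for it. A minimal sketch of the updated call shape follows; the tool name and env-var string are placeholders, and what the new parameter controls is not asserted here.

```cpp
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"

int main(int argc, char **argv) {
  // Mirrors the patched call sites: the extra nullptr fills the parameter
  // newly added in the bumped LLVM; "TOOL_ARGS" is a placeholder env var.
  if (!llvm::cl::ParseCommandLineOptions(argc, argv, "example tool\n",
          &llvm::errs(), nullptr, "TOOL_ARGS")) {
    llvm::errs() << "Failed to parse options\n";
    return 1;
  }
  return 0;
}
```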

test/mlir/conversion/onnx_to_krnl/Math/Elementwise_with_canonicalize_O3.mlir

Lines changed: 46 additions & 35 deletions
@@ -108,16 +108,19 @@ func.func @round(%arg0: tensor<15xf32>) -> tensor<*xf32> {
 // CHECK: [[LOAD_PARAM_0_MEM_:%.+]] = vector.load [[PARAM_0_]]{{.}}[[VAR_2_]]{{.}} : memref<15xf32>, vector<12xf32>
 // CHECK: [[VAR_4_:%.+]] = vector.shape_cast [[LOAD_PARAM_0_MEM_]] : vector<12xf32> to vector<3x4xf32>
 // CHECK: [[VAR_5_:%.+]] = vector.extract [[VAR_4_]][0] : vector<4xf32> from vector<3x4xf32>
-// CHECK: [[VAR_6_:%.+]] = "krnl.round_even"([[VAR_5_]]) : (vector<4xf32>) -> vector<4xf32>
-// CHECK-DAG: [[VAR_7_:%.+]] = vector.insert [[VAR_6_]], [[VAR_4_]] [0] : vector<4xf32> into vector<3x4xf32>
-// CHECK-DAG: [[VAR_8_:%.+]] = vector.extract [[VAR_4_]][1] : vector<4xf32> from vector<3x4xf32>
-// CHECK: [[VAR_9_:%.+]] = "krnl.round_even"([[VAR_8_]]) : (vector<4xf32>) -> vector<4xf32>
-// CHECK-DAG: [[VAR_10_:%.+]] = vector.insert [[VAR_9_]], [[VAR_7_]] [1] : vector<4xf32> into vector<3x4xf32>
-// CHECK-DAG: [[VAR_11_:%.+]] = vector.extract [[VAR_4_]][2] : vector<4xf32> from vector<3x4xf32>
-// CHECK: [[VAR_12_:%.+]] = "krnl.round_even"([[VAR_11_]]) : (vector<4xf32>) -> vector<4xf32>
-// CHECK: [[VAR_13_:%.+]] = vector.insert [[VAR_12_]], [[VAR_10_]] [2] : vector<4xf32> into vector<3x4xf32>
-// CHECK: [[VAR_14_:%.+]] = vector.shape_cast [[VAR_13_]] : vector<3x4xf32> to vector<12xf32>
-// CHECK: vector.store [[VAR_14_]], [[VAR_view_]]{{.}}[[VAR_2_]]{{.}} : memref<15xf32>, vector<12xf32>
+// CHECK-DAG: [[VAR_6_:%.+]] = "krnl.round_even"([[VAR_5_]]) : (vector<4xf32>) -> vector<4xf32>
+// CHECK-DAG: [[VAR_7_:%.+]] = vector.extract [[VAR_4_]][1] : vector<4xf32> from vector<3x4xf32>
+// CHECK-NOT: separator of consecutive DAGs
+// CHECK-DAG: [[VAR_8_:%.+]] = "krnl.round_even"([[VAR_7_]]) : (vector<4xf32>) -> vector<4xf32>
+// CHECK-DAG: [[VAR_9_:%.+]] = vector.extract [[VAR_4_]][2] : vector<4xf32> from vector<3x4xf32>
+// CHECK-NOT: separator of consecutive DAGs
+// CHECK-DAG: [[VAR_10_:%.+]] = "krnl.round_even"([[VAR_9_]]) : (vector<4xf32>) -> vector<4xf32>
+// CHECK-DAG: [[VAR_11_:%.+]]:4 = vector.to_elements [[VAR_6_]] : vector<4xf32>
+// CHECK-DAG: [[VAR_12_:%.+]]:4 = vector.to_elements [[VAR_8_]] : vector<4xf32>
+// CHECK: [[VAR_13_:%.+]]:4 = vector.to_elements [[VAR_10_]] : vector<4xf32>
+// CHECK: [[VAR_14_:%.+]] = vector.from_elements [[VAR_11_]]#0, [[VAR_11_]]#1, [[VAR_11_]]#2, [[VAR_11_]]#3, [[VAR_12_]]#0, [[VAR_12_]]#1, [[VAR_12_]]#2, [[VAR_12_]]#3, [[VAR_13_]]#0, [[VAR_13_]]#1, [[VAR_13_]]#2, [[VAR_13_]]#3 : vector<3x4xf32>
+// CHECK: [[VAR_15_:%.+]] = vector.shape_cast [[VAR_14_]] : vector<3x4xf32> to vector<12xf32>
+// CHECK: vector.store [[VAR_15_]], [[VAR_view_]]{{.}}[[VAR_2_]]{{.}} : memref<15xf32>, vector<12xf32>
 // CHECK: }
 // CHECK: [[LOOP_1_:%.+]] = krnl.define_loops 1
 // CHECK: krnl.iterate([[LOOP_1_]]) with ([[LOOP_1_]] -> [[I_1_:%.+]] = 12 to 15){
@@ -165,31 +168,39 @@ func.func private @test_round_multiple16(%arg0 : tensor<?x32xf32>) -> tensor<*xf
 // CHECK: [[LOAD_VAR_reshape_MEM_:%.+]] = vector.load [[VAR_reshape_]]{{.}}[[VAR_3_]]{{.}} : memref<?xf32>, vector<32xf32>
 // CHECK: [[VAR_5_:%.+]] = vector.shape_cast [[LOAD_VAR_reshape_MEM_]] : vector<32xf32> to vector<8x4xf32>
 // CHECK: [[VAR_6_:%.+]] = vector.extract [[VAR_5_]][0] : vector<4xf32> from vector<8x4xf32>
-// CHECK: [[VAR_7_:%.+]] = "krnl.round_even"([[VAR_6_]]) : (vector<4xf32>) -> vector<4xf32>
-// CHECK-DAG: [[VAR_8_:%.+]] = vector.insert [[VAR_7_]], [[VAR_5_]] [0] : vector<4xf32> into vector<8x4xf32>
-// CHECK-DAG: [[VAR_9_:%.+]] = vector.extract [[VAR_5_]][1] : vector<4xf32> from vector<8x4xf32>
-// CHECK: [[VAR_10_:%.+]] = "krnl.round_even"([[VAR_9_]]) : (vector<4xf32>) -> vector<4xf32>
-// CHECK-DAG: [[VAR_11_:%.+]] = vector.insert [[VAR_10_]], [[VAR_8_]] [1] : vector<4xf32> into vector<8x4xf32>
-// CHECK-DAG: [[VAR_12_:%.+]] = vector.extract [[VAR_5_]][2] : vector<4xf32> from vector<8x4xf32>
-// CHECK: [[VAR_13_:%.+]] = "krnl.round_even"([[VAR_12_]]) : (vector<4xf32>) -> vector<4xf32>
-// CHECK-DAG: [[VAR_14_:%.+]] = vector.insert [[VAR_13_]], [[VAR_11_]] [2] : vector<4xf32> into vector<8x4xf32>
-// CHECK-DAG: [[VAR_15_:%.+]] = vector.extract [[VAR_5_]][3] : vector<4xf32> from vector<8x4xf32>
-// CHECK: [[VAR_16_:%.+]] = "krnl.round_even"([[VAR_15_]]) : (vector<4xf32>) -> vector<4xf32>
-// CHECK-DAG: [[VAR_17_:%.+]] = vector.insert [[VAR_16_]], [[VAR_14_]] [3] : vector<4xf32> into vector<8x4xf32>
-// CHECK-DAG: [[VAR_18_:%.+]] = vector.extract [[VAR_5_]][4] : vector<4xf32> from vector<8x4xf32>
-// CHECK: [[VAR_19_:%.+]] = "krnl.round_even"([[VAR_18_]]) : (vector<4xf32>) -> vector<4xf32>
-// CHECK-DAG: [[VAR_20_:%.+]] = vector.insert [[VAR_19_]], [[VAR_17_]] [4] : vector<4xf32> into vector<8x4xf32>
-// CHECK-DAG: [[VAR_21_:%.+]] = vector.extract [[VAR_5_]][5] : vector<4xf32> from vector<8x4xf32>
-// CHECK: [[VAR_22_:%.+]] = "krnl.round_even"([[VAR_21_]]) : (vector<4xf32>) -> vector<4xf32>
-// CHECK-DAG: [[VAR_23_:%.+]] = vector.insert [[VAR_22_]], [[VAR_20_]] [5] : vector<4xf32> into vector<8x4xf32>
-// CHECK-DAG: [[VAR_24_:%.+]] = vector.extract [[VAR_5_]][6] : vector<4xf32> from vector<8x4xf32>
-// CHECK: [[VAR_25_:%.+]] = "krnl.round_even"([[VAR_24_]]) : (vector<4xf32>) -> vector<4xf32>
-// CHECK-DAG: [[VAR_26_:%.+]] = vector.insert [[VAR_25_]], [[VAR_23_]] [6] : vector<4xf32> into vector<8x4xf32>
-// CHECK-DAG: [[VAR_27_:%.+]] = vector.extract [[VAR_5_]][7] : vector<4xf32> from vector<8x4xf32>
-// CHECK: [[VAR_28_:%.+]] = "krnl.round_even"([[VAR_27_]]) : (vector<4xf32>) -> vector<4xf32>
-// CHECK: [[VAR_29_:%.+]] = vector.insert [[VAR_28_]], [[VAR_26_]] [7] : vector<4xf32> into vector<8x4xf32>
-// CHECK: [[VAR_30_:%.+]] = vector.shape_cast [[VAR_29_]] : vector<8x4xf32> to vector<32xf32>
-// CHECK: vector.store [[VAR_30_]], [[VAR_reshape_3_]]{{.}}[[VAR_3_]]{{.}} : memref<?xf32>, vector<32xf32>
+// CHECK-DAG: [[VAR_7_:%.+]] = "krnl.round_even"([[VAR_6_]]) : (vector<4xf32>) -> vector<4xf32>
+// CHECK-DAG: [[VAR_8_:%.+]] = vector.extract [[VAR_5_]][1] : vector<4xf32> from vector<8x4xf32>
+// CHECK-NOT: separator of consecutive DAGs
+// CHECK-DAG: [[VAR_9_:%.+]] = "krnl.round_even"([[VAR_8_]]) : (vector<4xf32>) -> vector<4xf32>
+// CHECK-DAG: [[VAR_10_:%.+]] = vector.extract [[VAR_5_]][2] : vector<4xf32> from vector<8x4xf32>
+// CHECK-NOT: separator of consecutive DAGs
+// CHECK-DAG: [[VAR_11_:%.+]] = "krnl.round_even"([[VAR_10_]]) : (vector<4xf32>) -> vector<4xf32>
+// CHECK-DAG: [[VAR_12_:%.+]] = vector.extract [[VAR_5_]][3] : vector<4xf32> from vector<8x4xf32>
+// CHECK-NOT: separator of consecutive DAGs
+// CHECK-DAG: [[VAR_13_:%.+]] = "krnl.round_even"([[VAR_12_]]) : (vector<4xf32>) -> vector<4xf32>
+// CHECK-DAG: [[VAR_14_:%.+]] = vector.extract [[VAR_5_]][4] : vector<4xf32> from vector<8x4xf32>
+// CHECK-NOT: separator of consecutive DAGs
+// CHECK-DAG: [[VAR_15_:%.+]] = "krnl.round_even"([[VAR_14_]]) : (vector<4xf32>) -> vector<4xf32>
+// CHECK-DAG: [[VAR_16_:%.+]] = vector.extract [[VAR_5_]][5] : vector<4xf32> from vector<8x4xf32>
+// CHECK-NOT: separator of consecutive DAGs
+// CHECK-DAG: [[VAR_17_:%.+]] = "krnl.round_even"([[VAR_16_]]) : (vector<4xf32>) -> vector<4xf32>
+// CHECK-DAG: [[VAR_18_:%.+]] = vector.extract [[VAR_5_]][6] : vector<4xf32> from vector<8x4xf32>
+// CHECK-NOT: separator of consecutive DAGs
+// CHECK-DAG: [[VAR_19_:%.+]] = "krnl.round_even"([[VAR_18_]]) : (vector<4xf32>) -> vector<4xf32>
+// CHECK-DAG: [[VAR_20_:%.+]] = vector.extract [[VAR_5_]][7] : vector<4xf32> from vector<8x4xf32>
+// CHECK-NOT: separator of consecutive DAGs
+// CHECK-DAG: [[VAR_21_:%.+]] = "krnl.round_even"([[VAR_20_]]) : (vector<4xf32>) -> vector<4xf32>
+// CHECK-DAG: [[VAR_22_:%.+]]:4 = vector.to_elements [[VAR_7_]] : vector<4xf32>
+// CHECK-DAG: [[VAR_23_:%.+]]:4 = vector.to_elements [[VAR_9_]] : vector<4xf32>
+// CHECK-DAG: [[VAR_24_:%.+]]:4 = vector.to_elements [[VAR_11_]] : vector<4xf32>
+// CHECK-DAG: [[VAR_25_:%.+]]:4 = vector.to_elements [[VAR_13_]] : vector<4xf32>
+// CHECK-DAG: [[VAR_26_:%.+]]:4 = vector.to_elements [[VAR_15_]] : vector<4xf32>
+// CHECK-DAG: [[VAR_27_:%.+]]:4 = vector.to_elements [[VAR_17_]] : vector<4xf32>
+// CHECK-DAG: [[VAR_28_:%.+]]:4 = vector.to_elements [[VAR_19_]] : vector<4xf32>
+// CHECK: [[VAR_29_:%.+]]:4 = vector.to_elements [[VAR_21_]] : vector<4xf32>
+// CHECK: [[VAR_30_:%.+]] = vector.from_elements [[VAR_22_]]#0, [[VAR_22_]]#1, [[VAR_22_]]#2, [[VAR_22_]]#3, [[VAR_23_]]#0, [[VAR_23_]]#1, [[VAR_23_]]#2, [[VAR_23_]]#3, [[VAR_24_]]#0, [[VAR_24_]]#1, [[VAR_24_]]#2, [[VAR_24_]]#3, [[VAR_25_]]#0, [[VAR_25_]]#1, [[VAR_25_]]#2, [[VAR_25_]]#3, [[VAR_26_]]#0, [[VAR_26_]]#1, [[VAR_26_]]#2, [[VAR_26_]]#3, [[VAR_27_]]#0, [[VAR_27_]]#1, [[VAR_27_]]#2, [[VAR_27_]]#3, [[VAR_28_]]#0, [[VAR_28_]]#1, [[VAR_28_]]#2, [[VAR_28_]]#3, [[VAR_29_]]#0, [[VAR_29_]]#1, [[VAR_29_]]#2, [[VAR_29_]]#3 : vector<8x4xf32>
+// CHECK: [[VAR_31_:%.+]] = vector.shape_cast [[VAR_30_]] : vector<8x4xf32> to vector<32xf32>
+// CHECK: vector.store [[VAR_31_]], [[VAR_reshape_3_]]{{.}}[[VAR_3_]]{{.}} : memref<?xf32>, vector<32xf32>
 // CHECK: }
 // CHECK: }
 // CHECK: return [[RES_]] : memref<?x32xf32>