diff --git a/externals/llvm-project b/externals/llvm-project
index 5e5a22caf88a..bbd2b08b95fe 160000
--- a/externals/llvm-project
+++ b/externals/llvm-project
@@ -1 +1 @@
-Subproject commit 5e5a22caf88ac1ccfa8dc5720295fdeba0ad9372
+Subproject commit bbd2b08b95fe76bea138c1b03c1cd42ed3ee04df
diff --git a/externals/stablehlo b/externals/stablehlo
index 83f095e7217c..ab709fe48de8 160000
--- a/externals/stablehlo
+++ b/externals/stablehlo
@@ -1 +1 @@
-Subproject commit 83f095e7217c897f1eccac5652600ceb944cb0e0
+Subproject commit ab709fe48de88c67717abfbd7ef17425eb95ddaf
diff --git a/include/torch-mlir/Conversion/TorchToStablehlo/StablehloLegalizeUtils.h b/include/torch-mlir/Conversion/TorchToStablehlo/StablehloLegalizeUtils.h
index e8d57b7f6a72..6e14b324b656 100644
--- a/include/torch-mlir/Conversion/TorchToStablehlo/StablehloLegalizeUtils.h
+++ b/include/torch-mlir/Conversion/TorchToStablehlo/StablehloLegalizeUtils.h
@@ -51,7 +51,7 @@ Value promoteType(PatternRewriter &rewriter, Location loc, Value input,
 Value promoteAndBroadcast(ConversionPatternRewriter &rewriter, Value input,
                           TensorType outType);
 
-SmallVector<size_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank);
+SmallVector<int64_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank);
 
 // Get the dimension sizes of the input tensor, given the dimension axes
 FailureOr<SmallVector<Value, 4>> getDimSizesOfTensor(PatternRewriter &rewriter,
diff --git a/lib/Conversion/TorchToStablehlo/Basic.cpp b/lib/Conversion/TorchToStablehlo/Basic.cpp
index 73710997709a..f0dc4aaf2dfa 100644
--- a/lib/Conversion/TorchToStablehlo/Basic.cpp
+++ b/lib/Conversion/TorchToStablehlo/Basic.cpp
@@ -615,12 +615,8 @@ class ConvertAtenTransposeIntOp
     SmallVector<int64_t> permValues(inputRank);
     std::iota(std::begin(permValues), std::end(permValues), 0);
     std::swap(permValues[dim0], permValues[dim1]);
-    DenseIntElementsAttr permutation = DenseIntElementsAttr::get(
-        RankedTensorType::get({static_cast<int64_t>(permValues.size())},
-                              rewriter.getI64Type()),
-        permValues);
     rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
-                                                        permutation);
+                                                        permValues);
     return success();
   }
 };
@@ -793,12 +789,8 @@ LogicalResult ConvertAtenOp<AtenPermuteOp>::matchAndRewrite(
     return op.emitError("not all dims are valid");
   }
 
-  DenseIntElementsAttr permutation = DenseIntElementsAttr::get(
-      RankedTensorType::get({static_cast<int64_t>(permValues.size())},
-                            rewriter.getI64Type()),
-      permValues);
   rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
-                                                      permutation);
+                                                      permValues);
   return success();
 }
@@ -1755,8 +1747,7 @@ LogicalResult ConvertAtenOp<AtenFlipOp>::matchAndRewrite(
     }
   }
 
-  rewriter.replaceOpWithNewOp<stablehlo::ReverseOp>(
-      op, outType, self, rewriter.getI64TensorAttr(dims));
+  rewriter.replaceOpWithNewOp<stablehlo::ReverseOp>(op, outType, self, dims);
   return success();
 }
 
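Note: the stablehlo submodule bump above (83f095e -> ab709fe) switches the dimension attributes of ops such as stablehlo.transpose and stablehlo.reverse from a 1-D i64 DenseIntElementsAttr to DenseI64ArrayAttr, and the generated C++ builders now accept a plain ArrayRef<int64_t>. Every Basic.cpp hunk is the same mechanical rewrite: delete the hand-rolled tensor attribute and pass the index vector straight through. A minimal before/after sketch of the pattern, assuming rewriter, op, self, and outType come from the surrounding conversion-pattern match (names follow the patch):

    // Before the bump: wrap the permutation in a 1-D i64 tensor attribute
    // by hand.
    DenseIntElementsAttr permutation = DenseIntElementsAttr::get(
        RankedTensorType::get({static_cast<int64_t>(permValues.size())},
                              rewriter.getI64Type()),
        permValues);
    rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
                                                        permutation);

    // After the bump: the builder takes ArrayRef<int64_t> directly and
    // constructs the DenseI64ArrayAttr itself.
    rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
                                                        permValues);

The Linear.cpp hunks below apply the same rewrite, where a literal dimension list like rewriter.getI64TensorAttr({0, 1}) becomes the plain ArrayRef<int64_t>{0, 1}.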
diff --git a/lib/Conversion/TorchToStablehlo/Linear.cpp b/lib/Conversion/TorchToStablehlo/Linear.cpp
index 71d679aeada4..df92317824a1 100644
--- a/lib/Conversion/TorchToStablehlo/Linear.cpp
+++ b/lib/Conversion/TorchToStablehlo/Linear.cpp
@@ -62,13 +62,9 @@ Value getPermutedTensor(PatternRewriter &rewriter, Operation *op, Value input,
     newShape.push_back(inpShape[d]);
   }
 
-  auto attrTy = RankedTensorType::get({static_cast<int64_t>(transDims.size())},
-                                      rewriter.getIntegerType(64));
-  auto permuteAttr = DenseIntElementsAttr::get(attrTy, transDims);
-
   auto outTy = RankedTensorType::get(newShape, inputTy.getElementType());
   auto result = rewriter.create<stablehlo::TransposeOp>(op->getLoc(), outTy,
-                                                        input, permuteAttr);
+                                                        input, transDims);
   return result.getResult();
 }
 
@@ -500,8 +496,8 @@ class ConvertAtenConvolutionOp : public ConvertAtenOp<AtenConvolutionOp> {
     for (int64_t i = 0; i <= rank; i++)
       transposeDims[i] = i;
     std::swap(transposeDims[rank - 1], transposeDims[rank - 2]);
-    weight = rewriter.create<stablehlo::TransposeOp>(
-        op->getLoc(), weight, rewriter.getI64TensorAttr(transposeDims));
+    weight = rewriter.create<stablehlo::TransposeOp>(op->getLoc(), weight,
+                                                     transposeDims);
 
     // 3. [H, W, ..., G, OC, IC//G] => [H, W, ..., G*OC, IC//G]
     weightShapeInt.erase(weightShapeInt.end() - 2);
@@ -546,12 +542,10 @@ class ConvertAtenConvolutionOp : public ConvertAtenOp<AtenConvolutionOp> {
     }
     auto transposeTy =
         RankedTensorType::get(transposeShape, weightTy.getElementType());
-    DenseIntElementsAttr permAttr = DenseIntElementsAttr::get(
-        RankedTensorType::get({nDims}, rewriter.getI64Type()), perm);
     auto transposeOp = rewriter.create<stablehlo::TransposeOp>(
-        op->getLoc(), transposeTy, weight, permAttr);
+        op->getLoc(), transposeTy, weight, perm);
     auto reverseOp = rewriter.create<stablehlo::ReverseOp>(
-        op->getLoc(), transposeOp, rewriter.getI64TensorAttr({0, 1}));
+        op->getLoc(), transposeOp, ArrayRef<int64_t>{0, 1});
 
     // Prepare for transposed convolution
     SmallVector<int64_t> stablehloStrideVec(nSpatialDims, 1);
diff --git a/lib/Conversion/TorchToStablehlo/StablehloLegalizeUtils.cpp b/lib/Conversion/TorchToStablehlo/StablehloLegalizeUtils.cpp
index a25a66bbb293..ed203cb0f91f 100644
--- a/lib/Conversion/TorchToStablehlo/StablehloLegalizeUtils.cpp
+++ b/lib/Conversion/TorchToStablehlo/StablehloLegalizeUtils.cpp
@@ -250,12 +250,12 @@ Value promoteAndBroadcast(ConversionPatternRewriter &rewriter, Value input,
   return bcast_op.getResult();
 }
 
-SmallVector<size_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank) {
-  SmallVector<size_t> posDims;
+SmallVector<int64_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank) {
+  SmallVector<int64_t> posDims;
   posDims.reserve(rank);
   std::transform(
       dims.begin(), dims.end(), std::back_inserter(posDims),
-      [rank](int64_t d) -> size_t { return toPositiveDim(d, rank); });
+      [rank](int64_t d) -> int64_t { return toPositiveDim(d, rank); });
   return posDims;
 }
 
@@ -316,10 +316,10 @@ FailureOr<Value> unsqueezeTensor(PatternRewriter &rewriter, Operation *op,
         op, "failed to get dimension sizes of the input");
 
   auto dimSizes = *dimSizesInfo;
-  auto rank = dimSizes.size();
-  size_t newRank = rank + inputUnsqzDims.size();
+  int64_t rank = dimSizes.size();
+  int64_t newRank = rank + inputUnsqzDims.size();
   auto unsqzDims = toPositiveDims(inputUnsqzDims, newRank);
-  for (size_t k = 0, sz = unsqzDims.size(); k < sz; ++k)
+  for (int64_t k = 0, sz = unsqzDims.size(); k < sz; ++k)
     if (k > 1 && unsqzDims[k] <= unsqzDims[k - 1])
       return rewriter.notifyMatchFailure(
           op, "unsqueeze dimensions must be specified in order");
@@ -335,8 +335,8 @@ FailureOr<Value> unsqueezeTensor(PatternRewriter &rewriter, Operation *op,
   std::vector<int64_t> newShape;
   newDimSizes.reserve(newRank);
   newShape.reserve(newRank);
-  for (size_t k = 0, i = 0, j = 0; k < newRank; ++k) {
-    if (j < unsqzDims.size() && unsqzDims[j] == k) {
+  for (int64_t k = 0, i = 0, j = 0; k < newRank; ++k) {
+    if (j < static_cast<int64_t>(unsqzDims.size()) && unsqzDims[j] == k) {
       newDimSizes.push_back(one);
       newShape.push_back(1);
       j++;
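Note: with the builders now consuming ArrayRef<int64_t>, keeping toPositiveDims in size_t would force a cast at every call site and trip signed/unsigned comparisons in loops like the ones in unsqueezeTensor, so the helper and its loop counters migrate to int64_t throughout. A self-contained sketch of the reworked helper follows; toPositiveDim is torch-mlir's existing utility, re-implemented here (an assumed equivalent) only so the snippet compiles on its own:

    #include <cstdint>
    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"

    using llvm::ArrayRef;
    using llvm::SmallVector;

    // Assumed equivalent of torch-mlir's toPositiveDim: map a possibly
    // negative dim index into [0, rank).
    static int64_t toPositiveDim(int64_t dim, int64_t rank) {
      return dim >= 0 ? dim : dim + rank;
    }

    // Post-patch signature: int64_t in, int64_t out, so the result can be
    // compared against int64_t ranks and fed to the ArrayRef<int64_t>-based
    // StableHLO builders without further casts.
    SmallVector<int64_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank) {
      SmallVector<int64_t> posDims;
      posDims.reserve(rank);
      for (int64_t d : dims)
        posDims.push_back(toPositiveDim(d, rank));
      return posDims;
    }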
diff --git a/projects/pt1/e2e_testing/xfail_sets.py b/projects/pt1/e2e_testing/xfail_sets.py
index c4b0aab05f57..7b5e74e51b32 100644
--- a/projects/pt1/e2e_testing/xfail_sets.py
+++ b/projects/pt1/e2e_testing/xfail_sets.py
@@ -13,6 +13,8 @@
 from torch_mlir_e2e_test.test_suite import COMMON_TORCH_MLIR_LOWERING_XFAILS
 from torch_mlir._version import torch_version_for_comparison, version
 
+print(f"TORCH_VERSION_FOR_COMPARISON =", torch_version_for_comparison())
+
 LINALG_XFAIL_SET = COMMON_TORCH_MLIR_LOWERING_XFAILS | {
     # Lowering Torch Backend IR -> Linalg-on-Tensors Backend IR failed
     # 'linalg.depthwise_conv_2d_nchw_chw' op inferred input/output operand #1 has shape's dimension #0 to be 4, but found 8
@@ -21,6 +23,14 @@
     "IscloseStaticModuleTrue_basic"
 }
 
+if torch_version_for_comparison() >= version.parse("2.2.0.dev20231204"):
+    LINALG_XFAIL_SET |= {
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped",
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier",
+        "ConvolutionModule2DGroups_basic",
+    }
+
+
 TORCHDYNAMO_XFAIL_SET = {
     #### General TorchDynamo/PyTorch errors
 
@@ -306,10 +316,11 @@
     "ArangeStartOutViewModule_basic",
 }
 
-if torch_version_for_comparison() < version.parse("2.1.0.dev"):
-    TORCHDYNAMO_XFAIL_SET -= {
-        "ScaledDotProductAttentionSameModule_basic",
-        "ScaledDotProductAttentionDifferentModule_basic",
+if torch_version_for_comparison() >= version.parse("2.2.0.dev20231204"):
+    TORCHDYNAMO_XFAIL_SET |= {
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped",
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier",
+        "ConvolutionModule2DGroups_basic",
     }
 
 TORCHDYNAMO_CRASHING_SET = {
@@ -1305,6 +1316,10 @@
     "MeanModule_basic",
     "ArangeStartOutModule_basic",
     "ArangeStartOutViewModule_basic",
+    "Conv2dBiasNoPaddingModule_basic",
+    "Conv2dNoPaddingModule_basic",
+    "Conv2dWithPaddingDilationStrideModule_basic",
+    "Conv2dWithPaddingModule_basic",
 }
 
 MAKE_FX_TOSA_PASS_SET = (TOSA_PASS_SET | {
@@ -1335,20 +1350,12 @@
     # failed to legalize operation 'torch.aten.to.dtype' that was explicitly marked illegal
     "AtenEyeModuleInt2D_basic",
     "AtenEyeMModuleInt2D_basic",
-}
-if torch_version_for_comparison() < version.parse("2.1.0.dev"):
-    MAKE_FX_TOSA_PASS_SET -= {
-        # 'tensor.expand_shape' op expected rank expansion, but found source rank 1 >= result rank 1
-        "ReshapeCollapseModule_basic",
-
-        # failed to lower torch.aten.empty.memory_format
-        "BatchNorm1DModule_basic",
-        "BatchNorm1DWith2DInputModule_basic",
-        "BatchNorm2DModule_basic",
-        "BatchNorm3DModule_basic",
-        "BatchNorm1DStaticShapeModule_basic",
-    }
+    "Conv2dBiasNoPaddingModule_basic",
+    "Conv2dNoPaddingModule_basic",
+    "Conv2dWithPaddingDilationStrideModule_basic",
+    "Conv2dWithPaddingModule_basic",
+}
 
 LTC_CRASHING_SET = {
     # TODO: update test to move all inputs to the lazy device. Otherwise test fails with:
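Note: the xfail_sets.py gates flip direction. The old code subtracted entries when the installed torch was older than 2.1.0.dev; the new code treats the ungated sets as the baseline and adds the grouped-convolution failures once torch is at least the 2.2.0.dev20231204 nightly. The version module imported from torch_mlir._version is, as far as the import suggests, packaging's version module, under which dev releases sort below the corresponding final release and dev dates compare numerically; the cutoff therefore matches later nightlies and the eventual 2.2.0 stable release alike. A small illustration of that ordering, assuming only that the packaging package is installed (it mirrors, not replaces, the checks in the patch):

    from packaging import version

    cutoff = version.parse("2.2.0.dev20231204")
    assert version.parse("2.2.0.dev20231201") < cutoff   # earlier nightly: gate stays off
    assert version.parse("2.2.0.dev20231210") >= cutoff  # later nightly: gate turns on
    assert version.parse("2.2.0") >= cutoff              # stable release: gate turns on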