Skip to content

Commit b61a4ce

Browse files
authored
Upgrade GlobalOpt, InputConversion, ExternalInterfaces to free create functions. NFC. (#21878)
The builder create methods are deprecated: https://mlir.llvm.org/deprecation/. See https://discourse.llvm.org/t/psa-opty-create-now-with-100-more-tab-complete/87339. The main benefit of free functions is better tab completion with LSP/IDE. I'm splitting the upgrade in chunks going by project directories.
1 parent e6f54a2 commit b61a4ce

18 files changed

+247
-246
lines changed

compiler/src/iree/compiler/ExternalInterfaces/EncodingExternalModels.cpp

Lines changed: 11 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -92,13 +92,13 @@ struct ContractionOpPropagationInterface final
9292
RankedTensorType operandEncodingType =
9393
collapseOp.getSrcType().cloneWithEncoding(
9494
operandEncodings.front());
95-
Value newEncodingOp = builder.create<IREE::Encoding::SetEncodingOp>(
96-
loc, operandEncodingType, collapseOp.getSrc());
95+
Value newEncodingOp = IREE::Encoding::SetEncodingOp::create(
96+
builder, loc, operandEncodingType, collapseOp.getSrc());
9797
auto resultEncodingType =
9898
dyn_cast<RankedTensorType>(opResult.getType())
9999
.cloneWithEncoding(resultEncodings.front());
100-
Value newCollapseOp = builder.create<tensor::CollapseShapeOp>(
101-
loc, resultEncodingType, newEncodingOp,
100+
Value newCollapseOp = tensor::CollapseShapeOp::create(
101+
builder, loc, resultEncodingType, newEncodingOp,
102102
collapseOp.getReassociationIndices());
103103
IREE::Encoding::PropagationResult result;
104104
result.replacements = {newCollapseOp};
@@ -228,9 +228,8 @@ struct GenericOpPropagationInterface final
228228
auto resType = RankedTensorType::get(
229229
operandType.getShape(), operandType.getElementType(),
230230
encoding);
231-
Value encodedInput =
232-
rewriter.create<IREE::Encoding::SetEncodingOp>(
233-
loc, resType, operand->get());
231+
Value encodedInput = IREE::Encoding::SetEncodingOp::create(
232+
rewriter, loc, resType, operand->get());
234233
result.generatedEncodingOps.push_back(
235234
encodedInput.getDefiningOp());
236235
encodedOperands.push_back(encodedInput);
@@ -253,8 +252,8 @@ struct GenericOpPropagationInterface final
253252

254253
// Create encoded generic op.
255254
rewriter.setInsertionPointAfter(emptyOp);
256-
Value encodedInit = rewriter.create<tensor::EmptyOp>(
257-
loc, emptyOp.getType().getShape(),
255+
Value encodedInit = tensor::EmptyOp::create(
256+
rewriter, loc, emptyOp.getType().getShape(),
258257
resultEncodingType.getElementType(),
259258
emptyOp.getDynamicSizes(), encoding);
260259
resultEncodingTypes.push_back(resultEncodingType);
@@ -271,10 +270,9 @@ struct GenericOpPropagationInterface final
271270
auto resultType =
272271
cast<RankedTensorType>(genericResult.getType())
273272
.dropEncoding();
274-
auto newUnsetEncoding =
275-
rewriter.create<IREE::Encoding::UnsetEncodingOp>(
276-
encodingOp.getLoc(), resultType, genericResult,
277-
encodingOp.getResultDims());
273+
auto newUnsetEncoding = IREE::Encoding::UnsetEncodingOp::create(
274+
rewriter, encodingOp.getLoc(), resultType, genericResult,
275+
encodingOp.getResultDims());
278276
result.replacements.push_back(newUnsetEncoding.getResult());
279277
result.generatedEncodingOps.push_back(newUnsetEncoding);
280278
}

compiler/src/iree/compiler/GlobalOptimization/ConvertStridedContractionToContraction.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -117,8 +117,8 @@ class ConvertStridedContractionToContraction
117117
}
118118
vSizes.push_back(rewriter.createOrFold<tensor::DimOp>(loc, input, i));
119119
}
120-
Value extractedSlice = rewriter.create<tensor::ExtractSliceOp>(
121-
loc, sliceTy, input, vOffset, vSizes, vStride);
120+
Value extractedSlice = tensor::ExtractSliceOp::create(
121+
rewriter, loc, sliceTy, input, vOffset, vSizes, vStride);
122122
rewriter.startOpModification(op);
123123
op.setIndexingMapsAttr(rewriter.getAffineMapArrayAttr(mapRange));
124124
op.setOperand(0, extractedSlice);

compiler/src/iree/compiler/GlobalOptimization/DecomposeConcat.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ static Value createTranspose(OpBuilder &builder, Value source,
2828
applyPermutationToVector(mixedSizes, perm);
2929
Type elemType = cast<RankedTensorType>(source.getType()).getElementType();
3030
Value empty =
31-
builder.create<tensor::EmptyOp>(source.getLoc(), mixedSizes, elemType)
31+
tensor::EmptyOp::create(builder, source.getLoc(), mixedSizes, elemType)
3232
.getResult();
3333
return builder
3434
.create<linalg::TransposeOp>(source.getLoc(), source, empty, perm)
@@ -75,9 +75,9 @@ struct TransposeInnerConcatenation : public OpRewritePattern<tensor::ConcatOp> {
7575
SmallVector<int64_t> newShape = applyPermutation(concatShape, permutation);
7676
auto newConcatType = RankedTensorType::get(
7777
newShape, concatOp.getResultType().getElementType());
78-
Value newConcat = rewriter.create<tensor::ConcatOp>(
79-
concatOp.getLoc(), newConcatType, /*dim=*/outerMostNonUnitDim,
80-
transposedInputs);
78+
Value newConcat =
79+
tensor::ConcatOp::create(rewriter, concatOp.getLoc(), newConcatType,
80+
/*dim=*/outerMostNonUnitDim, transposedInputs);
8181
auto invPerm = invertPermutationVector(permutation);
8282
Value transposedConcat = createTranspose(rewriter, newConcat, invPerm);
8383
rewriter.replaceOp(concatOp, transposedConcat);

compiler/src/iree/compiler/GlobalOptimization/DemoteContractionInputsToBF16.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -65,17 +65,17 @@ struct DemoteContractionInputsToBF16Pattern
6565
inputType.getRank(), utils::IteratorType::parallel);
6666
SmallVector<OpFoldResult> mixedSizes =
6767
tensor::getMixedSizes(rewriter, loc, input);
68-
Value empty = rewriter.create<tensor::EmptyOp>(loc, mixedSizes,
69-
rewriter.getBF16Type());
68+
Value empty = tensor::EmptyOp::create(rewriter, loc, mixedSizes,
69+
rewriter.getBF16Type());
7070
demotedInputs.push_back(
7171
rewriter
7272
.create<linalg::GenericOp>(
7373
loc, TypeRange{demotedInputType}, ValueRange{input},
7474
ValueRange{empty}, maps, iteratorTypes,
7575
[&](OpBuilder &b, Location loc, ValueRange args) {
76-
Value result = b.create<arith::TruncFOp>(
77-
loc, rewriter.getBF16Type(), args[0]);
78-
b.create<linalg::YieldOp>(loc, result);
76+
Value result = arith::TruncFOp::create(
77+
b, loc, rewriter.getBF16Type(), args[0]);
78+
linalg::YieldOp::create(b, loc, result);
7979
})
8080
->getResults()[0]);
8181
}

compiler/src/iree/compiler/GlobalOptimization/DetachElementwiseFromNamedOps.cpp

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -85,29 +85,29 @@ struct DetachElementwisePattern
8585
SmallVector<OpFoldResult> mixedSizes =
8686
tensor::getMixedSizes(rewriter, loc, outputOperand);
8787
auto initOp =
88-
rewriter.create<tensor::EmptyOp>(loc, mixedSizes, elementType);
89-
Value zero = rewriter.create<arith::ConstantOp>(
90-
loc, rewriter.getZeroAttr(elementType));
91-
Value fill =
92-
rewriter.create<linalg::FillOp>(loc, zero, initOp.getResult()).result();
88+
tensor::EmptyOp::create(rewriter, loc, mixedSizes, elementType);
89+
Value zero = arith::ConstantOp::create(rewriter, loc,
90+
rewriter.getZeroAttr(elementType));
91+
Value fill = linalg::FillOp::create(rewriter, loc, zero, initOp.getResult())
92+
.result();
9393

9494
// Update the contraction op to use the new zero tensor as output operand.
9595
rewriter.modifyOpInPlace(linalgOp,
9696
[&]() { linalgOp.setDpsInitOperand(0, fill); });
9797

9898
// Create a generic op to add back the original output tensor operand.
9999
rewriter.setInsertionPointAfter(linalgOp);
100-
auto genericOp = rewriter.create<linalg::GenericOp>(
101-
loc, outputType, ValueRange{linalgOp->getResult(0), outputOperand},
102-
fill, maps, iterators,
103-
[&](OpBuilder &b, Location nestedLoc, ValueRange args) {
100+
auto genericOp = linalg::GenericOp::create(
101+
rewriter, loc, outputType,
102+
ValueRange{linalgOp->getResult(0), outputOperand}, fill, maps,
103+
iterators, [&](OpBuilder &b, Location nestedLoc, ValueRange args) {
104104
Value result;
105105
if (llvm::isa<FloatType>(elementType)) {
106-
result = b.create<arith::AddFOp>(nestedLoc, args[0], args[1]);
106+
result = arith::AddFOp::create(b, nestedLoc, args[0], args[1]);
107107
} else {
108-
result = b.create<arith::AddIOp>(nestedLoc, args[0], args[1]);
108+
result = arith::AddIOp::create(b, nestedLoc, args[0], args[1]);
109109
}
110-
b.create<linalg::YieldOp>(nestedLoc, result);
110+
linalg::YieldOp::create(b, nestedLoc, result);
111111
});
112112
linalgOp->getResult(0).replaceAllUsesExcept(genericOp->getResult(0),
113113
genericOp);
@@ -153,8 +153,8 @@ struct DetachSplatConstantOutsOperands
153153

154154
Location loc = constOp.getLoc();
155155
Type elementType = resultType.getElementType();
156-
Value emptyTensorOp = rewriter.create<tensor::EmptyOp>(
157-
loc, resultType.getShape(), elementType);
156+
Value emptyTensorOp = tensor::EmptyOp::create(
157+
rewriter, loc, resultType.getShape(), elementType);
158158
TypedAttr constValue;
159159
if (llvm::isa<IntegerType>(elementType)) {
160160
constValue = rewriter.getIntegerAttr(
@@ -164,7 +164,7 @@ struct DetachSplatConstantOutsOperands
164164
elementType, attr.template getSplatValue<APFloat>());
165165
}
166166
Value scalarConstantOp =
167-
rewriter.create<arith::ConstantOp>(loc, elementType, constValue);
167+
arith::ConstantOp::create(rewriter, loc, elementType, constValue);
168168

169169
Value fillOp = rewriter
170170
.create<linalg::FillOp>(

compiler/src/iree/compiler/GlobalOptimization/ExpandTensorShapes.cpp

Lines changed: 29 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -87,8 +87,8 @@ static ExpandedGlobalMap expandGlobalTensorDims(Operation *rootOp,
8787
auto dimName =
8888
(global.tensorOp.getName() + "__d" + std::to_string(it.index()))
8989
.str();
90-
auto dimOp = builder.create<IREE::Util::GlobalOp>(
91-
global.tensorOp.getLoc(), dimName,
90+
auto dimOp = IREE::Util::GlobalOp::create(
91+
builder, global.tensorOp.getLoc(), dimName,
9292
/*isMutable=*/true, indexType);
9393
dimOp.setVisibility(global.tensorOp.getVisibility());
9494
symbolTable.insert(dimOp);
@@ -234,9 +234,9 @@ static void expandRegion(Region &region, SymbolTable &symbolTable,
234234
// Insert shape ties that we've sunk from callers.
235235
auto builder = OpBuilder::atBlockBegin(&block);
236236
for (auto &expansion : llvm::reverse(expansions)) {
237-
auto tieShapeOp = builder.create<IREE::Flow::TensorTieShapeOp>(
238-
region.getLoc(), expansion.tensor.getType(), expansion.tensor,
239-
expansion.dynamicDims);
237+
auto tieShapeOp = IREE::Flow::TensorTieShapeOp::create(
238+
builder, region.getLoc(), expansion.tensor.getType(),
239+
expansion.tensor, expansion.dynamicDims);
240240
expansion.tensor.replaceAllUsesExcept(tieShapeOp.getResult(), tieShapeOp);
241241
}
242242
}
@@ -283,9 +283,9 @@ static void retieResults(Operation *op, Operation *newOp,
283283
newOp->getResults().slice(newIdx, tensorType.getNumDynamicDims());
284284
newIdx += expandedValue.dynamicDims.size();
285285
tensorDimMap[expandedValue.tensor] = expandedValue;
286-
auto tieShapeOp = builder.create<IREE::Flow::TensorTieShapeOp>(
287-
op->getLoc(), expandedValue.tensor.getType(), expandedValue.tensor,
288-
expandedValue.dynamicDims);
286+
auto tieShapeOp = IREE::Flow::TensorTieShapeOp::create(
287+
builder, op->getLoc(), expandedValue.tensor.getType(),
288+
expandedValue.tensor, expandedValue.dynamicDims);
289289
oldResult.replaceAllUsesExcept(tieShapeOp.getResult(), tieShapeOp);
290290
}
291291
}
@@ -315,9 +315,9 @@ static void expandGlobalLoadOp(IREE::Util::GlobalLoadOpInterface op,
315315
dimOp.createLoadOp(op.getLoc(), builder).getLoadedGlobalValue());
316316
}
317317
tensorDimMap[op.getLoadedGlobalValue()] = expandedValue;
318-
auto tieShapeOp = builder.create<IREE::Flow::TensorTieShapeOp>(
319-
op.getLoc(), expandedValue.tensor.getType(), expandedValue.tensor,
320-
expandedValue.dynamicDims);
318+
auto tieShapeOp = IREE::Flow::TensorTieShapeOp::create(
319+
builder, op.getLoc(), expandedValue.tensor.getType(),
320+
expandedValue.tensor, expandedValue.dynamicDims);
321321
op.getLoadedGlobalValue().replaceAllUsesExcept(tieShapeOp.getResult(),
322322
tieShapeOp);
323323
}
@@ -436,7 +436,7 @@ static void expandReturnOp(IREE::Util::ReturnOp op, IndexSet &indexSet,
436436
OpBuilder builder(op);
437437
auto operands = expandOperands(op.getLoc(), op.getOperands(), tensorDimMap,
438438
indexSet, builder);
439-
builder.create<IREE::Util::ReturnOp>(op.getLoc(), operands);
439+
IREE::Util::ReturnOp::create(builder, op.getLoc(), operands);
440440
op.erase();
441441
}
442442

@@ -456,7 +456,7 @@ static void expandBranchOp(mlir::cf::BranchOp op, IndexSet &indexSet,
456456
OpBuilder builder(op);
457457
auto operands = expandOperands(op.getLoc(), op.getDestOperands(),
458458
tensorDimMap, indexSet, builder);
459-
builder.create<mlir::cf::BranchOp>(op.getLoc(), op.getDest(), operands);
459+
mlir::cf::BranchOp::create(builder, op.getLoc(), op.getDest(), operands);
460460
op.erase();
461461
}
462462

@@ -465,8 +465,8 @@ static void expandCondBranchOp(mlir::cf::CondBranchOp op, IndexSet &indexSet,
465465
if (!usesDynamicTensors(op))
466466
return;
467467
OpBuilder builder(op);
468-
builder.create<mlir::cf::CondBranchOp>(
469-
op.getLoc(), op.getCondition(), op.getTrueDest(),
468+
mlir::cf::CondBranchOp::create(
469+
builder, op.getLoc(), op.getCondition(), op.getTrueDest(),
470470
expandOperands(op.getLoc(), op.getTrueDestOperands(), tensorDimMap,
471471
indexSet, builder),
472472
op.getFalseDest(),
@@ -496,8 +496,9 @@ static void expandSelectOp(mlir::arith::SelectOp op, IndexSet &indexSet,
496496
auto falseValue = consumeExpandedValue(op.getLoc(), op.getFalseValue(),
497497
tensorDimMap, indexSet, builder);
498498

499-
auto selectOp = builder.create<mlir::arith::SelectOp>(
500-
op.getLoc(), op.getCondition(), op.getTrueValue(), op.getFalseValue());
499+
auto selectOp =
500+
mlir::arith::SelectOp::create(builder, op.getLoc(), op.getCondition(),
501+
op.getTrueValue(), op.getFalseValue());
501502

502503
SmallVector<Value> selectedDims;
503504
for (auto [trueDynamicDims, falseDynamicDims] :
@@ -508,9 +509,9 @@ static void expandSelectOp(mlir::arith::SelectOp op, IndexSet &indexSet,
508509
trueDynamicDims, falseDynamicDims)
509510
.getResult());
510511
}
511-
auto tieShapeOp = builder.create<IREE::Flow::TensorTieShapeOp>(
512-
selectOp.getLoc(), selectOp.getResult().getType(), selectOp.getResult(),
513-
selectedDims);
512+
auto tieShapeOp = IREE::Flow::TensorTieShapeOp::create(
513+
builder, selectOp.getLoc(), selectOp.getResult().getType(),
514+
selectOp.getResult(), selectedDims);
514515

515516
op.getResult().replaceAllUsesExcept(tieShapeOp.getResult(), tieShapeOp);
516517
op.erase();
@@ -524,9 +525,9 @@ static void expandWhileOp(mlir::scf::WhileOp op, SymbolTable &symbolTable,
524525
indexSet, builder);
525526
auto resultTypes = expandTypes(op.getResultTypes());
526527

527-
auto newOp = builder.create<scf::WhileOp>(op.getLoc(), resultTypes, operands,
528-
/*beforeBody*/ nullptr,
529-
/*afterBody*/ nullptr);
528+
auto newOp = scf::WhileOp::create(builder, op.getLoc(), resultTypes, operands,
529+
/*beforeBody*/ nullptr,
530+
/*afterBody*/ nullptr);
530531

531532
newOp.getBefore().takeBody(op.getBefore());
532533
newOp.getAfter().takeBody(op.getAfter());
@@ -545,8 +546,8 @@ static void expandIfOp(mlir::scf::IfOp op, SymbolTable &symbolTable,
545546
OpBuilder builder(op);
546547
auto resultTypes = expandTypes(op.getResultTypes());
547548

548-
auto newOp = builder.create<scf::IfOp>(
549-
op.getLoc(), resultTypes, op.getOperand(), op.elseBlock() != nullptr);
549+
auto newOp = scf::IfOp::create(builder, op.getLoc(), resultTypes,
550+
op.getOperand(), op.elseBlock() != nullptr);
550551

551552
newOp.getBodyRegion().takeBody(op.getBodyRegion());
552553
expandRegion(newOp.getBodyRegion(), symbolTable, globalMap, indexSet,
@@ -566,7 +567,7 @@ static void expandScfYieldOp(mlir::scf::YieldOp op, IndexSet &indexSet,
566567
OpBuilder builder(op);
567568
auto operands = expandOperands(op.getLoc(), op.getOperands(), tensorDimMap,
568569
indexSet, builder);
569-
builder.create<mlir::scf::YieldOp>(op.getLoc(), operands);
570+
mlir::scf::YieldOp::create(builder, op.getLoc(), operands);
570571
op.erase();
571572
}
572573

@@ -575,8 +576,8 @@ static void expandScfConditionOp(mlir::scf::ConditionOp op, IndexSet &indexSet,
575576
OpBuilder builder(op);
576577
auto operands = expandOperands(op.getLoc(), op.getArgs(), tensorDimMap,
577578
indexSet, builder);
578-
builder.create<mlir::scf::ConditionOp>(op.getLoc(), op.getCondition(),
579-
operands);
579+
mlir::scf::ConditionOp::create(builder, op.getLoc(), op.getCondition(),
580+
operands);
580581
op.erase();
581582
}
582583

0 commit comments

Comments (0)