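The change throughout this diff is mechanical: op construction moves from the
OpBuilder member template, builder.create<OpTy>(loc, ...), to the static
create hook on the op class, OpTy::create(builder, loc, ...), with the
remaining arguments unchanged. A minimal sketch of the two spellings, assuming
a recent MLIR build where ops expose these static create overloads (the helper
name buildEmpty is hypothetical):

    #include "mlir/Dialect/Tensor/IR/Tensor.h"
    #include "mlir/IR/Builders.h"

    using namespace mlir;

    // Hypothetical helper showing both spellings; each builds the same
    // tensor.empty op.
    static Value buildEmpty(OpBuilder &builder, Location loc,
                            ArrayRef<int64_t> shape, Type elemType) {
      // Old spelling (member template on OpBuilder):
      //   return builder.create<tensor::EmptyOp>(loc, shape, elemType);
      // New spelling (static create; the builder becomes the first argument):
      return tensor::EmptyOp::create(builder, loc, shape, elemType);
    }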
@@ -92,13 +92,13 @@ struct ContractionOpPropagationInterface final
     RankedTensorType operandEncodingType =
         collapseOp.getSrcType().cloneWithEncoding(
             operandEncodings.front());
-    Value newEncodingOp = builder.create<IREE::Encoding::SetEncodingOp>(
-        loc, operandEncodingType, collapseOp.getSrc());
+    Value newEncodingOp = IREE::Encoding::SetEncodingOp::create(
+        builder, loc, operandEncodingType, collapseOp.getSrc());
     auto resultEncodingType =
         dyn_cast<RankedTensorType>(opResult.getType())
             .cloneWithEncoding(resultEncodings.front());
-    Value newCollapseOp = builder.create<tensor::CollapseShapeOp>(
-        loc, resultEncodingType, newEncodingOp,
+    Value newCollapseOp = tensor::CollapseShapeOp::create(
+        builder, loc, resultEncodingType, newEncodingOp,
         collapseOp.getReassociationIndices());
     IREE::Encoding::PropagationResult result;
     result.replacements = {newCollapseOp};
@@ -228,9 +228,8 @@ struct GenericOpPropagationInterface final
       auto resType = RankedTensorType::get(
           operandType.getShape(), operandType.getElementType(),
           encoding);
-      Value encodedInput =
-          rewriter.create<IREE::Encoding::SetEncodingOp>(
-              loc, resType, operand->get());
+      Value encodedInput = IREE::Encoding::SetEncodingOp::create(
+          rewriter, loc, resType, operand->get());
       result.generatedEncodingOps.push_back(
           encodedInput.getDefiningOp());
       encodedOperands.push_back(encodedInput);
@@ -253,8 +252,8 @@ struct GenericOpPropagationInterface final

     // Create encoded generic op.
     rewriter.setInsertionPointAfter(emptyOp);
-    Value encodedInit = rewriter.create<tensor::EmptyOp>(
-        loc, emptyOp.getType().getShape(),
+    Value encodedInit = tensor::EmptyOp::create(
+        rewriter, loc, emptyOp.getType().getShape(),
         resultEncodingType.getElementType(),
         emptyOp.getDynamicSizes(), encoding);
     resultEncodingTypes.push_back(resultEncodingType);
@@ -271,10 +270,9 @@ struct GenericOpPropagationInterface final
     auto resultType =
         cast<RankedTensorType>(genericResult.getType())
             .dropEncoding();
-    auto newUnsetEncoding =
-        rewriter.create<IREE::Encoding::UnsetEncodingOp>(
-            encodingOp.getLoc(), resultType, genericResult,
-            encodingOp.getResultDims());
+    auto newUnsetEncoding = IREE::Encoding::UnsetEncodingOp::create(
+        rewriter, encodingOp.getLoc(), resultType, genericResult,
+        encodingOp.getResultDims());
     result.replacements.push_back(newUnsetEncoding.getResult());
     result.generatedEncodingOps.push_back(newUnsetEncoding);
   }
@@ -117,8 +117,8 @@ class ConvertStridedContractionToContraction
       }
       vSizes.push_back(rewriter.createOrFold<tensor::DimOp>(loc, input, i));
     }
-    Value extractedSlice = rewriter.create<tensor::ExtractSliceOp>(
-        loc, sliceTy, input, vOffset, vSizes, vStride);
+    Value extractedSlice = tensor::ExtractSliceOp::create(
+        rewriter, loc, sliceTy, input, vOffset, vSizes, vStride);
     rewriter.startOpModification(op);
     op.setIndexingMapsAttr(rewriter.getAffineMapArrayAttr(mapRange));
     op.setOperand(0, extractedSlice);
@@ -28,7 +28,7 @@ static Value createTranspose(OpBuilder &builder, Value source,
   applyPermutationToVector(mixedSizes, perm);
   Type elemType = cast<RankedTensorType>(source.getType()).getElementType();
   Value empty =
-      builder.create<tensor::EmptyOp>(source.getLoc(), mixedSizes, elemType)
+      tensor::EmptyOp::create(builder, source.getLoc(), mixedSizes, elemType)
           .getResult();
   return builder
       .create<linalg::TransposeOp>(source.getLoc(), source, empty, perm)
@@ -75,9 +75,9 @@ struct TransposeInnerConcatenation : public OpRewritePattern<tensor::ConcatOp> {
   SmallVector<int64_t> newShape = applyPermutation(concatShape, permutation);
   auto newConcatType = RankedTensorType::get(
       newShape, concatOp.getResultType().getElementType());
-  Value newConcat = rewriter.create<tensor::ConcatOp>(
-      concatOp.getLoc(), newConcatType, /*dim=*/outerMostNonUnitDim,
-      transposedInputs);
+  Value newConcat =
+      tensor::ConcatOp::create(rewriter, concatOp.getLoc(), newConcatType,
+                               /*dim=*/outerMostNonUnitDim, transposedInputs);
   auto invPerm = invertPermutationVector(permutation);
   Value transposedConcat = createTranspose(rewriter, newConcat, invPerm);
   rewriter.replaceOp(concatOp, transposedConcat);
@@ -65,17 +65,17 @@ struct DemoteContractionInputsToBF16Pattern
           inputType.getRank(), utils::IteratorType::parallel);
       SmallVector<OpFoldResult> mixedSizes =
           tensor::getMixedSizes(rewriter, loc, input);
-      Value empty = rewriter.create<tensor::EmptyOp>(loc, mixedSizes,
-                                                     rewriter.getBF16Type());
+      Value empty = tensor::EmptyOp::create(rewriter, loc, mixedSizes,
+                                            rewriter.getBF16Type());
       demotedInputs.push_back(
           rewriter
               .create<linalg::GenericOp>(
                   loc, TypeRange{demotedInputType}, ValueRange{input},
                   ValueRange{empty}, maps, iteratorTypes,
                   [&](OpBuilder &b, Location loc, ValueRange args) {
-                    Value result = b.create<arith::TruncFOp>(
-                        loc, rewriter.getBF16Type(), args[0]);
-                    b.create<linalg::YieldOp>(loc, result);
+                    Value result = arith::TruncFOp::create(
+                        b, loc, rewriter.getBF16Type(), args[0]);
+                    linalg::YieldOp::create(b, loc, result);
                   })
               ->getResults()[0]);
     }
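The same spelling applies inside region-builder callbacks, as in the bf16
demotion pattern above: the block builder b handed to the body lambda becomes
the first argument of the static create. An illustrative sketch, assuming the
arith and linalg dialects (this lambda is a standalone example, not lifted
from the patch):

    #include "mlir/Dialect/Arith/IR/Arith.h"
    #include "mlir/Dialect/Linalg/IR/Linalg.h"

    using namespace mlir;

    // Example linalg.generic body: truncate the element to bf16 and yield it,
    // using the static create form on the region builder b.
    auto truncBody = [](OpBuilder &b, Location loc, ValueRange args) {
      Value truncated =
          arith::TruncFOp::create(b, loc, b.getBF16Type(), args[0]);
      linalg::YieldOp::create(b, loc, truncated);
    };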
@@ -85,29 +85,29 @@ struct DetachElementwisePattern
     SmallVector<OpFoldResult> mixedSizes =
         tensor::getMixedSizes(rewriter, loc, outputOperand);
     auto initOp =
-        rewriter.create<tensor::EmptyOp>(loc, mixedSizes, elementType);
-    Value zero = rewriter.create<arith::ConstantOp>(
-        loc, rewriter.getZeroAttr(elementType));
-    Value fill =
-        rewriter.create<linalg::FillOp>(loc, zero, initOp.getResult()).result();
+        tensor::EmptyOp::create(rewriter, loc, mixedSizes, elementType);
+    Value zero = arith::ConstantOp::create(rewriter, loc,
+                                           rewriter.getZeroAttr(elementType));
+    Value fill = linalg::FillOp::create(rewriter, loc, zero, initOp.getResult())
+                     .result();

     // Update the contraction op to use the new zero tensor as output operand.
     rewriter.modifyOpInPlace(linalgOp,
                              [&]() { linalgOp.setDpsInitOperand(0, fill); });

     // Create a generic op to add back the original output tensor operand.
     rewriter.setInsertionPointAfter(linalgOp);
-    auto genericOp = rewriter.create<linalg::GenericOp>(
-        loc, outputType, ValueRange{linalgOp->getResult(0), outputOperand},
-        fill, maps, iterators,
-        [&](OpBuilder &b, Location nestedLoc, ValueRange args) {
+    auto genericOp = linalg::GenericOp::create(
+        rewriter, loc, outputType,
+        ValueRange{linalgOp->getResult(0), outputOperand}, fill, maps,
+        iterators, [&](OpBuilder &b, Location nestedLoc, ValueRange args) {
          Value result;
          if (llvm::isa<FloatType>(elementType)) {
-           result = b.create<arith::AddFOp>(nestedLoc, args[0], args[1]);
+           result = arith::AddFOp::create(b, nestedLoc, args[0], args[1]);
          } else {
-           result = b.create<arith::AddIOp>(nestedLoc, args[0], args[1]);
+           result = arith::AddIOp::create(b, nestedLoc, args[0], args[1]);
          }
-          b.create<linalg::YieldOp>(nestedLoc, result);
+          linalg::YieldOp::create(b, nestedLoc, result);
         });
     linalgOp->getResult(0).replaceAllUsesExcept(genericOp->getResult(0),
                                                 genericOp);
@@ -153,8 +153,8 @@ struct DetachSplatConstantOutsOperands

     Location loc = constOp.getLoc();
     Type elementType = resultType.getElementType();
-    Value emptyTensorOp = rewriter.create<tensor::EmptyOp>(
-        loc, resultType.getShape(), elementType);
+    Value emptyTensorOp = tensor::EmptyOp::create(
+        rewriter, loc, resultType.getShape(), elementType);
     TypedAttr constValue;
     if (llvm::isa<IntegerType>(elementType)) {
       constValue = rewriter.getIntegerAttr(
@@ -164,7 +164,7 @@ struct DetachSplatConstantOutsOperands
           elementType, attr.template getSplatValue<APFloat>());
     }
     Value scalarConstantOp =
-        rewriter.create<arith::ConstantOp>(loc, elementType, constValue);
+        arith::ConstantOp::create(rewriter, loc, elementType, constValue);

     Value fillOp = rewriter
                        .create<linalg::FillOp>(
@@ -87,8 +87,8 @@ static ExpandedGlobalMap expandGlobalTensorDims(Operation *rootOp,
     auto dimName =
         (global.tensorOp.getName() + "__d" + std::to_string(it.index()))
             .str();
-    auto dimOp = builder.create<IREE::Util::GlobalOp>(
-        global.tensorOp.getLoc(), dimName,
+    auto dimOp = IREE::Util::GlobalOp::create(
+        builder, global.tensorOp.getLoc(), dimName,
         /*isMutable=*/true, indexType);
     dimOp.setVisibility(global.tensorOp.getVisibility());
     symbolTable.insert(dimOp);
@@ -234,9 +234,9 @@ static void expandRegion(Region &region, SymbolTable &symbolTable,
   // Insert shape ties that we've sunk from callers.
   auto builder = OpBuilder::atBlockBegin(&block);
   for (auto &expansion : llvm::reverse(expansions)) {
-    auto tieShapeOp = builder.create<IREE::Flow::TensorTieShapeOp>(
-        region.getLoc(), expansion.tensor.getType(), expansion.tensor,
-        expansion.dynamicDims);
+    auto tieShapeOp = IREE::Flow::TensorTieShapeOp::create(
+        builder, region.getLoc(), expansion.tensor.getType(),
+        expansion.tensor, expansion.dynamicDims);
     expansion.tensor.replaceAllUsesExcept(tieShapeOp.getResult(), tieShapeOp);
   }
 }
@@ -283,9 +283,9 @@ static void retieResults(Operation *op, Operation *newOp,
         newOp->getResults().slice(newIdx, tensorType.getNumDynamicDims());
     newIdx += expandedValue.dynamicDims.size();
     tensorDimMap[expandedValue.tensor] = expandedValue;
-    auto tieShapeOp = builder.create<IREE::Flow::TensorTieShapeOp>(
-        op->getLoc(), expandedValue.tensor.getType(), expandedValue.tensor,
-        expandedValue.dynamicDims);
+    auto tieShapeOp = IREE::Flow::TensorTieShapeOp::create(
+        builder, op->getLoc(), expandedValue.tensor.getType(),
+        expandedValue.tensor, expandedValue.dynamicDims);
     oldResult.replaceAllUsesExcept(tieShapeOp.getResult(), tieShapeOp);
   }
 }
@@ -315,9 +315,9 @@ static void expandGlobalLoadOp(IREE::Util::GlobalLoadOpInterface op,
         dimOp.createLoadOp(op.getLoc(), builder).getLoadedGlobalValue());
   }
   tensorDimMap[op.getLoadedGlobalValue()] = expandedValue;
-  auto tieShapeOp = builder.create<IREE::Flow::TensorTieShapeOp>(
-      op.getLoc(), expandedValue.tensor.getType(), expandedValue.tensor,
-      expandedValue.dynamicDims);
+  auto tieShapeOp = IREE::Flow::TensorTieShapeOp::create(
+      builder, op.getLoc(), expandedValue.tensor.getType(),
+      expandedValue.tensor, expandedValue.dynamicDims);
   op.getLoadedGlobalValue().replaceAllUsesExcept(tieShapeOp.getResult(),
                                                  tieShapeOp);
 }
@@ -436,7 +436,7 @@ static void expandReturnOp(IREE::Util::ReturnOp op, IndexSet &indexSet,
   OpBuilder builder(op);
   auto operands = expandOperands(op.getLoc(), op.getOperands(), tensorDimMap,
                                  indexSet, builder);
-  builder.create<IREE::Util::ReturnOp>(op.getLoc(), operands);
+  IREE::Util::ReturnOp::create(builder, op.getLoc(), operands);
   op.erase();
 }

@@ -456,7 +456,7 @@ static void expandBranchOp(mlir::cf::BranchOp op, IndexSet &indexSet,
   OpBuilder builder(op);
   auto operands = expandOperands(op.getLoc(), op.getDestOperands(),
                                  tensorDimMap, indexSet, builder);
-  builder.create<mlir::cf::BranchOp>(op.getLoc(), op.getDest(), operands);
+  mlir::cf::BranchOp::create(builder, op.getLoc(), op.getDest(), operands);
   op.erase();
 }

@@ -465,8 +465,8 @@ static void expandCondBranchOp(mlir::cf::CondBranchOp op, IndexSet &indexSet,
   if (!usesDynamicTensors(op))
     return;
   OpBuilder builder(op);
-  builder.create<mlir::cf::CondBranchOp>(
-      op.getLoc(), op.getCondition(), op.getTrueDest(),
+  mlir::cf::CondBranchOp::create(
+      builder, op.getLoc(), op.getCondition(), op.getTrueDest(),
       expandOperands(op.getLoc(), op.getTrueDestOperands(), tensorDimMap,
                      indexSet, builder),
       op.getFalseDest(),
@@ -496,8 +496,9 @@ static void expandSelectOp(mlir::arith::SelectOp op, IndexSet &indexSet,
   auto falseValue = consumeExpandedValue(op.getLoc(), op.getFalseValue(),
                                          tensorDimMap, indexSet, builder);

-  auto selectOp = builder.create<mlir::arith::SelectOp>(
-      op.getLoc(), op.getCondition(), op.getTrueValue(), op.getFalseValue());
+  auto selectOp =
+      mlir::arith::SelectOp::create(builder, op.getLoc(), op.getCondition(),
+                                    op.getTrueValue(), op.getFalseValue());

   SmallVector<Value> selectedDims;
   for (auto [trueDynamicDims, falseDynamicDims] :
@@ -508,9 +509,9 @@
                          trueDynamicDims, falseDynamicDims)
             .getResult());
   }
-  auto tieShapeOp = builder.create<IREE::Flow::TensorTieShapeOp>(
-      selectOp.getLoc(), selectOp.getResult().getType(), selectOp.getResult(),
-      selectedDims);
+  auto tieShapeOp = IREE::Flow::TensorTieShapeOp::create(
+      builder, selectOp.getLoc(), selectOp.getResult().getType(),
+      selectOp.getResult(), selectedDims);

   op.getResult().replaceAllUsesExcept(tieShapeOp.getResult(), tieShapeOp);
   op.erase();
@@ -524,9 +525,9 @@ static void expandWhileOp(mlir::scf::WhileOp op, SymbolTable &symbolTable,
                                  indexSet, builder);
   auto resultTypes = expandTypes(op.getResultTypes());

-  auto newOp = builder.create<scf::WhileOp>(op.getLoc(), resultTypes, operands,
-                                            /*beforeBody*/ nullptr,
-                                            /*afterBody*/ nullptr);
+  auto newOp = scf::WhileOp::create(builder, op.getLoc(), resultTypes, operands,
+                                    /*beforeBody*/ nullptr,
+                                    /*afterBody*/ nullptr);

   newOp.getBefore().takeBody(op.getBefore());
   newOp.getAfter().takeBody(op.getAfter());
@@ -545,8 +546,8 @@ static void expandIfOp(mlir::scf::IfOp op, SymbolTable &symbolTable,
   OpBuilder builder(op);
   auto resultTypes = expandTypes(op.getResultTypes());

-  auto newOp = builder.create<scf::IfOp>(
-      op.getLoc(), resultTypes, op.getOperand(), op.elseBlock() != nullptr);
+  auto newOp = scf::IfOp::create(builder, op.getLoc(), resultTypes,
+                                 op.getOperand(), op.elseBlock() != nullptr);

   newOp.getBodyRegion().takeBody(op.getBodyRegion());
   expandRegion(newOp.getBodyRegion(), symbolTable, globalMap, indexSet,
@@ -566,7 +567,7 @@ static void expandScfYieldOp(mlir::scf::YieldOp op, IndexSet &indexSet,
   OpBuilder builder(op);
   auto operands = expandOperands(op.getLoc(), op.getOperands(), tensorDimMap,
                                  indexSet, builder);
-  builder.create<mlir::scf::YieldOp>(op.getLoc(), operands);
+  mlir::scf::YieldOp::create(builder, op.getLoc(), operands);
   op.erase();
 }

@@ -575,8 +576,8 @@ static void expandScfConditionOp(mlir::scf::ConditionOp op, IndexSet &indexSet,
   OpBuilder builder(op);
   auto operands = expandOperands(op.getLoc(), op.getArgs(), tensorDimMap,
                                  indexSet, builder);
-  builder.create<mlir::scf::ConditionOp>(op.getLoc(), op.getCondition(),
-                                         operands);
+  mlir::scf::ConditionOp::create(builder, op.getLoc(), op.getCondition(),
+                                 operands);
   op.erase();
 }
