
Commit 528354d

Author: Prashant Kumar

Add aten.gt.Tensor op

The `aten.gt.Tensor` op has been added to the Torch dialect, and its lowering to the Linalg dialect has been implemented.

Signed-off-by: Prashant Kumar <[email protected]>
1 parent a778f99 commit 528354d
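
As background for what this lowering implements, here is a minimal eager-PyTorch sketch of the `aten.gt.Tensor` semantics (illustrative only, not part of the commit): an elementwise greater-than between two tensors, with broadcasting and a boolean result.

import torch

x = torch.tensor([[1.0, 2.0, 3.0],
                  [4.0, 5.0, 6.0]])
y = torch.tensor([2.0, 2.0, 7.0])   # broadcast against each row of x
out = torch.gt(x, y)                # equivalent to x > y
print(out)         # tensor([[False, False, False],
                   #         [ True,  True, False]])
print(out.dtype)   # torch.bool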

File tree

3 files changed: 83 additions & 2 deletions

  e2e_testing/torchscript/elementwise.py
  lib/Conversion/TorchToLinalg/TorchToLinalg.cpp
  lib/Dialect/Torch/Transforms/RefineTypes.cpp

e2e_testing/torchscript/elementwise.py

Lines changed: 36 additions & 0 deletions
@@ -343,6 +343,42 @@ def forward(self, x):
 def ElementwiseGtScalarModule_basic(module, tu: TestUtils):
     module.forward(tu.rand(3, 5))
 
+class ElementwiseGtFloatTensorModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args([
+        None,
+        ([-1, -1], torch.float32, True),
+        ([-1], torch.float32, True),
+    ])
+    def forward(self, x, y):
+        return torch.gt(x, y)
+
+
+@register_test_case(module_factory=lambda: ElementwiseGtFloatTensorModule())
+def ElementwiseGtFloatTensorModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 5), tu.rand(5))
+
+class ElementwiseGtIntTensorModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args([
+        None,
+        ([-1, -1], torch.int64, True),
+        ([-1], torch.int64, True),
+    ])
+    def forward(self, x, y):
+        return torch.gt(x, y)
+
+
+@register_test_case(module_factory=lambda: ElementwiseGtIntTensorModule())
+def ElementwiseGtIntTensorModule_basic(module, tu: TestUtils):
+    module.forward(torch.randint(10, (3, 5)), torch.randint(10, (5,)))
+
 # ==============================================================================
 
lib/Conversion/TorchToLinalg/TorchToLinalg.cpp

Lines changed: 28 additions & 2 deletions
@@ -1669,6 +1669,32 @@ static Value createLinalgPayloadCalculationForElementwiseOp(
       return b.create<arith::MulIOp>(loc, lhs, rhs);
     }
   }
+  if (auto gtTensor = dyn_cast<AtenGtTensorOp>(op)) {
+    AtenGtTensorOp::Adaptor adaptor(operands);
+    Type lhsDtype = payloadArgs[0].getType();
+    Type rhsDtype = payloadArgs[1].getType();
+
+    // TODO: Type promotion in case of different `lhsDtype` and `rhsDtype` needs
+    // to be handled.
+    if (lhsDtype != rhsDtype)
+      gtTensor.emitError("unimplemented: different lhs and rhs dtype");
+
+    Type elementalType =
+        gtTensor.self().getType().cast<BaseTensorType>().getDtype();
+
+    if (elementalType.isa<mlir::FloatType>())
+      return b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::UGT,
+                                     payloadArgs[0], payloadArgs[1]);
+    if (IntegerType intType = elementalType.dyn_cast<mlir::IntegerType>()) {
+      if (intType.isUnsigned())
+        return b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ugt,
+                                       payloadArgs[0], payloadArgs[1]);
+      if (intType.isSigned())
+        return b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::sgt,
+                                       payloadArgs[0], payloadArgs[1]);
+    }
+    gtTensor.emitError("unimplemented: dtype isn't supported.");
+  }
   if (auto div = dyn_cast<AtenDivTensorOp>(op)) {
     AtenDivTensorOp::Adaptor adaptor(operands);
     Type dtype = converter->convertType(div.getType())
@@ -2070,7 +2096,7 @@ struct ConvertElementwiseOp : ConversionPattern {
              AtenSqrtOp, AtenFloorOp, AtenPowTensorScalarOp, AtenLog2Op,
              AtenRsqrtOp, AtenDivScalarOp, AtenAbsOp, AtenReciprocalOp,
              AtenBitwiseAndTensorOp, AtenGtScalarOp, AtenWhereSelfOp,
-             AtenCeilOp>(op))
+             AtenCeilOp, AtenGtTensorOp>(op))
       return rewriter.notifyMatchFailure(op, "not a supported elementwise op");
 
     if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
@@ -3640,7 +3666,7 @@ class ConvertTorchToLinalg
         AtenToDtypeOp, AtenClampOp, AtenRsubScalarOp, AtenLogOp, AtenSqrtOp,
         AtenFloorOp, AtenCeilOp, AtenPowTensorScalarOp, AtenLog2Op, AtenRsqrtOp,
         AtenAbsOp, AtenReciprocalOp, AtenBitwiseAndTensorOp, AtenGtScalarOp,
-        AtenWhereSelfOp>();
+        AtenWhereSelfOp, AtenGtTensorOp>();
     patterns.add<ConvertElementwiseOp>(typeConverter, context);
     target.addIllegalOp<AtenSqueezeOp>();
     patterns.add<ConvertAtenSqueezeOp>(typeConverter, context);
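
The payload above picks `arith::CmpIPredicate::ugt` for unsigned integer element types and `sgt` for signed ones, and `arith::CmpFPredicate::UGT` ("unordered or greater than", which is also true when either operand is NaN) for floats. A small Python illustration (my own example, not from the commit) of why the signed/unsigned distinction matters: the same bit pattern orders differently under the two interpretations.

# Compare 8-bit patterns as unsigned vs. signed (two's complement) integers.
a_bits, b_bits = 0xFF, 0x01                      # 0xFF is 255 unsigned, -1 signed

def as_signed8(v):
    # Reinterpret an 8-bit pattern as a signed two's-complement value.
    return v - 256 if v >= 128 else v

print(a_bits > b_bits)                           # unsigned view: 255 > 1 -> True
print(as_signed8(a_bits) > as_signed8(b_bits))   # signed view:   -1 > 1 -> False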

lib/Dialect/Torch/Transforms/RefineTypes.cpp

Lines changed: 19 additions & 0 deletions
@@ -320,6 +320,8 @@ class TypeAnalyzer : public ForwardDataFlowAnalysis<ValueKnowledge> {
                    AtenDivTensorOp, Aten__And__TensorOp, AtenEqTensorOp,
                    AtenMinimumOp, AtenMaximumOp, AtenBitwiseAndTensorOp>(op)) {
       return visitBinaryBroadcastingOp(op, operands);
+    } else if (isa<AtenGtTensorOp>(op)) {
+      return visitBinaryBroadcastingComparisonOp(op, operands);
     } else if (auto whereSelf = llvm::dyn_cast<AtenWhereSelfOp>(op)) {
       return visitAtenWhereSelfOp(whereSelf, operands);
     } else if (auto lerpTensor = llvm::dyn_cast<AtenLerpTensorOp>(op)) {
@@ -505,6 +507,8 @@ class TypeAnalyzer : public ForwardDataFlowAnalysis<ValueKnowledge> {
       Operation *op, ArrayRef<LatticeElement<ValueKnowledge> *> operands);
   ChangeResult visitBinaryBroadcastingOp(
       Operation *op, ArrayRef<LatticeElement<ValueKnowledge> *> operands);
+  ChangeResult visitBinaryBroadcastingComparisonOp(
+      Operation *op, ArrayRef<LatticeElement<ValueKnowledge> *> operands);
   ChangeResult
   visitAtenWhereSelfOp(AtenWhereSelfOp op,
                        ArrayRef<LatticeElement<ValueKnowledge> *> operands);
@@ -884,6 +888,21 @@ ChangeResult TypeAnalyzer::visitBinaryBroadcastingOp(
   return getLatticeElement(op->getResult(0)).join(knowledge);
 }
 
+ChangeResult TypeAnalyzer::visitBinaryBroadcastingComparisonOp(
+    Operation *op, ArrayRef<LatticeElement<ValueKnowledge> *> operands) {
+  auto lhs = operands[0]->getValue();
+  auto rhs = operands[1]->getValue();
+  auto knowledge =
+      ValueKnowledge::getNotNonePessimisticValueState(getContext());
+  if (lhs.hasSizes && rhs.hasSizes) {
+    knowledge.hasSizes = true;
+    knowledge.sizes.resize(std::max(lhs.sizes.size(), rhs.sizes.size()),
+                           kUnknownSize);
+  }
+  knowledge.dtype = IntegerType::get(op->getContext(), 1);
+  return getLatticeElement(op->getResult(0)).join(knowledge);
+}
+
 
 ChangeResult TypeAnalyzer::visitAtenWhereSelfOp(
     AtenWhereSelfOp op, ArrayRef<LatticeElement<ValueKnowledge> *> operands) {
   auto condition = operands[0]->getValue();
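
A quick eager-PyTorch check (illustrative only, not part of the commit) of the contract that `visitBinaryBroadcastingComparisonOp` encodes: the comparison result takes the higher operand rank with unknown sizes, and its element type is boolean regardless of the operand dtypes, matching the `i1` dtype assigned above.

import torch

x = torch.randint(10, (3, 5))   # rank-2 int64 tensor
y = torch.randint(10, (5,))     # rank-1 int64 tensor
out = torch.gt(x, y)
print(out.shape)   # torch.Size([3, 5]) -> rank is max(2, 1) = 2
print(out.dtype)   # torch.bool         -> modeled as i1 in the IR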
