diff --git a/include/Conversion/LlvmToNeura/LlvmToNeura.h b/include/Conversion/LlvmToNeura/LlvmToNeura.h new file mode 100644 index 00000000..cbe51792 --- /dev/null +++ b/include/Conversion/LlvmToNeura/LlvmToNeura.h @@ -0,0 +1,12 @@ +#ifndef NEURA_CONVERSION_LLVMTONEURA_LLVMTONEURAPASS_H +#define NEURA_CONVERSION_LLVMTONEURA_LLVMTONEURAPASS_H + +#include "mlir/Pass/Pass.h" + +namespace mlir { +namespace neura { + std::unique_ptr<mlir::Pass> createLowerLlvmToNeuraPass(); +} // namespace neura +} // namespace mlir + +#endif // NEURA_CONVERSION_LLVMTONEURA_LLVMTONEURAPASS_H diff --git a/include/NeuraDialect/NeuraOps.td b/include/NeuraDialect/NeuraOps.td index 0e90ae17..e0b01888 100644 --- a/include/NeuraDialect/NeuraOps.td +++ b/include/NeuraDialect/NeuraOps.td @@ -1,13 +1,42 @@ // NeuraOps.td - Custom operation definitions. -include "mlir/IR/OpBase.td" + include "NeuraDialect/NeuraDialect.td" // Defines an addition operation. def Neura_AddOp : Op { - let summary = "Addition operation"; + let summary = "Integer addition operation"; let opName = "add"; - let arguments = (ins F32:$lhs, F32:$rhs); - let results = (outs F32:$result); - let assemblyFormat = "$lhs `,` $rhs attr-dict `:` type($result)"; + let arguments = (ins AnyInteger:$lhs, AnyInteger:$rhs); + let results = (outs AnyInteger:$result); + // let assemblyFormat = "$lhs `,` $rhs attr-dict `:` type($result)"; + let traits = [SameOperandsAndResultElementType]; +} + +// Defines a floating-point addition operation.
+def Neura_FAddOp : Op { + let summary = "Floating addition operation"; + let opName = "fadd"; + let arguments = (ins AnyFloat:$lhs, AnyFloat:$rhs); + let results = (outs AnyFloat:$result); + // let assemblyFormat = "$lhs `,` $rhs attr-dict `:` type($result)"; + let traits = [SameOperandsAndResultElementType]; +} + +def Neura_FAddFAddOp : Op { + let summary = "Fused fadd(fadd(a, b), c)"; + let arguments = (ins AnyFloat:$a, AnyFloat:$b, AnyFloat:$c); + let results = (outs AnyFloat:$result); + // let assemblyFormat = "$a `,` $b `,` $c attr-dict `:` type($result)"; + let traits = [SameOperandsAndResultElementType]; +} + + +// Defines a move operation for data communication. +def Neura_MovOp : Op { + let summary = "Move operation"; + let opName = "mov"; + let arguments = (ins AnyType:$lhs); + let results = (outs AnyType:$result); + let assemblyFormat = "$lhs attr-dict `:` type($lhs) `->` type($result)"; // let traits = [Pure]; } diff --git a/include/Transforms/FusePatternsPass.h b/include/Transforms/FusePatternsPass.h new file mode 100644 index 00000000..d808fe36 --- /dev/null +++ b/include/Transforms/FusePatternsPass.h @@ -0,0 +1,11 @@ +#ifndef NEURA_TRANSFORMS_FUSEPATTERNSPASS_H +#define NEURA_TRANSFORMS_FUSEPATTERNSPASS_H + +#include "mlir/Pass/Pass.h" + +namespace mlir::neura { +std::unique_ptr createFusePatternsPass(); +} + +#endif // NEURA_TRANSFORMS_FUSEPATTERNSPASS_H + diff --git a/include/Transforms/InsertMovPass.h b/include/Transforms/InsertMovPass.h new file mode 100644 index 00000000..06a0befa --- /dev/null +++ b/include/Transforms/InsertMovPass.h @@ -0,0 +1,13 @@ +#ifndef NEURA_TRANSFORMS_INSERTMOVPASS_H +#define NEURA_TRANSFORMS_INSERTMOVPASS_H + +#include "mlir/Pass/Pass.h" + +namespace mlir { +namespace neura { + std::unique_ptr createInsertMovPass(); +} // namespace neura +} // namespace mlir + +#endif // NEURA_TRANSFORMS_INSERTMOVPASS_H + diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt index 30d5c055..aec92864 100644 --- a/lib/CMakeLists.txt +++ 
b/lib/CMakeLists.txt @@ -1,2 +1,3 @@ add_subdirectory(NeuraDialect) -add_subdirectory(Conversion/ArithToNeura) +add_subdirectory(Conversion) +add_subdirectory(Transforms) diff --git a/lib/Conversion/ArithToNeura/ArithToNeura.cpp b/lib/Conversion/ArithToNeura/ArithToNeuraPass.cpp similarity index 56% rename from lib/Conversion/ArithToNeura/ArithToNeura.cpp rename to lib/Conversion/ArithToNeura/ArithToNeuraPass.cpp index cfb8c216..625f1474 100644 --- a/lib/Conversion/ArithToNeura/ArithToNeura.cpp +++ b/lib/Conversion/ArithToNeura/ArithToNeuraPass.cpp @@ -7,23 +7,20 @@ #include "mlir/Pass/Pass.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" -using namespace mlir; +namespace mlir { +namespace neura { +// Uses arith2neura instead of llvm to avoid conflicts. +namespace arith2neura { -namespace { -struct ArithAddFOpLowering : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; +#include "ArithToNeuraPatterns.inc" - LogicalResult matchAndRewrite(arith::AddFOp op, - PatternRewriter &rewriter) const override { -llvm::errs() << "[cheng] step into matchAndRewriter()"; - rewriter.replaceOpWithNewOp(op, op.getType(), op.getLhs(), op.getRhs()); +} // namespace arith2neura +} // namespace neura +} // namespace mlir -llvm::errs() << "[cheng] Matched arith.addf: "; -// op.dump(); +using namespace mlir; - return success(); - } -}; +namespace { struct LowerArithToNeuraPass : public PassWrapper> { @@ -32,7 +29,7 @@ struct LowerArithToNeuraPass StringRef getArgument() const override { return "lower-arith-to-neura"; } StringRef getDescription() const override { - return "Lower arithmetic operations to Neura dialect operations"; + return "Lower arith dialect operations to Neura dialect operations"; } void getDependentDialects(DialectRegistry ®istry) const override { @@ -40,15 +37,8 @@ struct LowerArithToNeuraPass } void runOnOperation() override { - // getContext().loadDialect(); - RewritePatternSet patterns(&getContext()); - llvm::errs() << "[cheng] check 
runOnOperation: "; - getOperation().dump(); - getOperation().walk([](Operation *op) { - llvm::errs() << "[cheng] Saw op: " << op->getName() << "\n"; - }); - patterns.add(&getContext()); + mlir::neura::arith2neura::populateWithGenerated(patterns); if (failed(applyPatternsAndFoldGreedily(getOperation(), std::move(patterns)))) { signalPassFailure(); } diff --git a/lib/Conversion/ArithToNeura/ArithToNeuraPatterns.td b/lib/Conversion/ArithToNeura/ArithToNeuraPatterns.td new file mode 100644 index 00000000..785f5b47 --- /dev/null +++ b/lib/Conversion/ArithToNeura/ArithToNeuraPatterns.td @@ -0,0 +1,10 @@ +include "mlir/IR/OpBase.td" +include "mlir/IR/PatternBase.td" +include "mlir/Dialect/Arith/IR/ArithOps.td" +include "NeuraDialect/NeuraOps.td" + +def : Pat< + (Arith_AddFOp $lhs, $rhs, $_fastmath), + (Neura_FAddOp $lhs, $rhs) +>; + diff --git a/lib/Conversion/ArithToNeura/CMakeLists.txt b/lib/Conversion/ArithToNeura/CMakeLists.txt index a5eee595..c77ed4a4 100644 --- a/lib/Conversion/ArithToNeura/CMakeLists.txt +++ b/lib/Conversion/ArithToNeura/CMakeLists.txt @@ -1,7 +1,21 @@ +set(LLVM_TARGET_DEFINITIONS + ${CMAKE_CURRENT_SOURCE_DIR}/ArithToNeuraPatterns.td +) + +mlir_tablegen(ArithToNeuraPatterns.inc + -gen-rewriters + -I ${MLIR_SOURCE_DIR}/include + -I ${MLIR_BINARY_DIR}/include + -I ${CMAKE_SOURCE_DIR}/include + -I ${CMAKE_CURRENT_SOURCE_DIR} +) +add_public_tablegen_target(ArithToNeuraPatternGen) + add_mlir_library(NeuraArithToNeura - ArithToNeura.cpp + ArithToNeuraPass.cpp DEPENDS + ArithToNeuraPatternGen NeuraOpsIncGen NeuraDialectIncGen NeuraDialect @@ -16,6 +30,7 @@ add_mlir_library(NeuraArithToNeura target_include_directories(NeuraArithToNeura PUBLIC ${CMAKE_BINARY_DIR}/lib/NeuraDialect + ${CMAKE_BINARY_DIR}/lib/Conversion/ArithToNeura ${MLIR_INCLUDE_DIRS} ${LLVM_INCLUDE_DIRS} ${CMAKE_SOURCE_DIR}/include @@ -25,3 +40,4 @@ target_include_directories(NeuraArithToNeura PUBLIC target_compile_definitions(NeuraArithToNeura PRIVATE ${LLVM_DEFINITIONS} ) + diff --git 
a/lib/Conversion/CMakeLists.txt b/lib/Conversion/CMakeLists.txt new file mode 100644 index 00000000..b917c4f3 --- /dev/null +++ b/lib/Conversion/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(ArithToNeura) +add_subdirectory(LlvmToNeura) diff --git a/lib/Conversion/LlvmToNeura/CMakeLists.txt b/lib/Conversion/LlvmToNeura/CMakeLists.txt new file mode 100644 index 00000000..93748e27 --- /dev/null +++ b/lib/Conversion/LlvmToNeura/CMakeLists.txt @@ -0,0 +1,43 @@ +set(LLVM_TARGET_DEFINITIONS + ${CMAKE_CURRENT_SOURCE_DIR}/LlvmToNeuraPatterns.td +) + +mlir_tablegen(LlvmToNeuraPatterns.inc + -gen-rewriters + -I ${MLIR_SOURCE_DIR}/include + -I ${MLIR_BINARY_DIR}/include + -I ${CMAKE_SOURCE_DIR}/include + -I ${CMAKE_CURRENT_SOURCE_DIR} +) +add_public_tablegen_target(LlvmToNeuraPatternGen) + +add_mlir_library(NeuraLlvmToNeura + LlvmToNeuraPass.cpp + + DEPENDS + NeuraOpsIncGen + NeuraDialectIncGen + NeuraDialect + LlvmToNeuraPatternGen + + LINK_LIBS PUBLIC + MLIRArithDialect + MLIRFuncDialect + MLIRLLVMDialect + MLIRIR + MLIRPass + MLIRTransforms +) + +target_include_directories(NeuraLlvmToNeura PUBLIC + ${CMAKE_BINARY_DIR}/lib/NeuraDialect + ${CMAKE_BINARY_DIR}/lib/Conversion/LlvmToNeura + ${MLIR_INCLUDE_DIRS} + ${LLVM_INCLUDE_DIRS} + ${CMAKE_SOURCE_DIR}/include + ${CMAKE_BINARY_DIR}/include +) + +target_compile_definitions(NeuraLlvmToNeura + PRIVATE ${LLVM_DEFINITIONS} +) diff --git a/lib/Conversion/LlvmToNeura/LlvmToNeuraPass.cpp b/lib/Conversion/LlvmToNeura/LlvmToNeuraPass.cpp new file mode 100644 index 00000000..0ad72602 --- /dev/null +++ b/lib/Conversion/LlvmToNeura/LlvmToNeuraPass.cpp @@ -0,0 +1,53 @@ +#include "Conversion/LlvmToNeura/LlvmToNeura.h" +#include "NeuraDialect/NeuraDialect.h" +#include "NeuraDialect/NeuraOps.h" +#include "mlir/Dialect/LLVMIR/LLVMAttrs.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/LLVMIR/LLVMTypes.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Pass/Pass.h" +#include 
"mlir/Transforms/GreedyPatternRewriteDriver.h" + +namespace mlir { +namespace neura { +// Uses llvm2neura instead of llvm to avoid conflicts. +namespace llvm2neura { + +#include "LlvmToNeuraPatterns.inc" + +} // namespace llvm2neura +} // namespace neura +} // namespace mlir + +using namespace mlir; + +namespace { + +struct LowerLlvmToNeuraPass + : public PassWrapper> { + + MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LowerLlvmToNeuraPass) + + StringRef getArgument() const override { return "lower-llvm-to-neura"; } + StringRef getDescription() const override { + return "Lower LLVM operations to Neura dialect operations"; + } + + void getDependentDialects(DialectRegistry ®istry) const override { + registry.insert(); + } + + void runOnOperation() override { + RewritePatternSet patterns(&getContext()); + mlir::neura::llvm2neura::populateWithGenerated(patterns); + if (failed(applyPatternsAndFoldGreedily(getOperation(), std::move(patterns)))) { + signalPassFailure(); + } + } +}; +} // namespace + +std::unique_ptr mlir::neura::createLowerLlvmToNeuraPass() { + return std::make_unique(); +} diff --git a/lib/Conversion/LlvmToNeura/LlvmToNeuraPatterns.td b/lib/Conversion/LlvmToNeura/LlvmToNeuraPatterns.td new file mode 100644 index 00000000..9b1f0035 --- /dev/null +++ b/lib/Conversion/LlvmToNeura/LlvmToNeuraPatterns.td @@ -0,0 +1,9 @@ +include "mlir/IR/OpBase.td" +include "mlir/IR/PatternBase.td" +include "mlir/Dialect/LLVMIR/LLVMOps.td" +include "NeuraDialect/NeuraOps.td" + +def : Pat< + (LLVM_FAddOp $lhs, $rhs, $_fastmath), + (Neura_FAddOp $lhs, $rhs) +>; diff --git a/lib/NeuraDialect/CMakeLists.txt b/lib/NeuraDialect/CMakeLists.txt index f80607a1..93931a3b 100644 --- a/lib/NeuraDialect/CMakeLists.txt +++ b/lib/NeuraDialect/CMakeLists.txt @@ -5,12 +5,22 @@ add_definitions(${MLIR_DEFINITIONS}) set(LLVM_TARGET_DEFINITIONS ${CMAKE_CURRENT_SOURCE_DIR}/../../include/NeuraDialect/NeuraOps.td) -mlir_tablegen(NeuraOps.h.inc -gen-op-decls -I${CMAKE_SOURCE_DIR}/include) 
-mlir_tablegen(NeuraOps.cpp.inc -gen-op-defs -I${CMAKE_SOURCE_DIR}/include) +mlir_tablegen(NeuraOps.h.inc -gen-op-decls + -I${MLIR_SOURCE_DIR}/include + -I${CMAKE_SOURCE_DIR}/include) +mlir_tablegen(NeuraOps.cpp.inc -gen-op-defs + -I${MLIR_SOURCE_DIR}/include + -I${CMAKE_SOURCE_DIR}/include) add_public_tablegen_target(NeuraOpsIncGen) -mlir_tablegen(NeuraDialect.h.inc -gen-dialect-decls -dialect=neura -I${CMAKE_SOURCE_DIR}/include) -mlir_tablegen(NeuraDialect.cpp.inc -gen-dialect-defs -dialect=neura -I${CMAKE_SOURCE_DIR}/include) +mlir_tablegen(NeuraDialect.h.inc -gen-dialect-decls + -dialect=neura + -I${MLIR_SOURCE_DIR}/include + -I${CMAKE_SOURCE_DIR}/include) +mlir_tablegen(NeuraDialect.cpp.inc -gen-dialect-defs + -dialect=neura + -I${MLIR_SOURCE_DIR}/include + -I${CMAKE_SOURCE_DIR}/include) add_public_tablegen_target(NeuraDialectIncGen) add_public_tablegen_target(MLIRNeuraIncGen) diff --git a/lib/Transforms/CMakeLists.txt b/lib/Transforms/CMakeLists.txt new file mode 100644 index 00000000..1635fc21 --- /dev/null +++ b/lib/Transforms/CMakeLists.txt @@ -0,0 +1,23 @@ +# set(LLVM_TARGET_DEFINITIONS ${CMAKE_CURRENT_SOURCE_DIR}/FusePatterns.td) +# +# mlir_tablegen(FusePatterns.inc -gen-rewriters +# -I ${MLIR_SOURCE_DIR}/include +# -I ${MLIR_BINARY_DIR}/include +# -I ${CMAKE_SOURCE_DIR}/include +# -I ${CMAKE_CURRENT_SOURCE_DIR} +# ) +# +# add_public_tablegen_target(NeuraFusePatternsGen) + +add_mlir_library(NeuraTransforms + InsertMovPass.cpp + FusePatternsPass.cpp + + LINK_LIBS PUBLIC + MLIRIR + MLIRFuncDialect + MLIRPass + MLIRSupport + MLIRTransformUtils + NeuraDialect +) diff --git a/lib/Transforms/FusePatternsPass.cpp b/lib/Transforms/FusePatternsPass.cpp new file mode 100644 index 00000000..b7975bb1 --- /dev/null +++ b/lib/Transforms/FusePatternsPass.cpp @@ -0,0 +1,55 @@ +#include "mlir/Pass/Pass.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include 
"NeuraDialect/NeuraOps.h" + +using namespace mlir; + +namespace { + +// Rewrites fadd(fadd(a, b), c) into the fused neura.fadd_fadd op. +struct FuseFAddFAddPattern : public RewritePattern { + FuseFAddFAddPattern(MLIRContext *ctx) + : RewritePattern("neura.fadd", /*benefit=*/1, ctx) {} + + LogicalResult matchAndRewrite(Operation *op, PatternRewriter &rewriter) const override { + auto first = dyn_cast(op); + if (!first || !first->hasOneUse()) return failure(); + + auto user = dyn_cast(*first->getUsers().begin()); + if (!user) return failure(); + + // Only fuses when the inner fadd feeds the *lhs* of its user. If it fed + // the rhs, the fused operand order below would be wrong and eraseOp + // would delete an op whose result the fused op still uses. + if (user.getLhs() != first.getResult()) return failure(); + + Location loc = user.getLoc(); + Type type = user.getType(); + + auto fused = rewriter.create(loc, type, + first.getLhs(), first.getRhs(), user.getRhs()); + + rewriter.replaceOp(user, fused.getResult()); + rewriter.eraseOp(first); + return success(); + } +}; + +struct FusePatternsPass : public PassWrapper> { + MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(FusePatternsPass) + + StringRef getArgument() const override { return "fuse-patterns"; } + StringRef getDescription() const override { return "Apply Neura fusion patterns."; } + + void runOnOperation() override { + RewritePatternSet patterns(&getContext()); + patterns.add(&getContext()); + if (failed(applyPatternsAndFoldGreedily(getOperation(), std::move(patterns)))) + signalPassFailure(); + } +}; + +} // namespace + +namespace mlir::neura { +std::unique_ptr createFusePatternsPass() { + return std::make_unique(); +} +} // namespace mlir::neura + diff --git a/lib/Transforms/InsertMovPass.cpp b/lib/Transforms/InsertMovPass.cpp new file mode 100644 index 00000000..0ff05656 --- /dev/null +++ b/lib/Transforms/InsertMovPass.cpp @@ -0,0 +1,86 @@ +#include "NeuraDialect/NeuraDialect.h" +#include "NeuraDialect/NeuraOps.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" + +using namespace mlir; + +namespace { +struct InsertMovForNeuraOps : public RewritePattern { + InsertMovForNeuraOps(MLIRContext *context) + :
RewritePattern(/*matchAnyOpTypeTag=*/MatchAnyOpTypeTag(), /*benefit=*/1, context) {} + + LogicalResult matchAndRewrite(Operation *op, PatternRewriter &rewriter) const override { + if (op->getDialect()->getNamespace() != "neura" || + isa(op)) { + return failure(); + } + + // Skips ops whose operands are all already produced by neura.mov. + bool allInputsAreMov = llvm::all_of(op->getOperands(), [](Value v) { + return isa_and_nonnull(v.getDefiningOp()); + }); + if (allInputsAreMov) { + return failure(); + } + + Location loc = op->getLoc(); + + // Wraps each operand in mov. Operands already fed by a mov are kept + // as-is, so partially wrapped ops (e.g. hand-written IR or a rerun of + // this pass) are completed instead of asserting or double-wrapping. + SmallVector newOperands; + for (Value operand : op->getOperands()) { + if (isa_and_nonnull<neura::MovOp>(operand.getDefiningOp())) { + newOperands.push_back(operand); + continue; + } + auto mov = rewriter.create(loc, operand.getType(), operand); + newOperands.push_back(mov); + } + + // Clones op with new operands.
+ OperationState state(loc, op->getName()); + state.addOperands(newOperands); + state.addTypes(op->getResultTypes()); + state.addAttributes(op->getAttrs()); + + Operation *newOp = rewriter.create(state); + rewriter.replaceOp(op, newOp->getResults()); + return success(); + } +}; + +struct InsertMovPass + : public PassWrapper> { + MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(InsertMovPass) + + StringRef getArgument() const override { return "insert-mov"; } + StringRef getDescription() const override { + return "Insert neura.mov before and after all neura dialect operations."; + } + + void getDependentDialects(DialectRegistry ®istry) const override { + registry.insert(); + } + + void runOnOperation() override { + RewritePatternSet patterns(&getContext()); + patterns.add(&getContext()); + if (failed(applyPatternsAndFoldGreedily(getOperation(), std::move(patterns)))) + signalPassFailure(); + } +}; +} // namespace + +namespace mlir { +namespace neura { + +std::unique_ptr createInsertMovPass() { + return std::make_unique(); +} + +} // namespace neura +} // namespace mlir diff --git a/test/arith2neura/add.mlir b/test/arith2neura/add.mlir index bf8814dc..668e8b3b 100644 --- a/test/arith2neura/add.mlir +++ b/test/arith2neura/add.mlir @@ -3,6 +3,6 @@ func.func @test(%a: f32) -> f32 { %b = arith.constant 2.0 : f32 %res = arith.addf %a, %b : f32 - // CHECK: neura.add + // CHECK: neura.fadd return %res : f32 } diff --git a/test/neura/arith_add.mlir b/test/neura/arith_add.mlir new file mode 100644 index 00000000..9cdc6977 --- /dev/null +++ b/test/neura/arith_add.mlir @@ -0,0 +1,10 @@ +// RUN: mlir-neura-opt --lower-arith-to-neura --insert-mov %s | FileCheck %s + +func.func @test(%a: f32) -> f32 { + %b = arith.constant 2.0 : f32 + %res = arith.addf %a, %b : f32 + // CHECK: neura.mov %arg0 : f32 -> f32 + // CHECK: neura.mov %cst : f32 -> f32 + // CHECK: neura.fadd + return %res : f32 +} diff --git a/test/neura/fadd_fadd.mlir b/test/neura/fadd_fadd.mlir new file mode 100644 index 
00000000..63e2b5e0 --- /dev/null +++ b/test/neura/fadd_fadd.mlir @@ -0,0 +1,9 @@ +// RUN: mlir-neura-opt --lower-arith-to-neura --fuse-patterns --insert-mov %s | FileCheck %s + +func.func @test(%a: f32, %b: f32) -> f32 { + %c = arith.constant 2.0 : f32 + %temp = arith.addf %a, %b : f32 + %res = arith.addf %temp, %c : f32 + // CHECK: neura.fadd_fadd + return %res : f32 +} diff --git a/test/neura/llvm_add.mlir b/test/neura/llvm_add.mlir new file mode 100644 index 00000000..4c37a665 --- /dev/null +++ b/test/neura/llvm_add.mlir @@ -0,0 +1,10 @@ +// RUN: mlir-neura-opt --lower-llvm-to-neura --insert-mov %s | FileCheck %s + +func.func @test(%a: f32) -> f32 { + %b = llvm.mlir.constant(2.0 : f32) : f32 + %res = llvm.fadd %a, %b : f32 + // CHECK: [[LHS:%.*]] = neura.mov %{{.*}} : f32 -> f32 + // CHECK: [[RHS:%.*]] = neura.mov %{{.*}} : f32 -> f32 + // CHECK: [[RES:%.*]] = "neura.fadd"([[LHS]], [[RHS]]) + return %res : f32 +} diff --git a/test/test.mlir b/test/test.mlir index 829db0c4..764d2e95 100644 --- a/test/test.mlir +++ b/test/test.mlir @@ -3,10 +3,10 @@ func.func @test() -> f32 { %a = arith.constant 1.0 : f32 %b = arith.constant 2.0 : f32 - %res = neura.add %a, %b : f32 + %res = "neura.fadd" (%a, %b) : (f32, f32) -> f32 // Checks the expected lowered operation. -// CHECK: neura.add +// CHECK: neura.fadd return %res : f32 } diff --git a/tools/mlir-neura-opt/CMakeLists.txt b/tools/mlir-neura-opt/CMakeLists.txt index 8f617649..5c774f2f 100644 --- a/tools/mlir-neura-opt/CMakeLists.txt +++ b/tools/mlir-neura-opt/CMakeLists.txt @@ -6,15 +6,18 @@ add_executable(mlir-neura-opt # Links MLIR libraries. 
target_link_libraries(mlir-neura-opt PRIVATE - MLIROptLib # MLIR optimizer library + MLIRDialect # MLIR Dialect MLIRIR # MLIR Core IR + MLIRLLVMDialect + MLIROptLib # MLIR optimizer library MLIRSupport # MLIR Support utilities MLIRTransforms # MLIR transformation passes - MLIRDialect # MLIR Dialect NeuraDialect # Custom dialect MLIRFuncDialect # Builtin dialect required by custom dialect MLIRArithDialect NeuraArithToNeura + NeuraLlvmToNeura + NeuraTransforms ) # Includes directories. diff --git a/tools/mlir-neura-opt/mlir-neura-opt.cpp b/tools/mlir-neura-opt/mlir-neura-opt.cpp index 85df2f1e..b331313e 100644 --- a/tools/mlir-neura-opt/mlir-neura-opt.cpp +++ b/tools/mlir-neura-opt/mlir-neura-opt.cpp @@ -1,12 +1,16 @@ // tools/mlir-neura-opt/mlir-neura-opt.cpp +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/InitAllDialects.h" #include "mlir/InitAllPasses.h" #include "mlir/Support/FileUtilities.h" #include "mlir/Support/LogicalResult.h" #include "mlir/Tools/mlir-opt/MlirOptMain.h" #include "Conversion/ArithToNeura/ArithToNeura.h" +#include "Conversion/LlvmToNeura/LlvmToNeura.h" #include "NeuraDialect/NeuraDialect.h" +#include "Transforms/InsertMovPass.h" +#include "Transforms/FusePatternsPass.h" int main(int argc, char **argv) { // Registers MLIR dialects. @@ -14,10 +18,21 @@ int main(int argc, char **argv) { registry.insert(); registry.insert(); registry.insert(); + registry.insert(); mlir::registerPass([]() -> std::unique_ptr { return mlir::neura::createLowerArithToNeuraPass(); }); + mlir::registerPass([]() -> std::unique_ptr { + return mlir::neura::createLowerLlvmToNeuraPass(); + }); + mlir::registerPass([]() -> std::unique_ptr { + return mlir::neura::createInsertMovPass(); + }); + + mlir::registerPass([]() -> std::unique_ptr { + return mlir::neura::createFusePatternsPass(); + }); // Runs the MLIR optimizer. return mlir::asMainReturnCode(