Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions include/Conversion/LlvmToNeura/LlvmToNeura.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
#ifndef NEURA_CONVERSION_LLVMTONEURA_LLVMTONEURAPASS_H
#define NEURA_CONVERSION_LLVMTONEURA_LLVMTONEURAPASS_H

#include "mlir/Pass/Pass.h"

namespace mlir {
namespace neura {
/// Creates the pass that lowers LLVM dialect operations to their Neura
/// dialect equivalents (registered as "lower-llvm-to-neura").
std::unique_ptr<Pass> createLowerLlvmToNeuraPass();
} // namespace neura
} // namespace mlir

#endif // NEURA_CONVERSION_LLVMTONEURA_LLVMTONEURAPASS_H
10 changes: 10 additions & 0 deletions include/NeuraDialect/NeuraOps.td
Original file line number Diff line number Diff line change
Expand Up @@ -11,3 +11,13 @@ def Neura_AddOp : Op<NeuraDialect, "add"> {
let assemblyFormat = "$lhs `,` $rhs attr-dict `:` type($result)";
// let traits = [Pure];
}

// Defines a move operation for data communication.
// Takes a single value of any type and forwards it, producing a result that
// may carry a different type (the assembly format prints both types).
def Neura_MovOp : Op<NeuraDialect, "mov"> {
let summary = "Move operation";
// NOTE(review): ODS normally derives the mnemonic from the second Op<>
// template argument ("mov" above); confirm the dialect base class actually
// consumes `opName` — it may be redundant here.
let opName = "mov";
// Single input operand of any type.
let arguments = (ins AnyType:$lhs);
// Single result of any type; not constrained to match the operand type.
let results = (outs AnyType:$result);
let assemblyFormat = "$lhs attr-dict `:` type($lhs) `->` type($result)";
// let traits = [Pure];
}
13 changes: 13 additions & 0 deletions include/Transforms/InsertMovPass.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
#ifndef NEURA_TRANSFORMS_INSERTMOVPASS_H
#define NEURA_TRANSFORMS_INSERTMOVPASS_H

#include "mlir/Pass/Pass.h"

namespace mlir {
namespace neura {
/// Creates the pass that inserts neura.mov ops on the operands of Neura
/// dialect operations (registered as "insert-mov").
std::unique_ptr<mlir::Pass> createInsertMovPass();
} // namespace neura
} // namespace mlir

#endif // NEURA_TRANSFORMS_INSERTMOVPASS_H

3 changes: 2 additions & 1 deletion lib/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
add_subdirectory(NeuraDialect)
add_subdirectory(Conversion/ArithToNeura)
add_subdirectory(Conversion)
add_subdirectory(Transforms)
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,7 @@ struct ArithAddFOpLowering : public OpRewritePattern<arith::AddFOp> {

LogicalResult matchAndRewrite(arith::AddFOp op,
PatternRewriter &rewriter) const override {
llvm::errs() << "[cheng] step into matchAndRewriter()";
rewriter.replaceOpWithNewOp<neura::AddOp>(op, op.getType(), op.getLhs(), op.getRhs());

llvm::errs() << "[cheng] Matched arith.addf: ";
// op.dump();

return success();
}
};
Expand All @@ -32,22 +27,16 @@ struct LowerArithToNeuraPass

StringRef getArgument() const override { return "lower-arith-to-neura"; }
StringRef getDescription() const override {
return "Lower arithmetic operations to Neura dialect operations";
return "Lower arith dialect operations to Neura dialect operations";
}

void getDependentDialects(DialectRegistry &registry) const override {
registry.insert<mlir::neura::NeuraDialect>();
}

void runOnOperation() override {
// getContext().loadDialect<mlir::neura::NeuraDialect>();

RewritePatternSet patterns(&getContext());
llvm::errs() << "[cheng] check runOnOperation: ";
getOperation().dump();
getOperation().walk([](Operation *op) {
llvm::errs() << "[cheng] Saw op: " << op->getName() << "\n";
});
patterns.add<ArithAddFOpLowering>(&getContext());
if (failed(applyPatternsAndFoldGreedily(getOperation(), std::move(patterns)))) {
signalPassFailure();
Expand Down
2 changes: 1 addition & 1 deletion lib/Conversion/ArithToNeura/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
add_mlir_library(NeuraArithToNeura
ArithToNeura.cpp
ArithToNeuraPass.cpp

DEPENDS
NeuraOpsIncGen
Expand Down
2 changes: 2 additions & 0 deletions lib/Conversion/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
add_subdirectory(ArithToNeura)
add_subdirectory(LlvmToNeura)
28 changes: 28 additions & 0 deletions lib/Conversion/LlvmToNeura/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# Library implementing the LLVM-to-Neura lowering pass.
add_mlir_library(NeuraLlvmToNeura
LlvmToNeuraPass.cpp

# Generated Neura dialect/op declarations must exist before compiling.
DEPENDS
NeuraOpsIncGen
NeuraDialectIncGen
NeuraDialect

LINK_LIBS PUBLIC
MLIRArithDialect
MLIRFuncDialect
MLIRLLVMDialect
MLIRIR
MLIRPass
MLIRTransforms
)

# Expose both the source headers and the TableGen-generated headers.
target_include_directories(NeuraLlvmToNeura PUBLIC
${CMAKE_BINARY_DIR}/lib/NeuraDialect
${MLIR_INCLUDE_DIRS}
${LLVM_INCLUDE_DIRS}
${CMAKE_SOURCE_DIR}/include
${CMAKE_BINARY_DIR}/include
)

# Propagate LLVM's preprocessor definitions (e.g. NDEBUG policy).
target_compile_definitions(NeuraLlvmToNeura
PRIVATE ${LLVM_DEFINITIONS}
)
51 changes: 51 additions & 0 deletions lib/Conversion/LlvmToNeura/LlvmToNeuraPass.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
#include "Conversion/LlvmToNeura/LlvmToNeura.h"
#include "NeuraDialect/NeuraDialect.h"
#include "NeuraDialect/NeuraOps.h"
#include "mlir/Dialect/LLVMIR/LLVMAttrs.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

using namespace mlir;

namespace {
/// Rewrites llvm.fadd into the equivalent neura.add, preserving the result
/// type and the original operand order.
struct LlvmAddFOpLowering : public OpRewritePattern<mlir::LLVM::FAddOp> {
using OpRewritePattern::OpRewritePattern;

LogicalResult matchAndRewrite(mlir::LLVM::FAddOp op,
PatternRewriter &rewriter) const override {
// Build the replacement add at the same location, then swap it in.
auto neuraAdd = rewriter.create<neura::AddOp>(
op.getLoc(), op.getType(), op.getLhs(), op.getRhs());
rewriter.replaceOp(op, neuraAdd.getResult());
return success();
}
};

/// Function-scoped pass that greedily applies the LLVM-to-Neura lowering
/// patterns (currently only llvm.fadd -> neura.add).
struct LowerLlvmToNeuraPass
: public PassWrapper<LowerLlvmToNeuraPass, OperationPass<func::FuncOp>> {

MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LowerLlvmToNeuraPass)

StringRef getArgument() const override { return "lower-llvm-to-neura"; }
StringRef getDescription() const override {
return "Lower LLVM operations to Neura dialect operations";
}

// The pass creates Neura ops, so the dialect must be loaded up front.
void getDependentDialects(DialectRegistry &registry) const override {
registry.insert<mlir::neura::NeuraDialect>();
}

void runOnOperation() override {
RewritePatternSet patterns(&getContext());
patterns.add<LlvmAddFOpLowering>(&getContext());
// Greedy driver failure (non-convergence) is surfaced as pass failure.
if (failed(applyPatternsAndFoldGreedily(getOperation(), std::move(patterns)))) {
signalPassFailure();
}
}
};
} // namespace

/// Factory for the LLVM-to-Neura lowering pass declared in LlvmToNeura.h.
std::unique_ptr<Pass> mlir::neura::createLowerLlvmToNeuraPass() {
return std::make_unique<LowerLlvmToNeuraPass>();
}
11 changes: 11 additions & 0 deletions lib/Transforms/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# Library holding Neura-dialect transformation passes (currently insert-mov).
add_mlir_library(NeuraTransforms
InsertMovPass.cpp

LINK_LIBS PUBLIC
MLIRIR
MLIRFuncDialect
MLIRPass
MLIRSupport
MLIRTransformUtils
NeuraDialect
)
86 changes: 86 additions & 0 deletions lib/Transforms/InsertMovPass.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
#include "NeuraDialect/NeuraDialect.h"
#include "NeuraDialect/NeuraOps.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"

using namespace mlir;

namespace {
/// Wraps every operand of a Neura dialect op in a neura.mov, modeling
/// explicit data movement. Matches any op (MatchAnyOpTypeTag) and filters
/// inside matchAndRewrite.
struct InsertMovForNeuraOps : public RewritePattern {
InsertMovForNeuraOps(MLIRContext *context)
: RewritePattern(/*matchAnyOpTypeTag=*/MatchAnyOpTypeTag(), /*benefit=*/1, context) {}

LogicalResult matchAndRewrite(Operation *op, PatternRewriter &rewriter) const override {
// Only rewrite Neura ops; skip the inserted movs themselves so the
// greedy driver does not recurse on our own output.
if (op->getDialect()->getNamespace() != "neura" ||
isa<neura::MovOp>(op)) {
return failure();
}

// Fixpoint condition: once every operand is fed by a mov there is
// nothing left to do for this op.
bool allInputsAreMov = llvm::all_of(op->getOperands(), [](Value v) {
return isa_and_nonnull<neura::MovOp>(v.getDefiningOp());
});
if (allInputsAreMov) {
return failure();
}

Location loc = op->getLoc();

// Wraps only the operands that are not already produced by a mov.
// (The previous version asserted that no operand was mov-fed, which
// crashed on ops with a mix of wrapped and unwrapped operands, e.g.
// input IR that already contains some neura.mov ops.)
SmallVector<Value> newOperands;
for (Value operand : op->getOperands()) {
if (isa_and_nonnull<neura::MovOp>(operand.getDefiningOp())) {
newOperands.push_back(operand);
continue;
}
auto mov = rewriter.create<neura::MovOp>(loc, operand.getType(), operand);
newOperands.push_back(mov);
}

// Clones the op with the wrapped operands; results and attributes are
// carried over unchanged so all uses stay valid after replaceOp.
OperationState state(loc, op->getName());
state.addOperands(newOperands);
state.addTypes(op->getResultTypes());
state.addAttributes(op->getAttrs());

Operation *newOp = rewriter.create(state);
rewriter.replaceOp(op, newOp->getResults());
return success();
}
};

/// Function-scoped pass that greedily applies InsertMovForNeuraOps until
/// every Neura op's operands are routed through neura.mov.
struct InsertMovPass
: public PassWrapper<InsertMovPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(InsertMovPass)

StringRef getArgument() const override { return "insert-mov"; }
StringRef getDescription() const override {
return "Insert neura.mov before and after all neura dialect operations.";
}

// The pass creates neura.mov ops, so the dialect must be loaded up front.
void getDependentDialects(DialectRegistry &registry) const override {
registry.insert<mlir::neura::NeuraDialect>();
}

void runOnOperation() override {
RewritePatternSet patterns(&getContext());
patterns.add<InsertMovForNeuraOps>(&getContext());
// Greedy driver failure (non-convergence) is surfaced as pass failure.
if (failed(applyPatternsAndFoldGreedily(getOperation(), std::move(patterns))))
signalPassFailure();
}
};
} // namespace

namespace mlir {
namespace neura {

/// Factory for the insert-mov pass declared in Transforms/InsertMovPass.h.
std::unique_ptr<Pass> createInsertMovPass() {
return std::make_unique<InsertMovPass>();
}

} // namespace neura
} // namespace mlir
10 changes: 10 additions & 0 deletions test/neura/arith_add.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
// RUN: mlir-neura-opt --lower-arith-to-neura --insert-mov %s | FileCheck %s

// Checks that arith.addf lowers to neura.add and that both of its inputs
// (the block argument and the constant) are routed through neura.mov.
func.func @test(%a: f32) -> f32 {
%b = arith.constant 2.0 : f32
%res = arith.addf %a, %b : f32
// CHECK: neura.mov %arg0 : f32 -> f32
// CHECK: neura.mov %cst : f32 -> f32
// CHECK: neura.add
return %res : f32
}
10 changes: 10 additions & 0 deletions test/neura/llvm_add.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
// RUN: mlir-neura-opt --lower-llvm-to-neura --insert-mov %s | FileCheck %s

// Checks that llvm.fadd lowers to neura.add and that the captured mov
// results are the exact operands of the resulting add.
func.func @test(%a: f32) -> f32 {
%b = llvm.mlir.constant(2.0 : f32) : f32
%res = llvm.fadd %a, %b : f32
// CHECK: [[LHS:%.*]] = neura.mov %{{.*}} : f32 -> f32
// CHECK: [[RHS:%.*]] = neura.mov %{{.*}} : f32 -> f32
// CHECK: [[RES:%.*]] = neura.add [[LHS]], [[RHS]] : f32
return %res : f32
}
7 changes: 5 additions & 2 deletions tools/mlir-neura-opt/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -6,15 +6,18 @@ add_executable(mlir-neura-opt

# Links MLIR libraries.
# Links MLIR libraries.
target_link_libraries(mlir-neura-opt PRIVATE
MLIRDialect # MLIR Dialect
MLIRIR # MLIR Core IR
MLIRLLVMDialect
MLIROptLib # MLIR optimizer library
MLIRSupport # MLIR Support utilities
MLIRTransforms # MLIR transformation passes
NeuraDialect # Custom dialect
MLIRFuncDialect # Builtin dialect required by custom dialect
MLIRArithDialect
NeuraArithToNeura # arith -> neura lowering pass library
NeuraLlvmToNeura # llvm -> neura lowering pass library
NeuraTransforms # insert-mov and other neura transforms
)

# Includes directories.
Expand Down
10 changes: 10 additions & 0 deletions tools/mlir-neura-opt/mlir-neura-opt.cpp
Original file line number Diff line number Diff line change
@@ -1,23 +1,33 @@
// tools/mlir-neura-opt/mlir-neura-opt.cpp

#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/InitAllDialects.h"
#include "mlir/InitAllPasses.h"
#include "mlir/Support/FileUtilities.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Tools/mlir-opt/MlirOptMain.h"
#include "Conversion/ArithToNeura/ArithToNeura.h"
#include "Conversion/LlvmToNeura/LlvmToNeura.h"
#include "NeuraDialect/NeuraDialect.h"
#include "Transforms/InsertMovPass.h"

int main(int argc, char **argv) {
// Registers MLIR dialects.
mlir::DialectRegistry registry;
registry.insert<mlir::neura::NeuraDialect>();
registry.insert<mlir::func::FuncDialect>();
registry.insert<mlir::arith::ArithDialect>();
registry.insert<mlir::LLVM::LLVMDialect>();

mlir::registerPass([]() -> std::unique_ptr<mlir::Pass> {
return mlir::neura::createLowerArithToNeuraPass();
});
mlir::registerPass([]() -> std::unique_ptr<mlir::Pass> {
return mlir::neura::createLowerLlvmToNeuraPass();
});
mlir::registerPass([]() -> std::unique_ptr<mlir::Pass> {
return mlir::neura::createInsertMovPass();
});

// Runs the MLIR optimizer.
return mlir::asMainReturnCode(
Expand Down