Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion lib/NeuraDialect/Transforms/GenerateCodePass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,9 @@ struct GenerateCodePass

for (auto func : module.getOps<func::FuncOp>()) {
auto accel_attr = func->getAttrOfType<StringAttr>("accelerator");
if (!accel_attr || accel_attr.getValue() != "neura")
if (!accel_attr || accel_attr.getValue() != "neura") {
continue;
}

llvm::json::Object func_obj;
func_obj["name"] = func.getName().str();
Expand Down
10 changes: 7 additions & 3 deletions lib/NeuraDialect/Transforms/LeveragePredicatedValuePass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,11 @@ struct LeveragePredicatedValuePass
ModuleOp module = getOperation();

// Processes each function.
module.walk([&](func::FuncOp func) {
module.walk([&](FunctionOpInterface func) {
auto accel_attr = func->getAttrOfType<StringAttr>("accelerator");
if (!accel_attr || accel_attr.getValue() != "neura") {
return;
}
// Converts block argument types to predicated values.
func.walk([&](Block *block) {
// Skips the entry (first) block of the function.
Expand Down Expand Up @@ -70,8 +74,8 @@ struct LeveragePredicatedValuePass

private:
// Gets operations in topological order.
void getOperationsInTopologicalOrder(func::FuncOp func,
SmallVector<Operation*> &ordered) {
void getOperationsInTopologicalOrder(FunctionOpInterface func,
SmallVector<Operation*> &ordered) {
DenseSet<Operation*> visited;
func.walk<WalkOrder::PreOrder>([&](Operation *op) {
// Uses standard DFS to build topological order.
Expand Down
78 changes: 49 additions & 29 deletions lib/NeuraDialect/Transforms/TransformCtrlToDataFlowPass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
#include "NeuraDialect/NeuraPasses.h"
#include "NeuraDialect/NeuraTypes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Dominance.h"
Expand Down Expand Up @@ -132,12 +133,13 @@ struct ControlFlowInfo {

// Checks if all the live-out values in a block are dominated by the block's
// arguments.
void assertLiveOutValuesDominatedByBlockArgs(func::FuncOp &func) {
void assertLiveOutValuesDominatedByBlockArgs(Region &region) {
llvm::errs()
<< "[ctrl2data] Asserting live-out values dominated by block arguments\n";
for (Block &block : func.getBlocks()) {
if (&block == &func.getBody().front())
for (Block &block : region) {
if (&block == &region.front()) {
continue;
}

llvm::errs() << "[ctrl2data] Checking block: " << block << "\n";
DenseSet<Value> live_out_values;
Expand Down Expand Up @@ -199,9 +201,9 @@ void assertLiveOutValuesDominatedByBlockArgs(func::FuncOp &func) {
}

// Builds control flow info for the given function.
void buildControlFlowInfo(func::FuncOp &func, ControlFlowInfo &ctrl_info,
void buildControlFlowInfo(Region &region, ControlFlowInfo &ctrl_info,
DominanceInfo &dom_info) {
for (Block &block : func.getBlocks()) {
for (Block &block : region) {
Operation *terminator = block.getTerminator();

if (auto cond_br = dyn_cast<neura::CondBr>(terminator)) {
Expand Down Expand Up @@ -305,12 +307,11 @@ Value getPrecessedCondition(Value condition, bool is_not_condition,
return not_condition;
}

void createReserveAndPhiOps(
func::FuncOp &func, ControlFlowInfo &ctrl_info,
llvm::MapVector<BlockArgument, Value> &arg_to_reserve,
llvm::MapVector<BlockArgument, Value> &arg_to_phi_result,
OpBuilder &builder) {
DominanceInfo dom_info(func);
void createReserveAndPhiOps(Region &region, ControlFlowInfo &ctrl_info,
llvm::MapVector<BlockArgument, Value> &arg_to_reserve,
llvm::MapVector<BlockArgument, Value> &arg_to_phi_result,
OpBuilder &builder) {
DominanceInfo dom_info(region.getParentOp());

// ================================================
// Step 1: Categorizes edges into six types.
Expand Down Expand Up @@ -574,18 +575,18 @@ void createReserveAndPhiOps(
}

// Transforms control flow into data flow.
void transformControlFlowToDataFlow(func::FuncOp &func,
void transformControlFlowToDataFlow(Region &region,
ControlFlowInfo &ctrl_info,
DominanceInfo &dom_info,
OpBuilder &builder) {

// Asserts that all live-out values are dominated by block arguments.
assertLiveOutValuesDominatedByBlockArgs(func);
assertLiveOutValuesDominatedByBlockArgs(region);

// Creates reserve and phi operations for each block argument.
llvm::MapVector<BlockArgument, Value> arg_to_reserve;
llvm::MapVector<BlockArgument, Value> arg_to_phi_result;
createReserveAndPhiOps(func, ctrl_info, arg_to_reserve, arg_to_phi_result,
createReserveAndPhiOps(region, ctrl_info, arg_to_reserve, arg_to_phi_result,
builder);

// Replaces block arguments with phi results.
Expand All @@ -596,9 +597,9 @@ void transformControlFlowToDataFlow(func::FuncOp &func,
}

// Flattens blocks into the entry block.
Block *entryBlock = &func.getBody().front();
Block *entryBlock = &region.front();
SmallVector<Block *> blocks_to_flatten;
for (Block &block : func.getBody()) {
for (Block &block : region) {
if (&block != entryBlock)
blocks_to_flatten.push_back(&block);
}
Expand Down Expand Up @@ -650,23 +651,42 @@ struct TransformCtrlToDataFlowPass

// Declares the dialects this pass may introduce operations from, so the
// pass manager loads them before the pass runs.
void getDependentDialects(DialectRegistry &registry) const override {
// The pass emits neura ops when transforming control flow to data flow.
registry.insert<mlir::neura::NeuraDialect>();
// Registered because the pass also walks/handles LLVM::LLVMFuncOp bodies.
registry.insert<mlir::LLVM::LLVMDialect>();
}

void runOnOperation() override {
ModuleOp module = getOperation();
module.walk([&](func::FuncOp func) {
OpBuilder builder(func.getContext());
GrantPredicateInEntryBlock(&func.getBody().front(), builder);

DominanceInfo dom_info(func);

// Step 1: Analyzes the control flow and creates control flow info
// struct.
ControlFlowInfo ctrl_info;
buildControlFlowInfo(func, ctrl_info, dom_info);

// Step 2: Transforms control flow into data flow.
transformControlFlowToDataFlow(func, ctrl_info, dom_info, builder);
module.walk([&](Operation *op) {
Region *region = nullptr;
DominanceInfo domInfo;
OpBuilder builder(op->getContext());

if (auto func = dyn_cast<func::FuncOp>(op)) {
auto accel_attr = func->getAttrOfType<StringAttr>("accelerator");
if (!accel_attr || accel_attr.getValue() != "neura") {
return;
}
region = &func.getBody();
domInfo = DominanceInfo(func);
GrantPredicateInEntryBlock(&region->front(), builder);
assertLiveOutValuesDominatedByBlockArgs(*region);
} else if (auto llvmFunc = dyn_cast<LLVM::LLVMFuncOp>(op)) {
if (llvmFunc.isDeclaration()) return;
auto accel_attr = llvmFunc->getAttrOfType<StringAttr>("accelerator");
if (!accel_attr || accel_attr.getValue() != "neura") {
return;
}
region = &llvmFunc.getBody();
domInfo = DominanceInfo(llvmFunc);
GrantPredicateInEntryBlock(&region->front(), builder);
assertLiveOutValuesDominatedByBlockArgs(*region);
// Skips SSA live-out dominance assert.
} else {
return;
}
ControlFlowInfo ctrlInfo;
buildControlFlowInfo(*region, ctrlInfo, domInfo);
transformControlFlowToDataFlow(*region, ctrlInfo, domInfo, builder);
});
}
};
Expand Down
30 changes: 26 additions & 4 deletions test/neura/for_loop/test.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,36 @@

// TODO: Enable --insert-mov once the backward ctrl flow mov is supported.
// Lowers to neura.
// TODO: Make `--leverage-predicated-value` work. Segmentation fault for now.
// https://github.com/coredac/dataflow/issues/84.
// RUN: mlir-neura-opt \
// RUN: --assign-accelerator \
// RUN: --lower-llvm-to-neura \
// RN: --transform-ctrl-to-data-flow \
// RN: --leverage-predicated-value \
// RUN: --transform-ctrl-to-data-flow \
// RUN: --fuse-patterns \
// RN: --insert-mov \
// RUN: %t-kernel.mlir | FileCheck %s
// RUN: %t-kernel.mlir > %t-lowered.mlir

// RUN: FileCheck %s < %t-lowered.mlir

// Verifies that the neura ops are generated and that fusion happens.
// CHECK: accelerator = "neura"
// CHECK-NOT: = llvm.
// CHECK: accelerator = "neura"
// CHECK-NEXT: %0 = "neura.constant"() <{predicate = true, value = 0 : i64}> : () -> i64
// CHECK-NEXT: %1 = "neura.constant"() <{predicate = true, value = 1 : i64}> : () -> i64
// CHECK-NEXT: %2 = "neura.constant"() <{predicate = true, value = 32 : i64}> : () -> i64
// CHECK-NEXT: %3 = neura.reserve : i64
// CHECK-NEXT: %4 = "neura.phi"(%3, %0) : (i64, i64) -> i64
// CHECK-NEXT: %5 = "neura.gep"(%arg0, %4) : (!llvm.ptr, i64) -> !llvm.ptr
// CHECK-NEXT: %6 = "neura.load"(%5) : (!llvm.ptr) -> f32
// CHECK-NEXT: %7 = "neura.gep"(%arg2, %4) : (!llvm.ptr, i64) -> !llvm.ptr
// CHECK-NEXT: %8 = "neura.load"(%7) : (!llvm.ptr) -> f32
// CHECK-NEXT: %9 = "neura.load"(%arg1) : (!llvm.ptr) -> f32
// CHECK-NEXT: %10 = "neura.fmul_fadd"(%6, %8, %9) : (f32, f32, f32) -> f32
// CHECK-NEXT: "neura.store"(%10, %arg1) : (f32, !llvm.ptr) -> ()
// CHECK-NEXT: %11 = "neura.add"(%4, %1) : (i64, i64) -> i64
// CHECK-NEXT: %12 = "neura.icmp"(%11, %2) <{cmpType = "eq"}> : (i64, i64) -> i1
// CHECK-NEXT: %13 = "neura.not"(%12) : (i1) -> i1
// CHECK-NEXT: %14 = neura.grant_predicate %11, %13 : i64, i1 -> i64
// CHECK-NEXT: neura.ctrl_mov %14 -> %3 : i64 i64
// CHECK-NEXT: "neura.return"() : () -> ()