Skip to content
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions include/NeuraDialect/Mapping/mapping_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,10 @@ OperationKind getOperationKindFromMlirOp(Operation *op);
// Returns true if the operation does not need CGRA tile placement.
bool is_non_materialized(Operation *op);

// Returns true if the operation is a materialized user of a reserve op,
// i.e., a phi, invariant, or carry operation.
bool isMaterializedReserveUser(Operation *op);

// Represents a recurrence cycle rooted at a reserve operation and closed by
// ctrl_mov.
struct RecurrenceCycle {
Expand Down
78 changes: 78 additions & 0 deletions include/NeuraDialect/NeuraOps.td
Original file line number Diff line number Diff line change
Expand Up @@ -489,4 +489,82 @@ def Neura_LoopControlOp : Op<NeuraDialect, "loop_control">{

// let assemblyFormat =
// " `(``parent_valid` `=` $parentValid `,` `start` `=` $start `,` `end` `=` $end `,` `step` `=` $step`)` attr-dict `:` type($parentValid) `,` type($start) `,` type($end) `,` type($step) `->` type($nextindex) `,` type($valid)";
}

// ----------------------------------------------------
// Defines operations for steering-control based DFG execution.
// ----------------------------------------------------

// Defines the true_steer operation.
def Neura_TrueSteerOp : Op<NeuraDialect, "true_steer">{
  let summary = "Conditionally pass a value when condition is true.";
  // The output carries the same type as the input; the previous example
  // (i32 -> f32) contradicted the pass-through semantics described below.
  let description = [{When the condition is true, the input value is passed to the output; otherwise, the output is empty.
    Example:
      %out = neura.true_steer %in, %cond : i32, i1 -> i32
  }];

  let arguments = (ins AnyType:$input, AnyType:$condition);
  let results = (outs AnyType:$output);

  let assemblyFormat = "$input `,` $condition attr-dict `:` type($input) `,` type($condition) `->` type($output)";
}

// Defines the false_steer operation.
def Neura_FalseSteerOp : Op<NeuraDialect, "false_steer">{
  let summary = "Conditionally pass a value when condition is false.";
  // The output carries the same type as the input; the previous example
  // (i32 -> f32) contradicted the pass-through semantics described below.
  let description = [{When the condition is false, the input value is passed to the output; otherwise, the output is empty.
    Example:
      %out = neura.false_steer %in, %cond : i32, i1 -> i32
  }];

  let arguments = (ins AnyType:$input, AnyType:$condition);
  let results = (outs AnyType:$output);

  let assemblyFormat = "$input `,` $condition attr-dict `:` type($input) `,` type($condition) `->` type($output)";
}

// Defines the carry operation.
def Neura_CarryOp : Op<NeuraDialect, "carry">{
let summary = "Carry state across iterations.";
let description = [{
Three inputs for carry operation:
- initial value: used in the first execution.
- condition: determines whether to use the carried value.
- carried value: used when condition is true.
The output is the initial value when it is executed for the first time, otherwise it is the carried value when the condition is true.
Example:
%out = neura.carry %init, %cond, %carry_val : i32, i1, i32 -> i32
}];

// NOTE(review): all operands and the result use AnyType, so the verifier
// does not enforce that $initial, $carried, and $result share a type even
// though the example implies they do -- confirm whether a constraint such
// as AllTypesMatch<["initial", "carried", "result"]> is intended.
let arguments = (ins AnyType:$initial, AnyType:$condition, AnyType:$carried);
let results = (outs AnyType:$result);
let assemblyFormat = "$initial `,` $condition `,` $carried attr-dict `:` type($initial) `,` type($condition) `,` type($carried) `->` type($result)";
}

// Defines the merge operation.
// NOTE(review): despite the "multiple inputs" wording, this op is a fixed
// two-way select: it takes exactly one condition plus a true/false value
// pair -- consider rewording the summary or making the op variadic.
def Neura_MergeOp : Op<NeuraDialect, "merge">{
let summary = "Merge multiple inputs into one output.";
let description = [{
Merges multiple input values into a single output value based on the condition.
Example:
%out = neura.merge %cond, %in1, %in2 : i1, i32, i32 -> i32
}];

let arguments = (ins AnyType:$condition, AnyType:$true_value, AnyType:$false_value);
let results = (outs AnyType:$result);

let assemblyFormat = "$condition `,` $true_value `,` $false_value attr-dict `:` type($condition) `,` type($true_value) `,` type($false_value) `->` type($result)";
}

// Defines the invariant operation.
def Neura_InvariantOp : Op<NeuraDialect, "invariant">{
  let summary = "Invariant value across DFG execution.";
  // The example previously omitted the comma between operands, which does
  // not parse under the declared assemblyFormat ("$initial `,` $condition").
  let description = [{
    Invariant operation is a subset of carry operation where the output is always the initial value.
    Example:
      %out = neura.invariant %init, %cond : i32, i1 -> i32
  }];
  let arguments = (ins AnyType:$initial, AnyType:$condition);
  let results = (outs AnyType:$result);
  let assemblyFormat = "$initial `,` $condition attr-dict `:` type($initial) `,` type($condition) `->` type($result)";
}
2 changes: 2 additions & 0 deletions include/NeuraDialect/NeuraPasses.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ std::unique_ptr<mlir::Pass> createMapToAcceleratorPass();
std::unique_ptr<mlir::Pass> createGenerateCodePass();
std::unique_ptr<mlir::Pass> createCanonicalizeLiveInPass();
std::unique_ptr<mlir::Pass> createPromoteFuncArgToConstPass();
std::unique_ptr<mlir::Pass> createTransformToSteerControlPass();
std::unique_ptr<mlir::Pass> createRemovePredicatedTypePass();

// ====================================
// Optimization Passes
Expand Down
19 changes: 19 additions & 0 deletions include/NeuraDialect/NeuraPasses.td
Original file line number Diff line number Diff line change
Expand Up @@ -116,4 +116,23 @@ def FoldConstant : Pass<"fold-constant", "ModuleOp"> {
let constructor = "neura::createFoldConstantPass()";
}

def TransformToSteerControl : Pass<"transform-to-steer-control", "func::FuncOp"> {
  let summary = "Transform control flow into data flow using steer control";
  // "CDFG" expands to control/data flow graph; the previous wording paired
  // the acronym with "control flow graphs", which it does not abbreviate.
  let description = [{
    This pass transforms Neura control/data flow graphs (CDFG) into pure dataflow graphs (DFG)
    using steer control operations like true_steer, false_steer, carry, and merge.
    Unlike predication-based approaches, steer control explicitly directs data through
    different paths based on conditions.
  }];
  let constructor = "neura::createTransformToSteerControlPass()";
}

def RemovePredicatedType : Pass<"remove-predicated-type", "ModuleOp"> {
let summary = "Removes predicated types from Neura dialect operations";
let description = [{
This pass removes predicated types from Neura dialect operations,
converting them back to regular types.
}];
// NOTE(review): the implementation only rewrites functions carrying the
// attribute accelerator = "neura"; all other functions are left untouched.
let constructor = "neura::createRemovePredicatedTypePass()";
}
#endif // NEURA_PASSES_TD
35 changes: 22 additions & 13 deletions lib/NeuraDialect/Mapping/mapping_util.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -316,12 +316,9 @@ mlir::Operation *mlir::neura::getMaterializedBackwardUser(Operation *op) {
"Expected the user of ctrl_mov target to be a reserve operation");
auto reserve_op = dyn_cast<neura::ReserveOp>(target.getDefiningOp());

// Skip ctrl_mov users of reserve; return the first phi user.
// Skip ctrl_mov users of reserve; return the first materialized user.
for (Operation *user : reserve_op.getResult().getUsers()) {
if (isa<neura::CtrlMovOp>(user)) {
continue; // skip ctrl_mov user
}
if (isa<neura::PhiOp>(user)) {
if (isMaterializedReserveUser(user)) {
return user;
}
}
Expand Down Expand Up @@ -702,6 +699,19 @@ bool mlir::neura::canReachLocInTime(const MappingLoc &src_loc,
return false;
}

// Returns true if `user` is a materialized user of a reserve op, i.e. one of
// the ops that close a recurrence cycle and therefore occupy a CGRA tile:
// phi, invariant, or carry. Uses LLVM's variadic isa<> instead of three
// sequential single-type checks.
bool mlir::neura::isMaterializedReserveUser(Operation *user) {
  return isa<neura::PhiOp, neura::InvariantOp, neura::CarryOp>(user);
}

void mlir::neura::updateAward(std::map<MappingLoc, int> &locs_with_award,
MappingLoc loc, int award) {
// Updates the award of the top element in the priority queue.
Expand Down Expand Up @@ -752,8 +762,9 @@ mlir::neura::calculateAward(Operation *op, std::set<Operation *> &critical_ops,
assert(ctrl_mov && "Expected user to be a CtrlMovOp");
mlir::Operation *materialized_backward_op =
getMaterializedBackwardUser(ctrl_mov);
assert(isa<neura::PhiOp>(materialized_backward_op) &&
"Expected materialized operation of ctrl_mov to be a PhiOp");
assert(isMaterializedReserveUser(materialized_backward_op) &&
"Expected materialized operation of ctrl_mov to be a "
"PhiOp/InvariantOp/CarryOp.");
backward_users.push_back(materialized_backward_op);
}

Expand Down Expand Up @@ -794,10 +805,7 @@ mlir::neura::calculateAward(Operation *op, std::set<Operation *> &critical_ops,
award += op->getOperands().size() -
getPhysicalHops(producers, tile, mapping_state);
}
// llvm::errs() << "[DEBUG] checking range: "
// << earliest_start_time_step << " to "
// << latest_end_time_step << " for tile: "
// << tile->getType() << "#" << tile->getId() << "\n";

for (int t = earliest_start_time_step; t < latest_end_time_step; t += 1) {
MappingLoc tile_loc_candidate = {tile, t};
// If the tile at time `t` is available, we can consider it for mapping.
Expand Down Expand Up @@ -942,8 +950,9 @@ bool mlir::neura::placeAndRoute(Operation *op, const MappingLoc &target_loc,
assert(ctrl_mov && "Expected user to be a CtrlMovOp");
mlir::Operation *materialized_backward_op =
getMaterializedBackwardUser(ctrl_mov);
assert(isa<neura::PhiOp>(materialized_backward_op) &&
"Expected materialized operation of ctrl_mov to be a PhiOp");
assert(isMaterializedReserveUser(materialized_backward_op) &&
"Expected materialized operation of ctrl_mov to be a "
"PhiOp/InvariantOp/CarryOp");
// Gets the last location of the materialized operation.
MappingLoc backward_loc =
mapping_state.getAllLocsOfOp(materialized_backward_op).back();
Expand Down
2 changes: 2 additions & 0 deletions lib/NeuraDialect/Transforms/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@ add_mlir_library(
CanonicalizeLiveInPass.cpp
CanonicalizeCastPass.cpp
PromoteFuncArgToConstPass.cpp
TransformToSteerControlPass.cpp
RemovePredicatedTypePass.cpp

DEPENDS
MLIRNeuraTransformsIncGen
Expand Down
6 changes: 3 additions & 3 deletions lib/NeuraDialect/Transforms/InsertDataMovPass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -77,9 +77,9 @@ struct InsertDataMovForNeuraOps : public RewritePattern {
SmallVector<Value> new_operands;
for (Value operand : op->getOperands()) {
Operation *producer = operand.getDefiningOp();
// Skips adding mov for neura.reserve -> neura.phi.
if (isa<neura::PhiOp>(op) && producer &&
isa<neura::ReserveOp>(producer)) {

// Skips adding mov for any operand that comes from a reserve op.
if (producer && isa<neura::ReserveOp>(producer)) {
new_operands.push_back(operand);
continue;
}
Expand Down
6 changes: 3 additions & 3 deletions lib/NeuraDialect/Transforms/LeveragePredicatedValuePass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -45,15 +45,15 @@ struct LeveragePredicatedValuePass
}

for (BlockArgument arg : block->getArguments()) {
Type origType = arg.getType();
Type orig_type = arg.getType();

// Avoid double-wrapping if already predicated
if (llvm::isa<neura::PredicatedValue>(origType)) {
if (llvm::isa<neura::PredicatedValue>(orig_type)) {
continue;
}

auto predicated_type = neura::PredicatedValue::get(
func.getContext(), origType,
func.getContext(), orig_type,
IntegerType::get(func.getContext(), 1));
arg.setType(predicated_type);
}
Expand Down
156 changes: 156 additions & 0 deletions lib/NeuraDialect/Transforms/RemovePredicatedTypePass.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,156 @@
#include "NeuraDialect/NeuraDialect.h"
#include "NeuraDialect/NeuraOps.h"
#include "NeuraDialect/NeuraPasses.h"
#include "NeuraDialect/NeuraTypes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/Support/raw_ostream.h"

using namespace mlir;

#define GEN_PASS_DEF_REMOVEPREDICATEDTYPE
#include "NeuraDialect/NeuraPasses.h.inc"

namespace {

// Pass that strips neura::PredicatedValue wrappers from block arguments and
// op results inside functions marked with accelerator = "neura", restoring
// the underlying value types.
struct RemovePredicatedTypePass
    : public PassWrapper<RemovePredicatedTypePass, OperationPass<ModuleOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(RemovePredicatedTypePass)

  StringRef getArgument() const override { return "remove-predicated-type"; }
  StringRef getDescription() const override {
    return "Remove predicated types from Neura dialect operations, reverting "
           "to basic types.";
  }

  void runOnOperation() override {
    ModuleOp module = getOperation();

    // Processes only functions targeted at the Neura accelerator; every
    // other function is left untouched.
    module.walk([&](FunctionOpInterface func) {
      auto accel_attr = func->getAttrOfType<StringAttr>("accelerator");
      if (!accel_attr || accel_attr.getValue() != "neura") {
        return;
      }

      // Unwraps predicated block-argument types first, so operand types
      // observed during op rewriting are already in non-predicated form.
      func.walk([&](Block *block) {
        for (BlockArgument arg : block->getArguments()) {
          if (auto predicated_type =
                  llvm::dyn_cast<neura::PredicatedValue>(arg.getType())) {
            arg.setType(predicated_type.getValueType());
          }
        }
      });

      // Rewrites ops in producer-before-consumer order so each op sees its
      // operands already converted when it is rebuilt.
      SmallVector<Operation *> ordered_ops;
      getOperationsInTopologicalOrder(func, ordered_ops);

      for (Operation *op : ordered_ops) {
        if (failed(removePredicatedType(op))) {
          llvm::errs() << "Failed to convert op from predicated form: " << *op
                       << "\n";
          signalPassFailure();
          return;
        }
      }
    });
  }

private:
  // Collects the ops of `func` so that every op appears after all ops that
  // define its operands. The previous implementation only peeked one level
  // of operands (it pushed a defining op without first visiting *its*
  // operands), which does not guarantee a transitive topological order; the
  // operands-first DFS below does.
  void getOperationsInTopologicalOrder(FunctionOpInterface func,
                                       SmallVector<Operation *> &ordered_ops) {
    DenseSet<Operation *> visited_ops;
    func.walk<WalkOrder::PreOrder>([&](Operation *op) {
      appendInPostOrder(op, visited_ops, ordered_ops);
    });
  }

  // Appends the transitive producers of `op` before `op` itself. Block
  // arguments have no defining op, so the recursion bottoms out there; the
  // visited set guards against revisiting shared producers.
  void appendInPostOrder(Operation *op, DenseSet<Operation *> &visited_ops,
                         SmallVector<Operation *> &ordered_ops) {
    if (!visited_ops.insert(op).second) {
      return;
    }
    for (Value operand : op->getOperands()) {
      if (Operation *def_op = operand.getDefiningOp()) {
        appendInPostOrder(def_op, visited_ops, ordered_ops);
      }
    }
    ordered_ops.push_back(op);
  }

  // Rebuilds `op` with every PredicatedValue result type replaced by its
  // underlying value type. Non-Neura ops and ops without predicated results
  // are returned unchanged.
  LogicalResult removePredicatedType(Operation *op) {
    // Skips if not a Neura op.
    if (op->getDialect()->getNamespace() != "neura") {
      return success();
    }

    // Skips if no results or no predicated result types.
    if (op->getNumResults() == 0 ||
        !llvm::any_of(op->getResultTypes(), [](Type t) {
          return mlir::isa<mlir::neura::PredicatedValue>(t);
        })) {
      return success();
    }

    // Computes the unwrapped result types.
    SmallVector<Type> new_results;
    for (Type t : op->getResultTypes()) {
      if (auto predicated_type = llvm::dyn_cast<neura::PredicatedValue>(t)) {
        new_results.push_back(predicated_type.getValueType());
      } else {
        new_results.push_back(t);
      }
    }

    // Clones the op with the new result types. NOTE(review): block
    // successors are not copied, so this assumes no branch-like Neura ops
    // remain at this point in the pipeline -- confirm against pass ordering.
    OpBuilder builder(op);
    OperationState state(op->getLoc(), op->getName());
    state.addOperands(op->getOperands());
    state.addTypes(new_results);
    state.addAttributes(op->getAttrs());
    for (unsigned i = 0; i < op->getNumRegions(); ++i) {
      state.addRegion();
    }

    Operation *new_op = builder.create(state);

    // Moves region bodies (if any) into the replacement op.
    for (unsigned i = 0; i < op->getNumRegions(); ++i) {
      new_op->getRegion(i).takeBody(op->getRegion(i));
    }

    // Replaces and deletes the old op.
    op->replaceAllUsesWith(new_op);
    op->erase();
    return success();
  }
};

} // namespace

namespace mlir::neura {

// Factory for the pass registered as "remove-predicated-type".
std::unique_ptr<Pass> createRemovePredicatedTypePass() {
  return std::make_unique<RemovePredicatedTypePass>();
}

} // namespace mlir::neura
Loading