diff --git a/include/NeuraDialect/mapping/mapping_util.h b/include/NeuraDialect/mapping/mapping_util.h new file mode 100644 index 00000000..2492bd4c --- /dev/null +++ b/include/NeuraDialect/mapping/mapping_util.h @@ -0,0 +1,26 @@ +#pragma once + +#include "mlir/IR/Operation.h" + +namespace mlir { +namespace neura { + +// Represents a recurrence cycle rooted at a reserve operation and closed by ctrl_mov. +struct RecurrenceCycle { + SmallVector<Operation *> operations; // Ordered list of operations in the cycle. + int length = 0; // Number of operations excluding reserve/ctrl_mov. +}; + +// Accelerator configuration struct. +struct AcceleratorConfig { + int num_tiles = 4; // Default to 4 tiles if unspecified. +}; + +// Collects recurrence cycles rooted at reserve and closed by ctrl_mov. +SmallVector<RecurrenceCycle> collectRecurrenceCycles(Operation *func_op); + +// Calculates ResMII: ceil(#ops / #tiles). +int calculateResMii(Operation *func_op, const AcceleratorConfig &config); + +} // namespace neura +} // namespace mlir diff --git a/lib/NeuraDialect/Transforms/CMakeLists.txt b/lib/NeuraDialect/Transforms/CMakeLists.txt index c1d16bdc..30734c92 100644 --- a/lib/NeuraDialect/Transforms/CMakeLists.txt +++ b/lib/NeuraDialect/Transforms/CMakeLists.txt @@ -9,6 +9,7 @@ add_mlir_library( TransformCtrlToDataFlowPass.cpp LeveragePredicatedValuePass.cpp MapToAcceleratorPass.cpp + mapping/mapping_util.cpp DEPENDS MLIRNeuraTransformsIncGen diff --git a/lib/NeuraDialect/Transforms/MapToAcceleratorPass.cpp b/lib/NeuraDialect/Transforms/MapToAcceleratorPass.cpp index 76e25901..919a3167 100644 --- a/lib/NeuraDialect/Transforms/MapToAcceleratorPass.cpp +++ b/lib/NeuraDialect/Transforms/MapToAcceleratorPass.cpp @@ -4,94 +4,20 @@ #include "NeuraDialect/NeuraOps.h" #include "NeuraDialect/NeuraTypes.h" #include "NeuraDialect/NeuraPasses.h" +#include "NeuraDialect/mapping/mapping_util.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Pass/Pass.h" #include 
"mlir/Transforms/GreedyPatternRewriteDriver.h" using namespace mlir; +using namespace mlir::neura; #define GEN_PASS_DEF_MapToAccelerator #include "NeuraDialect/NeuraPasses.h.inc" namespace { -/// Represents a recurrence cycle rooted at a reserve operation and ending at a ctrl_mov. -/// The cycle consists of a sequence of operations and its corresponding length. -struct RecurrenceCycle { - SmallVector<Operation *> operations; // Ordered list of operations in the cycle. - int length = 0; // Number of operations excluding ctrl_mov and reserve_op. -}; - -// Traverses (backward) the operation graph starting from the given operation -// towards reserve_value. -void traverseAlongPath(Operation *op, Value reserve_value, - std::deque<Operation *> &current_path, - DenseSet<Operation *> &visited_in_path, - SmallVector<RecurrenceCycle> &collected_paths) { - if (!op || visited_in_path.contains(op)) - return; - - visited_in_path.insert(op); - current_path.push_front(op); - - for (Value operand : op->getOperands()) { - if (operand == reserve_value) { - Operation *res_op = reserve_value.getDefiningOp(); - if (res_op) current_path.push_front(res_op); - - constexpr int kNumExcludedOps = 2; - collected_paths.push_back(RecurrenceCycle{ - operations: SmallVector<Operation *>(current_path.begin(), current_path.end()), - length: static_cast<int>(current_path.size()) - kNumExcludedOps - }); - - if (res_op) current_path.pop_front(); // Remove reserve before backtracking - continue; - } - - if (Operation *def_op = operand.getDefiningOp()) { - traverseAlongPath(def_op, reserve_value, current_path, visited_in_path, collected_paths); - } - } - - current_path.pop_front(); // Backtrack - visited_in_path.erase(op); // Unmark from path -} - -/// Collects all recurrence cycles rooted at reserve operations and closed by ctrl_mov. -/// Each cycle contains the operation sequence and its corresponding length. 
-SmallVector<RecurrenceCycle> collectRecurrenceCycles(Operation *root_op) { - SmallVector<RecurrenceCycle> recurrence_cycles; - - root_op->walk([&](neura::CtrlMovOp ctrl_mov_op) { - Value target = ctrl_mov_op.getTarget(); - auto reserve_op = target.getDefiningOp<neura::ReserveOp>(); - if (!reserve_op) - return; - - Value reserve_value = reserve_op.getResult(); - Value ctrl_mov_from = ctrl_mov_op.getValue(); - - Operation *parent_op = ctrl_mov_from.getDefiningOp(); - if (!parent_op) - return; - - std::deque<Operation *> current_path; - SmallVector<RecurrenceCycle> collected_paths; - DenseSet<Operation *> visited_in_path; - traverseAlongPath(parent_op, reserve_value, current_path, visited_in_path, collected_paths); - - for (auto &cycle : collected_paths) { - cycle.operations.push_back(ctrl_mov_op); - ++cycle.length; - recurrence_cycles.push_back(std::move(cycle)); - } - }); - - return recurrence_cycles; -} - struct MapToAcceleratorPass : public PassWrapper<MapToAcceleratorPass, OperationPass<ModuleOp>> { MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(MapToAcceleratorPass) @@ -127,10 +53,17 @@ struct MapToAcceleratorPass << longest->length << "):\n"; for (Operation *op : longest->operations) op->print(llvm::errs()), llvm::errs() << "\n"; - IntegerAttr mii_attr = IntegerAttr::get( + IntegerAttr rec_mii_attr = IntegerAttr::get( + IntegerType::get(func.getContext(), 32), longest->length); - func->setAttr("RecMII", mii_attr); + func->setAttr("RecMII", rec_mii_attr); } + + AcceleratorConfig config{/*num_tiles=*/8}; // Example + int res_mii = calculateResMii(func, config); + IntegerAttr res_mii_attr = IntegerAttr::get( + IntegerType::get(func.getContext(), 32), res_mii); + func->setAttr("ResMII", res_mii_attr); + }); } }; diff --git a/lib/NeuraDialect/Transforms/mapping/mapping_util.cpp b/lib/NeuraDialect/Transforms/mapping/mapping_util.cpp new file mode 100644 index 00000000..51b839f9 --- /dev/null +++ b/lib/NeuraDialect/Transforms/mapping/mapping_util.cpp @@ -0,0 +1,104 @@ +#include <deque> + +#include "NeuraDialect/mapping/mapping_util.h" +#include "NeuraDialect/NeuraOps.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" 
+#include "mlir/IR/Operation.h" + +using namespace mlir; +using namespace mlir::neura; + +namespace { + +// Traverses (backward) the operation graph starting from the given operation +// towards reserve_value. +void traverseAlongPath(Operation *op, Value reserve_value, + std::deque<Operation *> &current_path, + DenseSet<Operation *> &visited_in_path, + SmallVector<RecurrenceCycle> &collected_paths) { + if (!op || visited_in_path.contains(op)) + return; + + visited_in_path.insert(op); + current_path.push_front(op); + + for (Value operand : op->getOperands()) { + if (operand == reserve_value) { + Operation *res_op = reserve_value.getDefiningOp(); + if (res_op) current_path.push_front(res_op); + + constexpr int kNumExcludedOps = 2; + collected_paths.push_back(RecurrenceCycle{ + operations: SmallVector<Operation *>(current_path.begin(), current_path.end()), + length: static_cast<int>(current_path.size()) - kNumExcludedOps + }); + + if (res_op) current_path.pop_front(); + continue; + } + + if (Operation *def_op = operand.getDefiningOp()) { + traverseAlongPath(def_op, reserve_value, current_path, visited_in_path, collected_paths); + } + } + + current_path.pop_front(); + visited_in_path.erase(op); +} + +} // namespace + +SmallVector<RecurrenceCycle> mlir::neura::collectRecurrenceCycles(Operation *func_op) { + SmallVector<RecurrenceCycle> recurrence_cycles; + + func_op->walk([&](neura::CtrlMovOp ctrl_mov_op) { + Value target = ctrl_mov_op.getTarget(); + auto reserve_op = target.getDefiningOp<neura::ReserveOp>(); + if (!reserve_op) + return; + + Value reserve_value = reserve_op.getResult(); + Value ctrl_mov_from = ctrl_mov_op.getValue(); + + Operation *parent_op = ctrl_mov_from.getDefiningOp(); + if (!parent_op) + return; + + std::deque<Operation *> current_path; + SmallVector<RecurrenceCycle> collected_paths; + DenseSet<Operation *> visited_in_path; + traverseAlongPath(parent_op, reserve_value, current_path, visited_in_path, collected_paths); + + for (auto &cycle : collected_paths) { + cycle.operations.push_back(ctrl_mov_op); + ++cycle.length; + recurrence_cycles.push_back(std::move(cycle)); + } + }); + + return recurrence_cycles; 
+} + +int mlir::neura::calculateResMii(Operation *func_op, const AcceleratorConfig &config) { + int num_ops = 0; + + // Count all "compute" operations (non-terminators, non-block ops). + func_op->walk([&](Operation *op) { + // Skips non-materialized ops. + if (isa<neura::ReserveOp>(op) || + isa<neura::CtrlMovOp>(op)) { + return; + } + ++num_ops; + }); + + llvm::errs() << "[calculateResMii] Total operations: " << num_ops << "\n"; + + // Avoid divide-by-zero + int tiles = std::max(1, config.num_tiles); + + return llvm::divideCeil(num_ops, tiles); +} diff --git a/test/neura/ctrl/branch_for.mlir b/test/neura/ctrl/branch_for.mlir index eac44582..7f5b48d1 100644 --- a/test/neura/ctrl/branch_for.mlir +++ b/test/neura/ctrl/branch_for.mlir @@ -17,7 +17,7 @@ // RUN: --leverage-predicated-value \ // RUN: --transform-ctrl-to-data-flow \ // RUN: --map-to-accelerator \ -// RUN: | FileCheck %s -check-prefix=RECMII +// RUN: | FileCheck %s -check-prefix=MII func.func @loop_test() -> f32 { %n = llvm.mlir.constant(10 : i64) : i64 @@ -81,4 +81,4 @@ func.func @loop_test() -> f32 { // CTRL2DATA-NEXT: "neura.return"(%18) : (!neura.data) -> () // CTRL2DATA-NEXT: } -// RECMII: func.func @loop_test() -> f32 attributes {RecMII = 4 : i32, accelerator = "neura"} \ No newline at end of file +// MII: func.func @loop_test() -> f32 attributes {RecMII = 4 : i32, ResMII = 2 : i32, accelerator = "neura"} \ No newline at end of file