//===----------------------------------------------------------------------===//
// Architecture: models a CGRA as a width x height mesh of Tiles connected by
// directed Links (4-neighbour mesh, two directed links per neighbour pair).
// In the project tree this is split into
// include/NeuraDialect/Architecture/Architecture.h and
// lib/NeuraDialect/Architecture/Architecture.cpp.
//===----------------------------------------------------------------------===//

#include <cassert>
#include <memory>
#include <set>
#include <string>
#include <vector>

namespace mlir {
namespace neura {

/// Abstract base class for any mappable hardware resource (Tile, Link, ...).
class BasicResource {
public:
  virtual ~BasicResource() = default;
  virtual int getId() const = 0;
  virtual std::string getType() const = 0;
};

// Forward declaration for use in Tile.
class Link;

/// A compute tile at grid coordinate (x, y).
class Tile : public BasicResource {
public:
  Tile(int id, int x, int y) : id(id), x(x), y(y) {}

  int getId() const override { return id; }
  std::string getType() const override { return "tile"; }

  int getX() const { return x; }
  int getY() const { return y; }

  /// Registers `link` as an outgoing edge from this tile to `tile`, and
  /// mirrors the bookkeeping (src_tiles / in_links) on the destination side.
  void linkDstTile(Link *link, Tile *tile) {
    assert(link && "Cannot link with a null link");
    assert(tile && "Cannot link to a null tile");
    dst_tiles.insert(tile);
    out_links.insert(link);
    tile->src_tiles.insert(this);
    tile->in_links.insert(link);
  }

  const std::set<Tile *> &getDstTiles() const { return dst_tiles; }
  const std::set<Tile *> &getSrcTiles() const { return src_tiles; }
  const std::set<Link *> &getOutLinks() const { return out_links; }
  const std::set<Link *> &getInLinks() const { return in_links; }

private:
  int id;
  int x, y;
  std::set<Tile *> src_tiles;
  std::set<Tile *> dst_tiles;
  std::set<Link *> in_links;
  std::set<Link *> out_links;
};

/// A directed point-to-point connection between two tiles.
class Link : public BasicResource {
public:
  explicit Link(int id) : id(id) {}

  int getId() const override { return id; }
  std::string getType() const override { return "link"; }

  Tile *getSrcTile() const { return src_tile; }
  Tile *getDstTile() const { return dst_tile; }

  /// Wires this link from `src` to `dst` and updates both tiles' adjacency.
  void connect(Tile *src, Tile *dst) {
    assert(src && dst && "Cannot connect null tiles");
    src_tile = src;
    dst_tile = dst;
    src->linkDstTile(this, dst);
  }

private:
  int id;
  // BUGFIX: these were left uninitialized until connect() ran; getSrcTile()
  // on an unconnected Link read indeterminate values.
  Tile *src_tile = nullptr;
  Tile *dst_tile = nullptr;
};

/// Describes the entire CGRA architecture as a width x height mesh.
class Architecture {
public:
  /// Builds the tile grid and the mesh topology.
  /// TODO: Model topology based on the architecture specs.
  /// https://github.com/coredac/dataflow/issues/52.
  Architecture(int width, int height) : width(width), height(height) {
    assert(width > 0 && height > 0 && "Architecture dims must be positive");
    const int num_tiles = width * height;

    tileStorage.reserve(num_tiles);
    tiles.reserve(num_tiles);

    for (int i = 0; i < width; ++i) {
      for (int j = 0; j < height; ++j) {
        // BUGFIX: the id was computed as i * width + j, which collides for
        // non-square grids (e.g. 2x3: tiles (0,2) and (1,0) both got id 2).
        // With i in [0,width) and j in [0,height), i * height + j is unique
        // and dense, and matches the storage order below.
        auto tile = std::make_unique<Tile>(i * height + j, i, j);
        tiles.push_back(tile.get());
        tileStorage.push_back(std::move(tile));
      }
    }

    // BUGFIX: the original built each Link in a function-local unique_ptr,
    // so every Link was destroyed at the end of its `if` scope and the
    // tiles' in/out link sets were left holding dangling pointers. Links
    // are now owned by the Architecture (linkStorage).
    int link_id = 0;
    auto addLink = [this, &link_id](Tile *src, Tile *dst) {
      auto link = std::make_unique<Link>(link_id++);
      link->connect(src, dst);
      links.push_back(link.get());
      linkStorage.push_back(std::move(link));
    };

    for (int i = 0; i < width; ++i) {
      for (int j = 0; j < height; ++j) {
        Tile *tile = getTile(i, j);
        if (i > 0)
          addLink(tile, getTile(i - 1, j)); // towards left
        if (i < width - 1)
          addLink(tile, getTile(i + 1, j)); // towards right
        if (j > 0)
          addLink(tile, getTile(i, j - 1)); // towards down
        if (j < height - 1)
          addLink(tile, getTile(i, j + 1)); // towards up
      }
    }
  }

  /// Looks up a tile by its unique id (ids are dense in [0, numTiles)).
  Tile *getTile(int id) {
    // Tiles are stored in id order, so no linear scan is needed.
    assert(id >= 0 && id < static_cast<int>(tiles.size()) &&
           "Tile with given ID not found");
    return tiles[id];
  }

  /// Looks up a tile by grid coordinates.
  Tile *getTile(int x, int y) {
    assert(x >= 0 && x < width && y >= 0 && y < height &&
           "Tile with given coordinates not found");
    return tiles[x * height + y];
  }

  /// Looks up a link by its unique id.
  /// BUGFIX: this was declared in the header but never defined.
  Link *getLink(int id) {
    assert(id >= 0 && id < static_cast<int>(links.size()) &&
           "Link with given ID not found");
    return links[id];
  }

  int getNumTiles() const { return static_cast<int>(tiles.size()); }
  int getNumLinks() const { return static_cast<int>(links.size()); }
  std::vector<Tile *> getAllTiles() { return tiles; }
  std::vector<Link *> getAllLinks() { return links; }

private:
  int width, height;
  std::vector<std::unique_ptr<Tile>> tileStorage;
  std::vector<std::unique_ptr<Link>> linkStorage; // owns all Links
  std::vector<Tile *> tiles;
  std::vector<Link *> links;
};

} // namespace neura
} // namespace mlir
//===----------------------------------------------------------------------===//
// MappingState: tracks which operation occupies which (resource, timeStep)
// slot during placement and routing on the CGRA.
// In the project tree this is split into
// include/NeuraDialect/Mapping/MappingState.h (which includes
// mlir/IR/Operation.h and NeuraDialect/Architecture/Architecture.h) and
// lib/NeuraDialect/Mapping/MappingState.cpp.
//===----------------------------------------------------------------------===//

#include <cstddef>
#include <functional>
#include <optional>
#include <unordered_map>
#include <utility>

namespace mlir {

class Operation; // provided by mlir/IR/Operation.h

namespace neura {

class BasicResource; // provided by NeuraDialect/Architecture/Architecture.h

// Represents a spatial-temporal location: (resource, timeStep).
using MappingLoc = std::pair<BasicResource *, int>;

// BUGFIX: std::unordered_map / std::unordered_set have no std::hash
// specialization for std::pair, so keying them directly on MappingLoc (as the
// original member declarations did) does not compile. Provide an explicit
// hash functor.
struct MappingLocHash {
  std::size_t operator()(const MappingLoc &loc) const {
    std::size_t h1 = std::hash<BasicResource *>()(loc.first);
    std::size_t h2 = std::hash<int>()(loc.second);
    // Boost-style hash_combine to mix the two fields.
    return h1 ^ (h2 + 0x9e3779b9 + (h1 << 6) + (h1 >> 2));
  }
};

// Tracks placement and routing of ops on the CGRA.
class MappingState {
public:
  // Binds a (tile/link, timeStep) location to an operation.
  void bindOp(MappingLoc loc, Operation *op);

  // Checks if a (tile/link, timeStep) is available (unoccupied).
  bool isAvailable(const MappingLoc &loc) const;

  // Gets the operation at a location, or std::nullopt if nothing is bound.
  std::optional<Operation *> getOpAt(MappingLoc loc) const;

private:
  // Single source of truth. The original kept a separate occupied_locs set
  // whose keys merely duplicated this map's keys (and could drift from it).
  std::unordered_map<MappingLoc, Operation *, MappingLocHash> loc_to_op;
};

void MappingState::bindOp(MappingLoc loc, Operation *op) {
  loc_to_op[loc] = op;
}

bool MappingState::isAvailable(const MappingLoc &loc) const {
  // BUGFIX: the original used unordered_set::contains, which requires C++20;
  // count() is equivalent here and works under C++17 as well.
  return loc_to_op.count(loc) == 0;
}

// BUGFIX: getOpAt was declared in the header but never defined, so any caller
// would fail at link time.
std::optional<Operation *> MappingState::getOpAt(MappingLoc loc) const {
  auto it = loc_to_op.find(loc);
  if (it == loc_to_op.end())
    return std::nullopt;
  return it->second;
}

} // namespace neura
} // namespace mlir
a/test/neura/ctrl/branch_for.mlir b/test/neura/ctrl/branch_for.mlir index c57476da..e656bbef 100644 --- a/test/neura/ctrl/branch_for.mlir +++ b/test/neura/ctrl/branch_for.mlir @@ -81,4 +81,4 @@ func.func @loop_test() -> f32 { // CTRL2DATA-NEXT: "neura.return"(%18) : (!neura.data) -> () // CTRL2DATA-NEXT: } -// MII: func.func @loop_test() -> f32 attributes {RecMII = 4 : i32, ResMII = 2 : i32, accelerator = "neura"} \ No newline at end of file +// MII: func.func @loop_test() -> f32 attributes {RecMII = 4 : i32, ResMII = 4 : i32, accelerator = "neura"} \ No newline at end of file