Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
101 changes: 101 additions & 0 deletions include/NeuraDialect/Architecture/Architecture.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
#ifndef NEURA_ARCHITECTURE_H
#define NEURA_ARCHITECTURE_H

#include <string>
#include <vector>
#include <set>
#include <unordered_map>
#include <optional>
#include <memory>

namespace mlir {
namespace neura {

//===----------------------------------------------------------------------===//
// BasicResource: abstract base class for Tile, Link, etc.
//===----------------------------------------------------------------------===//

// Abstract base for any schedulable hardware resource of the CGRA
// (concrete subclasses visible in this file: Tile, Link).
class BasicResource {
public:
virtual ~BasicResource() = default;
// Unique integer id of this resource (uniqueness is the subclass's duty).
virtual int getId() const = 0;
// Short tag identifying the concrete kind, e.g. "tile" or "link".
virtual std::string getType() const = 0;
};

//===----------------------------------------------------------------------===//
// Forward declaration for use in Tile
class Link;

//===----------------------------------------------------------------------===//
// Tile
//===----------------------------------------------------------------------===//

// A compute tile at grid position (x, y). Tracks its neighbors and the
// directed links that reach them (sources/destinations are kept in sync
// by linkDstTile).
class Tile : public BasicResource {
public:
// id is the caller-assigned unique identifier; (x, y) is the grid position.
Tile(int id, int x, int y);

int getId() const override;
std::string getType() const override { return "tile"; }

int getX() const;
int getY() const;

// Registers `link` as an outgoing link from this tile to `tile`.
// Also updates the destination tile's src_tiles/in_links, so calling it
// on one endpoint wires both directions of the bookkeeping.
void linkDstTile(Link* link, Tile* tile);
const std::set<Tile*>& getDstTiles() const;
const std::set<Tile*>& getSrcTiles() const;
const std::set<Link*>& getOutLinks() const;
const std::set<Link*>& getInLinks() const;

private:
int id;
int x, y;
// Neighbor/link sets; populated exclusively via linkDstTile.
std::set<Tile*> src_tiles;
std::set<Tile*> dst_tiles;
std::set<Link*> in_links;
std::set<Link*> out_links;
};

//===----------------------------------------------------------------------===//
// Link
//===----------------------------------------------------------------------===//

// A directed connection between two tiles. A Link is inert until
// connect() is called, which sets both endpoints and registers the link
// with the source tile.
class Link : public BasicResource {
public:
  // id is the caller-assigned unique identifier.
  Link(int id);

  int getId() const override;
  std::string getType() const override { return "link"; }

  // Endpoints; null until connect() has been called.
  Tile* getSrcTile() const;
  Tile* getDstTile() const;

  // Sets src/dst and registers this link with `src` via linkDstTile.
  void connect(Tile* src, Tile* dst);

private:
  int id;
  // Default-initialized to null: the out-of-line constructor only sets
  // `id`, so without these initializers an unconnected Link would hand
  // out indeterminate pointers from getSrcTile()/getDstTile().
  Tile* src_tile = nullptr;
  Tile* dst_tile = nullptr;
};

/// Describes the entire CGRA architecture.
class Architecture {
public:
Architecture(int width, int height);

Tile* getTile(int id);
Tile* getTile(int x, int y);

Link* getLink(int id);

int getNumTiles() const { return static_cast<int>(tiles.size()); }
std::vector<Tile*> getAllTiles() { return tiles; }

private:
std::vector<std::unique_ptr<Tile>> tileStorage;
std::vector<Tile*> tiles;
};

} // namespace neura
} // namespace mlir

#endif // NEURA_ARCHITECTURE_H
37 changes: 37 additions & 0 deletions include/NeuraDialect/Mapping/MappingState.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
#ifndef NEURA_MAPPING_STATE_H
#define NEURA_MAPPING_STATE_H

#include "mlir/IR/Operation.h"
#include "NeuraDialect/Architecture/Architecture.h" // for BasicResource

#include <functional>
#include <optional>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

namespace mlir {
namespace neura {

// Represents a spatial-temporal location: (resource, timeStep)
using MappingLoc = std::pair<BasicResource*, int>;

// Tracks placement and routing of ops on the CGRA.
class MappingState {
public:
// Binds a (tile/link, timeStep) location to an operation.
void bindOp(MappingLoc loc, Operation *op);

// Checks if a (tile/link, timeStep) is available (unoccupied).
bool isAvailable(const MappingLoc &loc) const;

// Gets the operation at a specific (tile/link, timeStep) location.
std::optional<Operation*> getOpAt(MappingLoc loc) const;

private:
std::unordered_map<MappingLoc, Operation*> loc_to_op;
std::unordered_set<MappingLoc> occupied_locs;
};

} // namespace neura
} // namespace mlir

#endif // NEURA_MAPPING_STATE_H
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
#pragma once

#include "mlir/IR/Operation.h"
#include "NeuraDialect/Architecture/Architecture.h"

namespace mlir {
namespace neura {
Expand All @@ -11,16 +12,11 @@ struct RecurrenceCycle {
int length = 0; // Number of operations excluding reserve/ctrl_mov.
};

// Accelerator configuration struct.
struct AcceleratorConfig {
int num_tiles = 4; // Default to 4 tiles if unspecified.
};

// Collects recurrence cycles rooted at reserve and closed by ctrl_mov.
SmallVector<RecurrenceCycle, 4> collectRecurrenceCycles(Operation *func_op);

// Calculates ResMII: ceil(#ops / #tiles).
int calculateResMii(Operation *func_op, const AcceleratorConfig &config);
int calculateResMii(Operation *func_op, const Architecture &architecture);

} // namespace neura
} // namespace mlir
130 changes: 130 additions & 0 deletions lib/NeuraDialect/Architecture/Architecture.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,130 @@
#include "NeuraDialect/Architecture/Architecture.h"
#include <cassert>

using namespace mlir;
using namespace mlir::neura;

// Constructs a tile with its unique id and (x, y) grid coordinates.
Tile::Tile(int id, int x, int y) : id(id), x(x), y(y) {}

// Unique identifier assigned at construction.
int Tile::getId() const {
return id;
}

// Grid column of this tile.
int Tile::getX() const {
return x;
}

// Grid row of this tile.
int Tile::getY() const {
return y;
}

// Registers `link` as flowing from this tile into `tile`, updating the
// bookkeeping on BOTH endpoints: this tile's dst/out sets and the
// destination tile's src/in sets.
void Tile::linkDstTile(Link* link, Tile* tile) {
assert(tile && "Cannot link to a null tile");
dst_tiles.insert(tile);
out_links.insert(link);
tile->src_tiles.insert(this);
tile->in_links.insert(link);
}

// Tiles reachable via one outgoing link.
const std::set<Tile*>& Tile::getDstTiles() const {
return dst_tiles;
}

// Tiles with a link into this tile.
const std::set<Tile*>& Tile::getSrcTiles() const {
return src_tiles;
}

// Links departing from this tile.
const std::set<Link*>& Tile::getOutLinks() const {
return out_links;
}

// Links arriving at this tile.
const std::set<Link*>& Tile::getInLinks() const {
return in_links;
}

// Constructs a link with its unique id.
// NOTE(review): only `id` is set here; src_tile/dst_tile keep whatever
// value the class declaration gives them until connect() runs — verify
// they are null-initialized in the header before relying on the getters.
Link::Link(int id) {
this->id = id;
}

// Unique identifier assigned at construction.
int Link::getId() const {
return id;
}

// Source endpoint (set by connect()).
Tile* Link::getSrcTile() const {
return src_tile;
}

// Destination endpoint (set by connect()).
Tile* Link::getDstTile() const {
return dst_tile;
}

// Wires this link from `src` to `dst` and registers it on both tiles
// via linkDstTile. No cleanup is performed, so calling connect() a
// second time would leave stale entries in the previous tiles' sets.
void Link::connect(Tile* src, Tile* dst) {
assert(src && dst && "Cannot connect null tiles");
src_tile = src;
dst_tile = dst;
src->linkDstTile(this, dst);
}

// Builds a width x height mesh: one tile per (i, j) coordinate and a
// pair of opposing links between every pair of orthogonal neighbors.
Architecture::Architecture(int width, int height) {
  const int num_tiles = width * height;

  tileStorage.reserve(num_tiles);
  tiles.reserve(num_tiles);

  for (int i = 0; i < width; ++i) {
    for (int j = 0; j < height; ++j) {
      // Tile id must be unique over the whole grid. With i in [0, width)
      // and j in [0, height), the correct linearization is i * height + j.
      // The previous i * width + j collided whenever height > width
      // (e.g. width=2, height=3: tiles (0,2) and (1,0) both got id 2).
      auto tile = std::make_unique<Tile>(i * height + j, i, j);
      tiles.push_back(tile.get());
      tileStorage.push_back(std::move(tile));
    }
  }

  // TODO: Model topology based on the architecture specs.
  // https://github.com/coredac/dataflow/issues/52.
  //
  // FIXME: the Links below are allocated without an owner. The previous
  // code held them in loop-local unique_ptrs, so every Link was destroyed
  // at the end of its iteration while tiles kept raw pointers to it
  // (use-after-free on any later link access). Leaking them here removes
  // the dangling pointers; real ownership belongs in an Architecture
  // member (e.g. std::vector<std::unique_ptr<Link>> linkStorage),
  // which getLink(int) also needs.
  int link_id = 0;
  for (int i = 0; i < width; ++i) {
    for (int j = 0; j < height; ++j) {
      Tile* tile = getTile(i, j);
      if (i > 0) {
        Link* link_towards_left = new Link(link_id++);
        link_towards_left->connect(tile, getTile(i - 1, j));
      }
      if (i < width - 1) {
        Link* link_towards_right = new Link(link_id++);
        link_towards_right->connect(tile, getTile(i + 1, j));
      }
      if (j > 0) {
        Link* link_towards_down = new Link(link_id++);
        link_towards_down->connect(tile, getTile(i, j - 1));
      }
      if (j < height - 1) {
        Link* link_towards_up = new Link(link_id++);
        link_towards_up->connect(tile, getTile(i, j + 1));
      }
    }
  }
}

// Looks a tile up by its unique id. Linear scan; asserts if absent.
Tile* Architecture::getTile(int id) {
  for (size_t idx = 0; idx < tiles.size(); ++idx) {
    Tile* candidate = tiles[idx];
    if (candidate->getId() == id) {
      return candidate;
    }
  }
  assert(false && "Tile with given ID not found");
  return nullptr;
}

// Looks a tile up by its grid coordinates. Linear scan; asserts if absent.
Tile* Architecture::getTile(int x, int y) {
  for (size_t idx = 0; idx < tiles.size(); ++idx) {
    Tile* candidate = tiles[idx];
    if (candidate->getX() != x || candidate->getY() != y) {
      continue;
    }
    return candidate;
  }
  assert(false && "Tile with given coordinates not found");
  return nullptr;
}
2 changes: 2 additions & 0 deletions lib/NeuraDialect/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@ add_mlir_dialect_library(MLIRNeura
Neura.cpp
NeuraTypes.cpp
NeuraPasses.cpp
Mapping/mapping_util.cpp
Architecture/Architecture.cpp

ADDITIONAL_HEADER_DIRS
${PROJECT_SOURCE_DIR}/include/NeuraDialect
Expand Down
13 changes: 13 additions & 0 deletions lib/NeuraDialect/Mapping/MappingState.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
#include "NeuraDialect/Mapping/MappingState.h"

#include <cassert>

using namespace mlir;
using namespace mlir::neura;

// Binds (resource, timeStep) `loc` to `op`.
// A location must be free when bound: the map assignment below would
// otherwise silently overwrite a previously placed op while the
// occupancy set stayed unchanged, so reject double-binding loudly in
// debug builds.
void MappingState::bindOp(MappingLoc loc, Operation *op) {
  assert(isAvailable(loc) && "binding an already-occupied location");
  loc_to_op[loc] = op;
  occupied_locs.insert(loc);
}

// Returns true if no op has been bound at `loc`.
// Uses find() instead of unordered_set::contains because contains() is
// C++20-only, while LLVM/MLIR's baseline toolchain standard is C++17.
bool MappingState::isAvailable(const MappingLoc &loc) const {
  return occupied_locs.find(loc) == occupied_locs.end();
}
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
#include <deque>

#include "NeuraDialect/mapping/mapping_util.h"
#include "NeuraDialect/Mapping/mapping_util.h"
#include "NeuraDialect/NeuraOps.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Operation.h"
Expand Down Expand Up @@ -79,7 +79,8 @@ SmallVector<RecurrenceCycle, 4> mlir::neura::collectRecurrenceCycles(Operation *
return recurrence_cycles;
}

int mlir::neura::calculateResMii(Operation *func_op, const AcceleratorConfig &config) {
int mlir::neura::calculateResMii(Operation *func_op,
const Architecture &architecture) {
int num_ops = 0;

// Count all "compute" operations (non-terminators, non-block ops).
Expand All @@ -98,7 +99,7 @@ int mlir::neura::calculateResMii(Operation *func_op, const AcceleratorConfig &co
llvm::errs() << "[calculateResMii] Total operations: " << num_ops << "\n";

// Avoid divide-by-zero
int tiles = std::max(1, config.num_tiles);
int num_tiles = std::max(1, architecture.getNumTiles());

return llvm::divideCeil(num_ops, tiles);
return llvm::divideCeil(num_ops, num_tiles);
}
1 change: 0 additions & 1 deletion lib/NeuraDialect/Transforms/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ add_mlir_library(
TransformCtrlToDataFlowPass.cpp
LeveragePredicatedValuePass.cpp
MapToAcceleratorPass.cpp
mapping/mapping_util.cpp

DEPENDS
MLIRNeuraTransformsIncGen
Expand Down
8 changes: 5 additions & 3 deletions lib/NeuraDialect/Transforms/MapToAcceleratorPass.cpp
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
#include <deque>

#include "NeuraDialect/Architecture/Architecture.h"
#include "NeuraDialect/NeuraDialect.h"
#include "NeuraDialect/NeuraOps.h"
#include "NeuraDialect/NeuraTypes.h"
#include "NeuraDialect/NeuraPasses.h"
#include "NeuraDialect/mapping/mapping_util.h"
#include "NeuraDialect/Mapping/mapping_util.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
Expand Down Expand Up @@ -58,8 +59,9 @@ struct MapToAcceleratorPass
func->setAttr("RecMII", rec_mii_attr);
}

AcceleratorConfig config{/*numTiles=*/8}; // Example
int res_mii = calculateResMii(func, config);
// AcceleratorConfig config{/*numTiles=*/8}; // Example
Architecture architecture(2, 2);
int res_mii = calculateResMii(func, architecture);
IntegerAttr res_mii_attr = IntegerAttr::get(
IntegerType::get(func.getContext(), 32), res_mii);
func->setAttr("ResMII", res_mii_attr);
Expand Down
2 changes: 1 addition & 1 deletion test/neura/ctrl/branch_for.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -81,4 +81,4 @@ func.func @loop_test() -> f32 {
// CTRL2DATA-NEXT: "neura.return"(%18) : (!neura.data<f32, i1>) -> ()
// CTRL2DATA-NEXT: }

// MII: func.func @loop_test() -> f32 attributes {RecMII = 4 : i32, ResMII = 2 : i32, accelerator = "neura"}
// MII: func.func @loop_test() -> f32 attributes {RecMII = 4 : i32, ResMII = 4 : i32, accelerator = "neura"}