Skip to content

Commit d536de3

Browse files
authored
Merge pull request #153 from coredac/architecture_spec
add support for max_ii (read max_allowed_ii_by_hw from the architecture YAML)
2 parents b390978 + cd34766 commit d536de3

File tree

4 files changed

+58
-41
lines changed

4 files changed

+58
-41
lines changed

lib/NeuraDialect/Transforms/GenerateCodePass.cpp

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -133,6 +133,11 @@ static std::string getConstantLiteral(Operation *op) {
133133
return "#" + std::to_string(integer_attr.getInt());
134134
if (auto float_attr = dyn_cast<FloatAttr>(value_attr))
135135
return "#" + std::to_string(float_attr.getValueAsDouble());
136+
//TODO: Issue #154: handle argument situations.
137+
// if (auto string_attr = dyn_cast<StringAttr>(value_attr)) {
138+
// std::string value = string_attr.getValue().str();
139+
// return value;
140+
// }
136141
}
137142
return "#0";
138143
}
@@ -143,6 +148,9 @@ static std::string getConstantLiteral(Operation *op) {
143148
return "#" + std::to_string(integer_attr.getInt());
144149
if (auto float_attr = dyn_cast<FloatAttr>(constant_value_attr))
145150
return "#" + std::to_string(float_attr.getValueAsDouble());
151+
//TODO: Issue #154: handle argument situations.
152+
// if (auto string_attr = dyn_cast<StringAttr>(constant_value_attr))
153+
// return string_attr.getValue().str();
146154
}
147155

148156
return "";

lib/NeuraDialect/Transforms/MapToAcceleratorPass.cpp

Lines changed: 46 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -538,7 +538,7 @@ mlir::neura::BaseTopology parseTopologyString(const std::string& topology_str) {
538538
}
539539

540540
// Helper function to parse architecture YAML configuration.
541-
bool parseArchitectureYAML(llvm::yaml::Document &doc, int &width, int &height,
541+
bool parseArchitectureYAML(llvm::yaml::Document &doc, int &width, int &height, int &max_ii,
542542
mlir::neura::TileDefaults &tile_defaults,
543543
std::vector<mlir::neura::TileOverride> &tile_overrides,
544544
mlir::neura::LinkDefaults &link_defaults,
@@ -550,66 +550,71 @@ bool parseArchitectureYAML(llvm::yaml::Document &doc, int &width, int &height,
550550
return false;
551551
}
552552

553-
auto *rootMap = llvm::dyn_cast<llvm::yaml::MappingNode>(root);
554-
if (!rootMap) {
553+
auto *root_map = llvm::dyn_cast<llvm::yaml::MappingNode>(root);
554+
if (!root_map) {
555555
llvm::errs() << "[MapToAcceleratorPass] YAML root is not a mapping\n";
556556
return false;
557557
}
558558

559559
// Iterate root mapping ONCE; find 'architecture' and 'tile_defaults'.
560-
for (auto &keyValuePair : *rootMap) {
561-
auto *keyNode = llvm::dyn_cast_or_null<llvm::yaml::ScalarNode>(keyValuePair.getKey());
562-
if (!keyNode) continue;
560+
for (auto &key_value_pair : *root_map) {
561+
auto *key_node = llvm::dyn_cast_or_null<llvm::yaml::ScalarNode>(key_value_pair.getKey());
562+
if (!key_node) continue;
563563

564-
llvm::SmallString<64> keyString;
565-
llvm::StringRef keyRef = keyNode->getValue(keyString);
564+
llvm::SmallString<64> key_string;
565+
llvm::StringRef key_ref = key_node->getValue(key_string);
566566

567-
if (keyRef == "architecture") {
568-
auto *architectureMap = llvm::dyn_cast_or_null<llvm::yaml::MappingNode>(keyValuePair.getValue());
569-
if (!architectureMap) continue;
567+
if (key_ref == "architecture") {
568+
auto *architecture_map = llvm::dyn_cast_or_null<llvm::yaml::MappingNode>(key_value_pair.getValue());
569+
if (!architecture_map) continue;
570570

571571
// Iterate architecture mapping ONCE; read width/height in the same pass.
572-
for (auto &architectureKeyValuePair : *architectureMap) {
573-
auto *architectureKeyNode = llvm::dyn_cast_or_null<llvm::yaml::ScalarNode>(architectureKeyValuePair.getKey());
574-
if (!architectureKeyNode) continue;
572+
for (auto &architecture_key_value_pair : *architecture_map) {
573+
auto *architecture_key_node = llvm::dyn_cast_or_null<llvm::yaml::ScalarNode>(architecture_key_value_pair.getKey());
574+
if (!architecture_key_node) continue;
575575

576-
llvm::SmallString<64> architectureKeyString;
577-
llvm::StringRef architectureKeyRef = architectureKeyNode->getValue(architectureKeyString);
578-
if (architectureKeyRef != "width" && architectureKeyRef != "height") continue;
579-
580-
auto *architectureValueNode = llvm::dyn_cast_or_null<llvm::yaml::ScalarNode>(architectureKeyValuePair.getValue());
581-
if (!architectureValueNode) continue;
582-
583-
llvm::SmallString<64> architectureValueString;
584-
llvm::StringRef architectureValueRef = architectureValueNode->getValue(architectureValueString);
585-
long long tempValue = 0;
586-
if (!architectureValueRef.getAsInteger(10, tempValue)) {
587-
if (architectureKeyRef == "width") width = static_cast<int>(tempValue);
588-
if (architectureKeyRef == "height") height = static_cast<int>(tempValue);
576+
llvm::SmallString<64> architecture_key_string;
577+
llvm::StringRef architecture_key_ref = architecture_key_node->getValue(architecture_key_string);
578+
if (architecture_key_ref == "width" || architecture_key_ref == "height" || architecture_key_ref == "max_allowed_ii_by_hw") {
579+
auto *architecture_value_node = llvm::dyn_cast_or_null<llvm::yaml::ScalarNode>(architecture_key_value_pair.getValue());
580+
if (!architecture_value_node) continue;
581+
582+
llvm::SmallString<64> architecture_value_string;
583+
llvm::StringRef architecture_value_ref = architecture_value_node->getValue(architecture_value_string);
584+
long long temp_value = 0;
585+
if (!architecture_value_ref.getAsInteger(10, temp_value)) {
586+
if (architecture_key_ref == "width") width = static_cast<int>(temp_value);
587+
if (architecture_key_ref == "height") height = static_cast<int>(temp_value);
588+
if (architecture_key_ref == "max_allowed_ii_by_hw") {
589+
max_ii = static_cast<int>(temp_value);
590+
}
591+
}
592+
} else {
593+
continue;
589594
}
590595
}
591-
} else if (keyRef == "tile_defaults") {
592-
auto *tile_defaults_map = llvm::dyn_cast_or_null<llvm::yaml::MappingNode>(keyValuePair.getValue());
596+
} else if (key_ref == "tile_defaults") {
597+
auto *tile_defaults_map = llvm::dyn_cast_or_null<llvm::yaml::MappingNode>(key_value_pair.getValue());
593598
if (tile_defaults_map) {
594599
parseTileDefaults(tile_defaults_map, tile_defaults);
595600
}
596-
} else if (keyRef == "tile_overrides") {
597-
auto *tile_overrides_seq = llvm::dyn_cast_or_null<llvm::yaml::SequenceNode>(keyValuePair.getValue());
601+
} else if (key_ref == "tile_overrides") {
602+
auto *tile_overrides_seq = llvm::dyn_cast_or_null<llvm::yaml::SequenceNode>(key_value_pair.getValue());
598603
if (tile_overrides_seq) {
599604
parseTileOverrides(tile_overrides_seq, tile_overrides);
600605
}
601-
} else if (keyRef == "link_defaults") {
602-
auto *link_defaults_map = llvm::dyn_cast_or_null<llvm::yaml::MappingNode>(keyValuePair.getValue());
606+
} else if (key_ref == "link_defaults") {
607+
auto *link_defaults_map = llvm::dyn_cast_or_null<llvm::yaml::MappingNode>(key_value_pair.getValue());
603608
if (link_defaults_map) {
604609
parseLinkDefaults(link_defaults_map, link_defaults);
605610
}
606-
} else if (keyRef == "link_overrides") {
607-
auto *link_overrides_seq = llvm::dyn_cast_or_null<llvm::yaml::SequenceNode>(keyValuePair.getValue());
611+
} else if (key_ref == "link_overrides") {
612+
auto *link_overrides_seq = llvm::dyn_cast_or_null<llvm::yaml::SequenceNode>(key_value_pair.getValue());
608613
if (link_overrides_seq) {
609614
parseLinkOverrides(link_overrides_seq, link_overrides);
610615
}
611-
} else if (keyRef == "base_topology") {
612-
auto *topology_node = llvm::dyn_cast_or_null<llvm::yaml::ScalarNode>(keyValuePair.getValue());
616+
} else if (key_ref == "base_topology") {
617+
auto *topology_node = llvm::dyn_cast_or_null<llvm::yaml::ScalarNode>(key_value_pair.getValue());
613618
if (topology_node) {
614619
llvm::SmallString<64> topology_string;
615620
llvm::StringRef topology_ref = topology_node->getValue(topology_string);
@@ -739,6 +744,7 @@ struct MapToAcceleratorPass
739744
std::string architecture_spec_file = mlir::neura::getArchitectureSpecFile();
740745
int yaml_width = -1;
741746
int yaml_height = -1;
747+
int yaml_max_ii = 20; // Default max_ii = 20
742748
mlir::neura::TileDefaults yaml_tile_defaults;
743749
std::vector<mlir::neura::TileOverride> tile_overrides;
744750
mlir::neura::LinkDefaults yaml_link_defaults;
@@ -773,7 +779,7 @@ struct MapToAcceleratorPass
773779
}
774780

775781
// Parse YAML configuration
776-
if (!parseArchitectureYAML(firstDoc, yaml_width, yaml_height, yaml_tile_defaults, tile_overrides, yaml_link_defaults, link_overrides, base_topology)) {
782+
if (!parseArchitectureYAML(firstDoc, yaml_width, yaml_height, yaml_max_ii, yaml_tile_defaults, tile_overrides, yaml_link_defaults, link_overrides, base_topology)) {
777783
return;
778784
}
779785

@@ -845,7 +851,8 @@ struct MapToAcceleratorPass
845851
int res_mii = calculateResMii(func, architecture);
846852

847853
const int possibleMinII = std::max(rec_mii, res_mii);
848-
constexpr int maxII = 20;
854+
const int maxII = yaml_max_ii; // Use YAML config (default 20 if not specified)
855+
849856
std::vector<Operation *> topologically_sorted_ops =
850857
getTopologicallySortedOps(func);
851858
if (topologically_sorted_ops.empty()) {

test/arch_spec/arch_spec_example.yaml

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
11
architecture:
22
name: "NeuraCGRA"
33
version: "1.0"
4-
width: 8
5-
height: 8
4+
width: 4
5+
height: 4
6+
max_allowed_ii_by_hw: 20
67

78
tile_defaults:
89
num_registers: 128

test/arch_spec/architecture.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ architecture:
33
version: "1.0"
44
width: 4
55
height: 4
6+
max_allowed_ii_by_hw: 20
67

78
tile_defaults:
89
num_registers: 32

0 commit comments

Comments
 (0)