Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 7 additions & 2 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -40,23 +40,28 @@ add_definitions(${MLIR_DEFINITIONS})

# Tools built by this project or LLVM.
set(MLIR_NEURA_OPT ${CMAKE_BINARY_DIR}/tools/mlir-neura-opt/mlir-neura-opt)
set(NEURA_INTERPRETER ${CMAKE_BINARY_DIR}/tools/neura-interpreter/neura-interpreter)
set(FILECHECK ${LLVM_TOOLS_BINARY_DIR}/FileCheck)
set(MLIR_OPT ${LLVM_TOOLS_BINARY_DIR}/mlir-opt)
set(MLIR_TRANSLATE ${LLVM_TOOLS_BINARY_DIR}/mlir-translate)
set(LLC ${LLVM_TOOLS_BINARY_DIR}/llc)

# Builds the interpreter.
add_subdirectory(tools/neura-interpreter)

# Configures lit.cfg from lit.cfg.in, substituting the tool paths above.
# NOTE(review): this writes the generated lit.cfg into the *source* tree;
# generated files normally belong under ${CMAKE_BINARY_DIR}. Confirm lit is
# intentionally discovering its config from the source tree before moving it.
configure_file(
  ${CMAKE_SOURCE_DIR}/test/lit.cfg.in
  ${CMAKE_SOURCE_DIR}/test/lit.cfg
  @ONLY
)

# Adds a custom target for running lit tests.
add_custom_target(check-neura
  COMMAND ${LLVM_EXTERNAL_LIT} -sv ${CMAKE_BINARY_DIR}/test
  # The lit tests invoke both mlir-neura-opt and neura-interpreter, so both
  # binaries must be built (and up to date) before lit runs.
  DEPENDS mlir-neura-opt neura-interpreter
  WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
  COMMENT "Running Neura dialect tests with lit"
)

1 change: 1 addition & 0 deletions test/lit.cfg.in
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ config.test_exec_root = os.path.dirname(__file__)

# Tool substitutions filled in by CMake's configure_file (@VAR@ placeholders).
# List order is preserved: lit applies substitutions in the order appended.
_tool_paths = [
    ('mlir-neura-opt', '@MLIR_NEURA_OPT@'),
    ('neura-interpreter', '@NEURA_INTERPRETER@'),
    ('FileCheck', '@FILECHECK@'),
    ('mlir-opt', '@MLIR_OPT@'),
    ('mlir-translate', '@MLIR_TRANSLATE@'),
]
config.substitutions.extend(_tool_paths)
Expand Down
13 changes: 13 additions & 0 deletions test/neura/interpreter/interpreter.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
// RUN: neura-interpreter %s | FileCheck %s

// Checks that the interpreter evaluates a straight-line dataflow of float
// constants, neura.mov forwarding, and a neura.fadd.
module {
  func.func @test() -> f32 {
    %arg0 = arith.constant 9.0 : f32
    %cst = arith.constant 2.0 : f32
    %0 = neura.mov %arg0 : f32 -> f32
    %1 = neura.mov %cst : f32 -> f32
    %2 = "neura.fadd"(%0, %1) : (f32, f32) -> f32
    return %2 : f32
    // The interpreter prints the returned value with "%.6f", so 9.0 + 2.0
    // appears as 11.000000. The previous pattern "1.1" is not a substring of
    // that output and could never match.
    // CHECK: [neura-interpreter] Output: 11.000000
  }
}
39 changes: 39 additions & 0 deletions test/neura/interpreter/lower_and_interpreter.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
// End-to-end consistency test: the same function is compiled natively via the
// standard LLVM pipeline to produce a golden value, then lowered to the Neura
// dialect and evaluated by neura-interpreter; FileCheck verifies both paths
// print the identical number.

// Lowers to the LLVM dialect.
// RUN: mlir-opt %s \
// RUN: --convert-scf-to-cf \
// RUN: --convert-math-to-llvm \
// RUN: --convert-arith-to-llvm \
// RUN: --convert-func-to-llvm \
// RUN: --convert-cf-to-llvm \
// RUN: --reconcile-unrealized-casts \
// RUN: -o %t-lowered-to-llvm.mlir

// Translates to LLVM IR.
// RUN: mlir-translate -mlir-to-llvmir \
// RUN: %t-lowered-to-llvm.mlir \
// RUN: -o %t-lower_and_interpreter.ll

// Compiles to a native object file.
// RUN: llc %t-lower_and_interpreter.ll \
// RUN: -filetype=obj -o %t-out.o

// Links the C++ driver and records the golden output. %S anchors main.cpp to
// the directory of this test file so the command does not depend on the
// working directory lit happens to execute in.
// RUN: clang++ %S/main.cpp %t-out.o \
// RUN: -o %t-out.bin

// RUN: %t-out.bin > %t-dumped_output.txt

// Lowers to the Neura dialect and appends the interpreter's output.
// RUN: mlir-neura-opt --lower-arith-to-neura --insert-mov %s \
// RUN: -o %t-neura.mlir

// RUN: neura-interpreter %t-neura.mlir >> %t-dumped_output.txt

// RUN: FileCheck %s < %t-dumped_output.txt

module {
  func.func @test() -> f32 attributes { llvm.emit_c_interface }{
    %arg0 = arith.constant 9.0 : f32
    %cst = arith.constant 2.0 : f32
    %0 = arith.addf %arg0, %cst : f32
    // Both tools print with "%f"/"%.6f", so the captured value must repeat
    // exactly on the interpreter line.
    // CHECK: Golden output: [[OUTPUT:[0-9]+\.[0-9]+]]
    // CHECK: [neura-interpreter] Output: [[OUTPUT]]
    return %0 : f32
  }
}

10 changes: 10 additions & 0 deletions test/neura/interpreter/main.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
#include <cstdio>

// Entry point of the MLIR-compiled function; resolved at link time from the
// object file produced by the lit test's lowering pipeline.
extern "C" float test();

// Runs the natively compiled function and prints its result as the golden
// reference value that the interpreter's output is compared against.
int main() {
  const float golden = test();
  std::printf("Golden output: %f\n", golden);
  return 0;
}

21 changes: 21 additions & 0 deletions tools/neura-interpreter/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# Standalone tool that parses an MLIR module and evaluates Neura dialect ops.
add_executable(neura-interpreter neura-interpreter.cpp)

# Project headers from the source tree plus generated (TableGen) headers from
# the build tree.
target_include_directories(neura-interpreter
PRIVATE
${CMAKE_SOURCE_DIR}/include
${CMAKE_BINARY_DIR}/include
)

# MLIR libraries for parsing and for the dialects the interpreter registers
# (arith, func) plus the project's own Neura dialect.
target_link_libraries(neura-interpreter
PRIVATE
MLIRParser
MLIRIR
MLIRArithDialect
MLIRSupport
MLIRFuncDialect
MLIRDialect # core dialect infrastructure (builtin dialect)
NeuraDialect # the dialect whose ops this tool interprets
)

mlir_check_all_link_libraries(neura-interpreter)

89 changes: 89 additions & 0 deletions tools/neura-interpreter/neura-interpreter.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
#include "llvm/Support/Format.h"
#include "llvm/Support/SourceMgr.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/FileUtilities.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/IR/AsmState.h"
#include "mlir/InitAllDialects.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"

#include "NeuraDialect/NeuraDialect.h"
#include "NeuraDialect/NeuraOps.h"

#include <unordered_map>
#include <iostream>

using namespace mlir;

int main(int argc, char **argv) {
if (argc < 2) {
llvm::errs() << "Usage: neura-interpreter <input.mlir>\n";
return 1;
}

DialectRegistry registry;
registry.insert<neura::NeuraDialect, func::FuncDialect, arith::ArithDialect>();

MLIRContext context;
context.appendDialectRegistry(registry);

llvm::SourceMgr sourceMgr;
auto fileOrErr = mlir::openInputFile(argv[1]);
if (!fileOrErr) {
llvm::errs() << "Error opening file\n";
return 1;
}

sourceMgr.AddNewSourceBuffer(std::move(fileOrErr), llvm::SMLoc());

OwningOpRef<ModuleOp> module = parseSourceFile<ModuleOp>(sourceMgr, &context);
if (!module) {
llvm::errs() << "Failed to parse MLIR input file\n";
return 1;
}

llvm::DenseMap<Value, float> valueMap;

for (auto func : module->getOps<func::FuncOp>()) {
Block &block = func.getBody().front();

for (Operation &op : block.getOperations()) {
if (auto constOp = dyn_cast<mlir::arith::ConstantOp>(op)) {
auto attr = constOp.getValue();

float val = 0.0f;

if (auto floatAttr = llvm::dyn_cast<mlir::FloatAttr>(attr)) {
val = floatAttr.getValueAsDouble(); // or .convertToFloat()
} else if (auto intAttr = llvm::dyn_cast<mlir::IntegerAttr>(attr)) {
val = static_cast<float>(intAttr.getInt()); // interpret integer as float
} else {
llvm::errs() << "Unsupported constant type in arith.constant\n";
return 1;
}

valueMap[constOp.getResult()] = val;
} else if (auto movOp = dyn_cast<neura::MovOp>(op)) {
valueMap[movOp.getResult()] = valueMap[movOp.getOperand()];
} else if (auto faddOp = dyn_cast<neura::FAddOp>(op)) {
float lhs = valueMap[faddOp.getLhs()];
float rhs = valueMap[faddOp.getRhs()];
valueMap[faddOp.getResult()] = lhs + rhs;
} else if (auto retOp = dyn_cast<func::ReturnOp>(op)) {
float result = valueMap[retOp.getOperand(0)];
llvm::outs() << "[neura-interpreter] Output: " << llvm::format("%.6f", result) << "\n";
} else {
llvm::errs() << "Unhandled op: ";
op.print(llvm::errs());
llvm::errs() << "\n";
return 1;
}
}
}

return 0;
}