diff --git a/CMakeLists.txt b/CMakeLists.txt index 55ecf8cd..81bf2215 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -40,12 +40,16 @@ add_definitions(${MLIR_DEFINITIONS}) # Tools built by this project or LLVM set(MLIR_NEURA_OPT ${CMAKE_BINARY_DIR}/tools/mlir-neura-opt/mlir-neura-opt) +set(NEURA_INTERPRETER ${CMAKE_BINARY_DIR}/tools/neura-interpreter/neura-interpreter) set(FILECHECK ${LLVM_TOOLS_BINARY_DIR}/FileCheck) set(MLIR_OPT ${LLVM_TOOLS_BINARY_DIR}/mlir-opt) set(MLIR_TRANSLATE ${LLVM_TOOLS_BINARY_DIR}/mlir-translate) set(LLC ${LLVM_TOOLS_BINARY_DIR}/llc) -# Configure lit.cfg from lit.cfg.in +# Builds the interpreter. +add_subdirectory(tools/neura-interpreter) + +# Configures lit.cfg from lit.cfg.in configure_file( ${CMAKE_SOURCE_DIR}/test/lit.cfg.in # ${CMAKE_BINARY_DIR}/test/lit.cfg @@ -53,10 +57,11 @@ configure_file( @ONLY ) -# Add a custom target for running lit tests +# Adds a custom target for running lit tests add_custom_target(check-neura COMMAND ${LLVM_EXTERNAL_LIT} -sv ${CMAKE_BINARY_DIR}/test -DEPENDS mlir-neura-opt +DEPENDS mlir-neura-opt neura-interpreter WORKING_DIRECTORY ${CMAKE_BINARY_DIR} COMMENT "Running Sora Dialect Tests with lit" ) + diff --git a/test/lit.cfg.in b/test/lit.cfg.in index c732a7d0..33640d31 100644 --- a/test/lit.cfg.in +++ b/test/lit.cfg.in @@ -9,6 +9,7 @@ config.test_exec_root = os.path.dirname(__file__) # Tool substitutions from CMake config.substitutions.append(('mlir-neura-opt', '@MLIR_NEURA_OPT@')) +config.substitutions.append(('neura-interpreter', '@NEURA_INTERPRETER@')) config.substitutions.append(('FileCheck', '@FILECHECK@')) config.substitutions.append(('mlir-opt', '@MLIR_OPT@')) config.substitutions.append(('mlir-translate', '@MLIR_TRANSLATE@')) diff --git a/test/neura/interpreter/interpreter.mlir b/test/neura/interpreter/interpreter.mlir new file mode 100644 index 00000000..bce96544 --- /dev/null +++ b/test/neura/interpreter/interpreter.mlir @@ -0,0 +1,13 @@ +// RUN: neura-interpreter %s | FileCheck %s + +module { + func.func @test() -> f32 { + %arg0 = 
arith.constant 9.0 : f32 + %cst = arith.constant 2.0 : f32 + %0 = neura.mov %arg0 : f32 -> f32 + %1 = neura.mov %cst : f32 -> f32 + %2 = "neura.fadd"(%0, %1) : (f32, f32) -> f32 + return %2 : f32 + // CHECK: [neura-interpreter] Output: 11.000000 + } +} diff --git a/test/neura/interpreter/lower_and_interpreter.mlir b/test/neura/interpreter/lower_and_interpreter.mlir new file mode 100644 index 00000000..c76db420 --- /dev/null +++ b/test/neura/interpreter/lower_and_interpreter.mlir @@ -0,0 +1,39 @@ +// RUN: mlir-opt %s \ +// RUN: --convert-scf-to-cf \ +// RUN: --convert-math-to-llvm \ +// RUN: --convert-arith-to-llvm \ +// RUN: --convert-func-to-llvm \ +// RUN: --convert-cf-to-llvm \ +// RUN: --reconcile-unrealized-casts \ +// RUN: -o %t-lowered-to-llvm.mlir + +// RUN: mlir-translate -mlir-to-llvmir \ +// RUN: %t-lowered-to-llvm.mlir \ +// RUN: -o %t-lower_and_interpreter.ll + +// RUN: llc %t-lower_and_interpreter.ll \ +// RUN: -filetype=obj -o %t-out.o + +// RUN: clang++ %S/main.cpp %t-out.o \ +// RUN: -o %t-out.bin + +// RUN: %t-out.bin > %t-dumped_output.txt + +// RUN: mlir-neura-opt --lower-arith-to-neura --insert-mov %s \ +// RUN: -o %t-neura.mlir + +// RUN: neura-interpreter %t-neura.mlir >> %t-dumped_output.txt + +// RUN: FileCheck %s < %t-dumped_output.txt + +module { + func.func @test() -> f32 attributes { llvm.emit_c_interface }{ + %arg0 = arith.constant 9.0 : f32 + %cst = arith.constant 2.0 : f32 + %0 = arith.addf %arg0, %cst : f32 + // CHECK: Golden output: [[OUTPUT:[0-9]+\.[0-9]+]] + // CHECK: [neura-interpreter] Output: [[OUTPUT]] + return %0 : f32 + } +} + diff --git a/test/neura/interpreter/main.cpp b/test/neura/interpreter/main.cpp new file mode 100644 index 00000000..0a67b549 --- /dev/null +++ b/test/neura/interpreter/main.cpp @@ -0,0 +1,10 @@ +#include <cstdio> + +extern "C" float test(); + +int main() { + float result = test(); + std::printf("Golden output: %f\n", result); + return 0; +} + diff --git a/tools/neura-interpreter/CMakeLists.txt b/tools/neura-interpreter/CMakeLists.txt new file 
mode 100644 index 00000000..ff9c62a2 --- /dev/null +++ b/tools/neura-interpreter/CMakeLists.txt @@ -0,0 +1,21 @@ +add_executable(neura-interpreter neura-interpreter.cpp) + +target_include_directories(neura-interpreter + PRIVATE + ${CMAKE_SOURCE_DIR}/include + ${CMAKE_BINARY_DIR}/include +) + +target_link_libraries(neura-interpreter + PRIVATE + MLIRParser + MLIRIR + MLIRArithDialect + MLIRSupport + MLIRFuncDialect + MLIRDialect # Optional: for builtin + NeuraDialect # Your dialect +) + +mlir_check_all_link_libraries(neura-interpreter) + diff --git a/tools/neura-interpreter/neura-interpreter.cpp b/tools/neura-interpreter/neura-interpreter.cpp new file mode 100644 index 00000000..972e442c --- /dev/null +++ b/tools/neura-interpreter/neura-interpreter.cpp @@ -0,0 +1,89 @@ +#include "llvm/Support/Format.h" +#include "llvm/Support/SourceMgr.h" +#include "mlir/Dialect/Arith/IR/Arith.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/Operation.h" +#include "mlir/Parser/Parser.h" +#include "mlir/Support/FileUtilities.h" +#include "mlir/Support/LogicalResult.h" +#include "mlir/IR/AsmState.h" +#include "mlir/InitAllDialects.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" + +#include "NeuraDialect/NeuraDialect.h" +#include "NeuraDialect/NeuraOps.h" + +#include <iostream> +#include <string> + +using namespace mlir; + +int main(int argc, char **argv) { + if (argc < 2) { + llvm::errs() << "Usage: neura-interpreter <input.mlir>\n"; + return 1; + } + + DialectRegistry registry; + registry.insert<arith::ArithDialect, func::FuncDialect, neura::NeuraDialect>(); + + MLIRContext context; + context.appendDialectRegistry(registry); + + llvm::SourceMgr sourceMgr; + auto fileOrErr = mlir::openInputFile(argv[1]); + if (!fileOrErr) { + llvm::errs() << "Error opening file\n"; + return 1; + } + + sourceMgr.AddNewSourceBuffer(std::move(fileOrErr), llvm::SMLoc()); + + OwningOpRef<ModuleOp> module = parseSourceFile<ModuleOp>(sourceMgr, &context); + if (!module) { + llvm::errs() << "Failed to parse MLIR input file\n"; + return 1; + } + + llvm::DenseMap<Value, float> valueMap; + + for 
(auto func : module->getOps<func::FuncOp>()) { + Block &block = func.getBody().front(); + + for (Operation &op : block.getOperations()) { + if (auto constOp = dyn_cast<arith::ConstantOp>(op)) { + auto attr = constOp.getValue(); + + float val = 0.0f; + + if (auto floatAttr = llvm::dyn_cast<FloatAttr>(attr)) { + val = floatAttr.getValueAsDouble(); // or .convertToFloat() + } else if (auto intAttr = llvm::dyn_cast<IntegerAttr>(attr)) { + val = static_cast<float>(intAttr.getInt()); // interpret integer as float + } else { + llvm::errs() << "Unsupported constant type in arith.constant\n"; + return 1; + } + + valueMap[constOp.getResult()] = val; + } else if (auto movOp = dyn_cast<neura::MovOp>(op)) { + valueMap[movOp.getResult()] = valueMap[movOp.getOperand()]; + } else if (auto faddOp = dyn_cast<neura::FAddOp>(op)) { + float lhs = valueMap[faddOp.getLhs()]; + float rhs = valueMap[faddOp.getRhs()]; + valueMap[faddOp.getResult()] = lhs + rhs; + } else if (auto retOp = dyn_cast<func::ReturnOp>(op)) { + float result = valueMap[retOp.getOperand(0)]; + llvm::outs() << "[neura-interpreter] Output: " << llvm::format("%.6f", result) << "\n"; + } else { + llvm::errs() << "Unhandled op: "; + op.print(llvm::errs()); + llvm::errs() << "\n"; + return 1; + } + } + } + + return 0; +}