Skip to content
66 changes: 57 additions & 9 deletions include/NeuraDialect/NeuraOps.td
Original file line number Diff line number Diff line change
Expand Up @@ -81,8 +81,8 @@ def Neura_FMulOp : Op<NeuraDialect, "fmul"> {

def Neura_FDivOp : Op<NeuraDialect, "fdiv"> {
  let summary = "Floating division operation";
  // Both operands are required: a division without a divisor is meaningless,
  // so $rhs must not be declared Optional. This also matches the conversion
  // pattern in ArithToNeuraPass, which always supplies both operands.
  let arguments = (ins AnyType:$lhs, AnyType:$rhs);
  let results = (outs AnyType:$result);
  // let assemblyFormat = "$lhs `,` $rhs attr-dict `:` type($result)";
}

Expand Down Expand Up @@ -211,6 +211,54 @@ def Neura_CastOp : Op<NeuraDialect, "cast">{
// let assemblyFormat = "$input type($input) `->` type($output) `,` $predicate attr-dict";
}

// Defines an alloca operation for memory allocation.
// NOTE(review): both $size and $result are constrained only to AnyType.
// Presumably $size is a predicated integer (!neura.data<iN, i1>) and $result
// is a pointer type, as in the example below -- consider tightening the
// constraints once a dedicated predicated-type constraint exists. TODO confirm.
def Neura_AllocaOp : Op<NeuraDialect, "alloca"> {
let summary = "Memory allocation operation";
let description = [{
Allocates memory on the stack, similar to llvm.alloca.
Takes a predicated size value and returns a pointer to the allocated memory.

Example:
%ptr = neura.alloca %size : !neura.data<i32, i1> -> !llvm.ptr
}];

let arguments = (ins AnyType:$size);
let results = (outs AnyType:$result);
let assemblyFormat = "$size attr-dict `:` type($size) `->` type($result)";
}

// Defines a sign extension operation.
// NOTE(review): AnyType constraints mean no verifier enforces that the result
// width is larger than the input width -- relies on well-formed input IR.
def Neura_SExtOp : Op<NeuraDialect, "sext"> {
let summary = "Sign extension operation";
let description = [{
Sign extends a value from a smaller integer type to a larger integer type.
Similar to llvm.sext, but works with predicated values.

Example:
%extended = neura.sext %value : !neura.data<i8, i1> -> !neura.data<i32, i1>
}];

let arguments = (ins AnyType:$value);
let results = (outs AnyType:$result);
let assemblyFormat = "$value attr-dict `:` type($value) `->` type($result)";
}

// Defines a zero extension operation.
// NOTE(review): AnyType constraints mean no verifier enforces that the result
// width is larger than the input width -- relies on well-formed input IR.
def Neura_ZExtOp : Op<NeuraDialect, "zext"> {
let summary = "Zero extension operation";
let description = [{
Zero extends a value from a smaller integer type to a larger integer type.
Similar to llvm.zext, but works with predicated values.

Example:
%extended = neura.zext %value : !neura.data<i8, i1> -> !neura.data<i32, i1>
}];

let arguments = (ins AnyType:$value);
let results = (outs AnyType:$result);
let assemblyFormat = "$value attr-dict `:` type($value) `->` type($result)";
}

// ----------------------------------------------------
// Defines vector operations.

Expand Down Expand Up @@ -264,7 +312,7 @@ def Neura_DataMovOp : Op<NeuraDialect, "data_mov"> {
// ----------------------------------------------------
// Defines ctrl-related operations.

// Phi operation for merging values in dataflow form
// Defines phi operation for merging values in dataflow form.
def Neura_PhiOp : Op<NeuraDialect, "phi"> {
let summary = "Phi node in dataflow form";
let description = [{
Expand All @@ -280,11 +328,11 @@ def Neura_PhiOp : Op<NeuraDialect, "phi"> {
let arguments = (ins Variadic<AnyType>:$inputs);
let results = (outs AnyType:$result);

// Explicitly specify types for operands in the assembly format
// Explicitly specifies types for operands in the assembly format.
// let assemblyFormat = "$init_val `:` type($init_val) `,` $loop_val `:` type($loop_val) attr-dict `,` type($result)";
}

// Control movement extending base move but with different signature.
// Defines control movement extending base move but with different signature.
def Neura_CtrlMovOp : Op<NeuraDialect, "ctrl_mov"> {
let summary = "Control movement operation";
let description = [{
Expand All @@ -295,15 +343,15 @@ def Neura_CtrlMovOp : Op<NeuraDialect, "ctrl_mov"> {
ctrl_mov %value to %placeholder : f32 // Connect value to placeholder
}];

// Add type constraints for both operands
// Adds type constraints for both operands.
let arguments = (ins AnyType:$value, AnyType:$target);
let results = (outs);

// Correct assembly format - types must be space-separated
// Corrects assembly format - types must be space-separated.
let assemblyFormat = "$value `->` $target attr-dict `:` type($value) type($target)";
}

// Reserve operation for control flow values.
// Defines reserve operation for control flow values.
def Neura_ReserveOp : Op<NeuraDialect, "reserve"> {
let summary = "Creates a placeholder for control flow values";
let description = [{
Expand Down Expand Up @@ -390,7 +438,7 @@ def Neura_LoopControlOp : Op<NeuraDialect, "loop_control">{
predicate bit is initially 0, while the start value's predicate bit is 1.

Example:
// Loop control that calculates next index and validity in one step
// Shows loop control that calculates next index and validity in one step.
%next_idx, %loop_valid = neura.loop_control(
parent_valid = %parent_valid,
start = %start_val,
Expand Down
3 changes: 1 addition & 2 deletions lib/Conversion/ArithToNeura/ArithToNeuraPass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -160,8 +160,7 @@ struct ArithFDivToNeuraFDiv : public OpRewritePattern<mlir::arith::DivFOp> {
Type result_type = op.getType();

// Optional predicate: default to null.
rewriter.replaceOpWithNewOp<neura::FDivOp>(op, result_type, lhs, rhs,
nullptr);
rewriter.replaceOpWithNewOp<neura::FDivOp>(op, result_type, lhs, rhs);
return success();
}
};
Expand Down
171 changes: 167 additions & 4 deletions lib/Conversion/LlvmToNeura/LlvmToNeuraPass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ struct LlvmFAddToNeuraFAdd : public OpRewritePattern<mlir::LLVM::FAddOp> {
if (!mlir::isa<FloatType>(result_type))
return failure();

// Optional predicate: default to 'none'
// Sets optional predicate: default to 'none'.
rewriter.replaceOpWithNewOp<neura::FAddOp>(op, result_type, lhs, rhs);
return success();
}
Expand All @@ -72,7 +72,7 @@ struct LlvmFSubToNeuraFSub : public OpRewritePattern<mlir::LLVM::FSubOp> {
return failure();
}

// Optional predicate: default to 'none'.
// Sets optional predicate: default to 'none'.
rewriter.replaceOpWithNewOp<neura::FSubOp>(op, result_type, lhs, rhs,
Value());
return success();
Expand Down Expand Up @@ -291,19 +291,168 @@ struct LlvmConstantToNeuraConstant : public OpRewritePattern<LLVM::ConstantOp> {
PatternRewriter &rewriter) const override {
auto attr = op.getValue();

// Creates operation state manually
// Creates operation state manually.
OperationState state(op.getLoc(), neura::ConstantOp::getOperationName());
state.addAttribute("value", attr);
state.addAttribute("predicate", rewriter.getBoolAttr(true));
state.addTypes(op.getType());

// Creates the operation and replace
// Creates the operation and replaces.
Operation *newOp = rewriter.create(state);
rewriter.replaceOp(op, newOp->getResults());
return success();
}
};

// Lowers llvm.alloca to neura.alloca, forwarding the array-size operand and
// the pointer result type unchanged.
struct LlvmAllocaToNeuraAlloca : public OpRewritePattern<LLVM::AllocaOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(LLVM::AllocaOp op,
                                PatternRewriter &rewriter) const override {
    Value size = op.getArraySize();
    // Keeps the original result type and size operand as-is; no predicated
    // (!neura.data) type conversion is performed here.
    // NOTE(review): if neura.alloca is expected to take a predicated size,
    // that conversion still needs to be added -- TODO confirm.
    Type result_type = op.getType();

    rewriter.replaceOpWithNewOp<neura::AllocaOp>(op, result_type, size);
    return success();
  }
};

// Rewrites llvm.sext into the equivalent neura.sext, preserving the operand
// and the result type.
struct LlvmSExtToNeuraSExt : public OpRewritePattern<LLVM::SExtOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(LLVM::SExtOp op,
                                PatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<neura::SExtOp>(op, op.getType(), op.getArg());
    return success();
  }
};

// Rewrites llvm.zext into the equivalent neura.zext, preserving the operand
// and the result type.
struct LlvmZExtToNeuraZExt : public OpRewritePattern<LLVM::ZExtOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(LLVM::ZExtOp op,
                                PatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<neura::ZExtOp>(op, op.getType(), op.getArg());
    return success();
  }
};

// Rewrites llvm.mul into neura.mul, keeping both operands and the result type.
struct LlvmMulToNeuraMul : public OpRewritePattern<LLVM::MulOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(LLVM::MulOp op,
                                PatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<neura::MulOp>(op, op.getType(), op.getLhs(),
                                              op.getRhs());
    return success();
  }
};

// Converts an llvm.func tagged with the Neura accelerator attribute into a
// func.func with the same name, attributes, and body.
struct LlvmFuncToNeuraFunc : public OpRewritePattern<LLVM::LLVMFuncOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(LLVM::LLVMFuncOp op,
                                PatternRewriter &rewriter) const override {
    // Only converts functions explicitly targeted at the Neura accelerator.
    auto target = op->getAttrOfType<StringAttr>(mlir::accel::kAcceleratorAttr);
    if (!target || target.getValue() != mlir::accel::kNeuraTarget) {
      return failure();
    }

    // Converts LLVMFunctionType to FunctionType. An LLVM function always
    // carries a return type, but a void return must map to *zero* results on
    // func.func; otherwise the new function would claim an !llvm.void result.
    auto llvm_func_type = op.getFunctionType();
    SmallVector<Type> result_types;
    if (!mlir::isa<LLVM::LLVMVoidType>(llvm_func_type.getReturnType())) {
      result_types.push_back(llvm_func_type.getReturnType());
    }
    auto func_type =
        rewriter.getFunctionType(llvm_func_type.getParams(), result_types);

    // Creates the new func.func via OperationState to keep full control over
    // the attribute set.
    OperationState state(op.getLoc(), func::FuncOp::getOperationName());
    state.addAttribute("sym_name", rewriter.getStringAttr(op.getName()));
    state.addAttribute("function_type", TypeAttr::get(func_type));

    // Copies every remaining attribute verbatim; the symbol name and the
    // function type are handled separately above.
    SmallVector<NamedAttribute> attrs;
    for (auto attr : op->getAttrs()) {
      if (attr.getName() == "function_type" || attr.getName() == "sym_name") {
        continue;
      }
      attrs.push_back(attr);
    }
    state.addAttributes(attrs);

    // Adds the (initially empty) body region.
    state.addRegion();

    auto new_func = cast<func::FuncOp>(rewriter.create(state));

    // Moves the function body from the old function into the new one.
    rewriter.inlineRegionBefore(op.getBody(), new_func.getBody(),
                                new_func.getBody().end());

    // Replaces the old function (both have zero results).
    rewriter.replaceOp(op, new_func);
    return success();
  }
};

// Rewrites llvm.call into func.call when a func.func with the callee's name
// already exists in the module (i.e. the callee was converted earlier).
struct LlvmCallToFuncCall : public OpRewritePattern<LLVM::CallOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(LLVM::CallOp op,
                                PatternRewriter &rewriter) const override {
    // Indirect calls carry no symbol callee and are left untouched.
    auto callee = op.getCallee();
    if (!callee) {
      return failure();
    }

    ModuleOp module = op->getParentOfType<ModuleOp>();
    if (!module) {
      return failure();
    }

    // Only rewrites calls whose target exists as a func.func.
    func::FuncOp funcOp = module.lookupSymbol<func::FuncOp>(callee.value());
    if (!funcOp) {
      return failure();
    }

    // Uses the converted function's signature for the call result types.
    auto resultTypes = funcOp.getFunctionType().getResults();

    auto newCall = rewriter.create<func::CallOp>(
        op.getLoc(), resultTypes, callee.value(), op.getArgOperands());

    // replaceOp covers both the zero-result and the with-results case: the
    // result counts of the old and new call match by construction, so no
    // separate eraseOp branch is needed.
    rewriter.replaceOp(op, newCall->getResults());
    return success();
  }
};

struct LowerLlvmToNeuraPass
: public PassWrapper<LowerLlvmToNeuraPass, OperationPass<ModuleOp>> {

Expand All @@ -316,6 +465,7 @@ struct LowerLlvmToNeuraPass

void getDependentDialects(DialectRegistry &registry) const override {
registry.insert<mlir::neura::NeuraDialect>();
registry.insert<mlir::func::FuncDialect>();
}

void runOnOperation() override {
Expand All @@ -338,11 +488,24 @@ struct LowerLlvmToNeuraPass
patterns.add<LlvmReturnToNeuraReturn>(&getContext());
patterns.add<FuncReturnToNeuraReturn>(&getContext());
patterns.add<LlvmFSubToNeuraFSub>(&getContext());
patterns.add<LlvmAllocaToNeuraAlloca>(&getContext());
patterns.add<LlvmSExtToNeuraSExt>(&getContext());
patterns.add<LlvmZExtToNeuraZExt>(&getContext());
patterns.add<LlvmMulToNeuraMul>(&getContext());
patterns.add<LlvmFuncToNeuraFunc>(&getContext());
patterns.add<LlvmCallToFuncCall>(&getContext());

FrozenRewritePatternSet frozen(std::move(patterns));

ModuleOp module_op = getOperation();

// Performs function-level conversions.
if (failed(applyPatternsGreedily(module_op, frozen))) {
signalPassFailure();
return;
}

// Performs operation-level conversions.
// Applies to every region inside the module (regardless of func type,
// e.g., mlir func or llvm func).
module_op.walk([&](FunctionOpInterface func) {
Expand Down
46 changes: 46 additions & 0 deletions test/c2llvm2mlir/nested_loop/kernel.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
// RUN: mlir-neura-opt %s | FileCheck %s

#include <stdio.h>

// Number of filter taps; also the length of every buffer below.
#define NTAPS 32

// All-ones input samples: with these, every output element accumulates the
// same sum of the coefficient array, which makes the result easy to check.
int input[NTAPS] = {
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1
};
// Output accumulator; zero-initialized (static storage duration).
int output[NTAPS];
// Filter coefficients: the same 8-value pattern repeated four times.
int coefficients[NTAPS] = {25, 150, 375, -225, 50, 75, -300, 125,
25, 150, 375, -225, 50, 75, -300, 125,
25, 150, 375, -225, 50, 75, -300, 125,
25, 150, 375, -225, 50, 75, -300, 125};

void kernel(int input[], int output[], int coefficient[]);

// Driver: runs the kernel over the global buffers and prints one element of
// the result as a quick sanity check.
int main() {
  kernel(input, output, coefficients);

  // Every output element receives the same accumulated sum for the all-ones
  // input, so inspecting output[0] is sufficient.
  printf("output: %d\n", output[0]);
  return 0;
}

/* input : input sample array */
/* output: output sample array */
/* coefficient: coefficient array */
// Adds, for every output tap j, the sum over i of input[i] * coefficient[i].
// The product depends only on i, so it is hoisted out of the inner loop to
// avoid recomputing it NTAPS times per outer iteration.
void kernel(int input[], int output[], int coefficient[]) {
  for (int i = 0; i < NTAPS; ++i) {
    // Invariant for the whole inner loop over j.
    const int contribution = input[i] * coefficient[i];
    for (int j = 0; j < NTAPS; ++j) {
      output[j] += contribution;
    }
  }
}
Loading