Skip to content
52 changes: 50 additions & 2 deletions include/NeuraDialect/NeuraOps.td
Original file line number Diff line number Diff line change
Expand Up @@ -81,8 +81,8 @@ def Neura_FMulOp : Op<NeuraDialect, "fmul"> {

def Neura_FDivOp : Op<NeuraDialect, "fdiv"> {
  let summary = "Floating division operation";
  // Operands and result are AnyType (rather than AnyFloat) so predicated
  // !neura.data<f*, i1> wrappers are accepted alongside plain float types,
  // matching the other ops in this dialect. NOTE(review): this drops
  // float-only verification -- consider a dialect type predicate that
  // covers both plain floats and predicated float data.
  let arguments = (ins AnyType:$lhs, AnyType:$rhs, Optional<AnyType>:$predicate);
  let results = (outs AnyType:$result);
  // let assemblyFormat = "$lhs `,` $rhs `,` $predicate attr-dict `:` type($result)";
}

Expand Down Expand Up @@ -211,6 +211,54 @@ def Neura_CastOp : Op<NeuraDialect, "cast">{
// let assemblyFormat = "$input type($input) `->` type($output) `,` $predicate attr-dict";
}

// Defines an alloca operation for memory allocation.
def Neura_AllocaOp : Op<NeuraDialect, "alloca"> {
let summary = "Memory allocation operation";
let description = [{
Allocates memory on the stack, similar to llvm.alloca.
Takes a predicated size value and returns a pointer to the allocated memory.

Example:
%ptr = neura.alloca %size : !neura.data<i32, i1> -> !llvm.ptr
}];

// NOTE(review): AnyType admits any size/result type so that predicated
// !neura.data wrappers can flow through; tighten to a dialect type
// predicate once one exists. Unlike llvm.alloca, no alignment or
// element-type attribute is modeled here.
let arguments = (ins AnyType:$size);
let results = (outs AnyType:$result);
let assemblyFormat = "$size attr-dict `:` type($size) `->` type($result)";
}

// Defines a sign extension operation.
def Neura_SExtOp : Op<NeuraDialect, "sext"> {
let summary = "Sign extension operation";
let description = [{
Sign extends a value from a smaller integer type to a larger integer type.
Similar to llvm.sext, but works with predicated values.

Example:
%extended = neura.sext %value : !neura.data<i8, i1> -> !neura.data<i32, i1>
}];

// NOTE(review): AnyType (not an integer constraint) so predicated
// !neura.data wrappers are accepted; no verifier enforces that the result
// width is actually larger than the input width.
let arguments = (ins AnyType:$value);
let results = (outs AnyType:$result);
let assemblyFormat = "$value attr-dict `:` type($value) `->` type($result)";
}

// Defines a zero extension operation.
def Neura_ZExtOp : Op<NeuraDialect, "zext"> {
let summary = "Zero extension operation";
let description = [{
Zero extends a value from a smaller integer type to a larger integer type.
Similar to llvm.zext, but works with predicated values.

Example:
%extended = neura.zext %value : !neura.data<i8, i1> -> !neura.data<i32, i1>
}];

// NOTE(review): same loose typing as neura.sext -- AnyType admits
// predicated !neura.data wrappers; widths are not verified.
let arguments = (ins AnyType:$value);
let results = (outs AnyType:$result);
let assemblyFormat = "$value attr-dict `:` type($value) `->` type($result)";
}

// ----------------------------------------------------
// Defines vector operations.

Expand Down
61 changes: 61 additions & 0 deletions lib/Conversion/LlvmToNeura/LlvmToNeuraPass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -304,6 +304,63 @@ struct LlvmConstantToNeuraConstant : public OpRewritePattern<LLVM::ConstantOp> {
}
};

struct LlvmAllocaToNeuraAlloca : public OpRewritePattern<LLVM::AllocaOp> {
  using OpRewritePattern::OpRewritePattern;

  /// Rewrites llvm.alloca into neura.alloca, forwarding the array-size
  /// operand and reusing the original result type verbatim.
  /// NOTE(review): the LLVM op's alignment and element-type attributes are
  /// discarded here -- confirm neura.alloca does not need them, and that no
  /// size-type conversion (e.g. to a predicated data type) is required.
  LogicalResult matchAndRewrite(LLVM::AllocaOp op,
                                PatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<neura::AllocaOp>(op, op.getType(),
                                                 op.getArraySize());
    return success();
  }
};

struct LlvmSExtToNeuraSExt : public OpRewritePattern<LLVM::SExtOp> {
  using OpRewritePattern::OpRewritePattern;

  /// Lowers llvm.sext to neura.sext, keeping the operand and result type.
  LogicalResult matchAndRewrite(LLVM::SExtOp op,
                                PatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<neura::SExtOp>(op, op.getType(), op.getArg());
    return success();
  }
};

struct LlvmZExtToNeuraZExt : public OpRewritePattern<LLVM::ZExtOp> {
  using OpRewritePattern::OpRewritePattern;

  /// Lowers llvm.zext to neura.zext, keeping the operand and result type.
  LogicalResult matchAndRewrite(LLVM::ZExtOp op,
                                PatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<neura::ZExtOp>(op, op.getType(), op.getArg());
    return success();
  }
};

struct LlvmMulToNeuraMul : public OpRewritePattern<LLVM::MulOp> {
  using OpRewritePattern::OpRewritePattern;

  /// Lowers llvm.mul to neura.mul with both operands and the result type
  /// carried over unchanged.
  LogicalResult matchAndRewrite(LLVM::MulOp op,
                                PatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<neura::MulOp>(op, op.getType(), op.getLhs(),
                                              op.getRhs());
    return success();
  }
};

struct LowerLlvmToNeuraPass
: public PassWrapper<LowerLlvmToNeuraPass, OperationPass<ModuleOp>> {

Expand Down Expand Up @@ -338,6 +395,10 @@ struct LowerLlvmToNeuraPass
patterns.add<LlvmReturnToNeuraReturn>(&getContext());
patterns.add<FuncReturnToNeuraReturn>(&getContext());
patterns.add<LlvmFSubToNeuraFSub>(&getContext());
patterns.add<LlvmAllocaToNeuraAlloca>(&getContext());
patterns.add<LlvmSExtToNeuraSExt>(&getContext());
patterns.add<LlvmZExtToNeuraZExt>(&getContext());
patterns.add<LlvmMulToNeuraMul>(&getContext());

FrozenRewritePatternSet frozen(std::move(patterns));

Expand Down
46 changes: 46 additions & 0 deletions test/c2llvm2mlir/kernel2.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
// Compiled and checked through the pipeline in test2.mlir; this file itself
// contains no CHECK lines, so do not run FileCheck on it directly.

#include <stdio.h>

// Number of taps / elements in every array below.
#define NTAPS 32

// Input samples: all ones, so each output element accumulates the plain
// sum of the coefficients (once per outer-loop iteration of kernel()).
int input[NTAPS] = {
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1
};
// Results written by kernel(); zero-initialized (static storage duration).
int output[NTAPS];
// Filter coefficients: the same 8-value pattern repeated four times.
int coefficients[NTAPS] = {25, 150, 375, -225, 50, 75, -300, 125,
25, 150, 375, -225, 50, 75, -300, 125,
25, 150, 375, -225, 50, 75, -300, 125,
25, 150, 375, -225, 50, 75, -300, 125};

// Defined below main(); forward declaration so main() can call it.
void kernel(int input[], int output[], int coefficient[]);

int main()
{
  // Run the kernel over the statically initialized arrays.
  // (input_dsp / output_dsp instrumentation calls from the original
  // benchmark harness are intentionally omitted here.)
  kernel(input, output, coefficients);

  // Print a single element as a smoke-check of the result.
  printf("output: %d\n", output[0]);
  return 0;
}

/* input : input sample array */
/* output: output sample array */
/* coefficient: coefficient array */
// Accumulates input[i] * coefficient[i] into every output element: after
// both loops, each output[j] holds the sum over i of input[i]*coefficient[i]
// (the same total in every slot, since the product does not depend on j).
void kernel(int input[], int output[], int coefficient[]) {
  // Declare the indices in the loop headers (C++ idiom) instead of the
  // original function-scope C89-style `int i, j;` declarations.
  for (int i = 0; i < NTAPS; ++i) {
    for (int j = 0; j < NTAPS; ++j) {
      output[j] += input[i] * coefficient[i];
    }
  }
}
16 changes: 16 additions & 0 deletions test/c2llvm2mlir/test2.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
// RUN: clang++ -S -emit-llvm kernel2.cpp -o kernel2.ll
// RUN: mlir-translate --import-llvm kernel2.ll -o kernel2.mlir
// RUN: mlir-neura-opt --assign-accelerator \
// RUN: --lower-llvm-to-neura \
// RUN: --canonicalize-live-in \
// RUN: --leverage-predicated-value \
// RUN: --transform-ctrl-to-data-flow \
// RUN: --fold-constant \
// RUN: --insert-data-mov \
// RUN: --map-to-accelerator="mapping-strategy=heuristic backtrack-config=simple" \
// RUN: --generate-code kernel2.mlir | FileCheck %s --check-prefix=CHECK-LLVM2NEURA

// Match on op structure and types, not hard-coded SSA value numbers
// (%25, %175, ...): numbering shifts whenever an upstream pass adds or
// removes an op, which would make exact-number checks needlessly brittle.
// CHECK-LLVM2NEURA: neura.alloca %{{.*}} : !neura.data<i32, i1> -> !neura.data<!llvm.ptr, i1>
// CHECK-LLVM2NEURA: "neura.phi"(%{{.*}}, %{{.*}}) : (!neura.data<i32, i1>, !neura.data<i32, i1>) -> !neura.data<i32, i1>
// CHECK-LLVM2NEURA: neura.sext %{{.*}} : !neura.data<i32, i1> -> !neura.data<i64, i1>
// CHECK-LLVM2NEURA: "neura.mul"(%{{.*}}, %{{.*}}) : (!neura.data<i32, i1>, !neura.data<i32, i1>) -> !neura.data<i32, i1>