[CIR][CIRGen][Builtin][Neon] Lower neon_vstl1_lane_s64 and vstl1q_lane_s64 #1340

Open · wants to merge 1 commit into base: main
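This change replaces the NYI `llvm_unreachable` stub for the `vstl1_lane`/`vstl1q_lane` store-release builtins: the operand vector is bitcast to the builtin's vector type, the requested lane is extracted with `cir::VecExtractOp`, and the scalar is written back with an aligned `cir.store` marked `atomic(release)`. Judging from the FileCheck patterns added below, the end-to-end semantics match this minimal C sketch (the `__atomic_store_n` spelling is illustrative only, not what `arm_neon.h` actually expands to):

    #include <arm_neon.h>
    #include <stdint.h>

    // Minimal sketch of vstl1q_lane_s64(a, b, 1): extract lane 1 of the
    // <2 x i64> vector, then store it with release ordering, matching the
    // extractelement + "store atomic ... release" pair checked below.
    static inline void vstl1q_lane_s64_sketch(int64_t *a, int64x2_t b) {
      int64_t lane = vgetq_lane_s64(b, 1);
      __atomic_store_n(a, lane, __ATOMIC_RELEASE);
    }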
7 changes: 6 additions & 1 deletion clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -4473,7 +4473,12 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
   }
   case NEON::BI__builtin_neon_vstl1_lane_s64:
   case NEON::BI__builtin_neon_vstl1q_lane_s64: {
-    llvm_unreachable("NEON::BI__builtin_neon_vstl1q_lane_s64 NYI");
+    Ops[1] = builder.createBitcast(Ops[1], ty);
+    Ops[1] = builder.create<cir::VecExtractOp>(Ops[1].getLoc(), Ops[1], Ops[2]);
+    cir::StoreOp Store = builder.createAlignedStore(
+        getLoc(E->getExprLoc()), Ops[1], Ops[0], PtrOp0.getAlignment());
+    Store.setAtomic(cir::MemOrder::Release);
+    return Ops[1];
   }
   case NEON::BI__builtin_neon_vld2_v:
   case NEON::BI__builtin_neon_vld2q_v: {
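The tests added below exercise both vector widths for every 64-bit element type (u64, s64, f64, p64), checking both the emitted CIR and the lowered LLVM IR. The 64-bit d-form has only lane 0, so its sketch degenerates to a release store of the vector's single element (same illustrative headers and caveats as the sketch above):

    // Minimal sketch of vstl1_lane_u64(a, b, 0) for the one-element form.
    static inline void vstl1_lane_u64_sketch(uint64_t *a, uint64x1_t b) {
      uint64_t lane = vget_lane_u64(b, 0);
      __atomic_store_n(a, lane, __ATOMIC_RELEASE);
    }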
128 changes: 128 additions & 0 deletions clang/test/CIR/CodeGen/AArch64/neon-ldst.c
@@ -501,3 +501,131 @@ void test_vst1q_lane_f64(float64_t * ptr, float64x2_t src) {
// LLVM: [[VEC_CAST1:%.*]] = bitcast <16 x i8> [[VEC_CAST0]] to <2 x double>
// LLVM: [[RES:%.*]] = extractelement <2 x double> [[VEC_CAST1]], i32 1
// LLVM: store double [[RES]], ptr [[PTR]], align 8

void test_vstl1q_lane_u64(uint64_t *a, uint64x2_t b) {
vstl1q_lane_u64(a, b, 1);
}

// CIR-LABEL: test_vstl1q_lane_u64
// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i
// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector<!u64i x 2>
// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr<!void>), !cir.ptr<!u64i>
// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !u64i, !cir.ptr<!u64i>

// LLVM: {{.*}}test_vstl1q_lane_u64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]])
// LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[SRC]] to <16 x i8>
// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// LLVM: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
// LLVM: store atomic i64 [[TMP2]], ptr [[PTR]] release, align 8

void test_vstl1q_lane_s64(int64_t *a, int64x2_t b) {
vstl1q_lane_s64(a, b, 1);
}

// CIR-LABEL: test_vstl1q_lane_s64
// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i
// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector<!s64i x 2>
// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr<!void>), !cir.ptr<!s64i>
// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !s64i, !cir.ptr<!s64i>

// LLVM: {{.*}}test_vstl1q_lane_s64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]])
// LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[SRC]] to <16 x i8>
// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// LLVM: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
// LLVM: store atomic i64 [[TMP2]], ptr [[PTR]] release, align 8

void test_vstl1q_lane_f64(float64_t *a, float64x2_t b) {
vstl1q_lane_f64(a, b, 1);
}

// CIR-LABEL: test_vstl1q_lane_f64
// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i
// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector<!cir.double x 2>
// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr<!void>), !cir.ptr<!cir.double>
// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !cir.double, !cir.ptr<!cir.double>

// LLVM: {{.*}}test_vstl1q_lane_f64(ptr{{.*}}[[PTR:%.*]], <2 x double>{{.*}}[[SRC:%.*]])
// LLVM: [[TMP0:%.*]] = bitcast <2 x double> [[SRC]] to <16 x i8>
// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double>
// LLVM: [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 1
// LLVM: store atomic double [[TMP2]], ptr [[PTR]] release, align 8

void test_vstl1q_lane_p64(poly64_t *a, poly64x2_t b) {
vstl1q_lane_p64(a, b, 1);
}

// CIR-LABEL: test_vstl1q_lane_p64
// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i
// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector<!s64i x 2>
// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr<!void>), !cir.ptr<!s64i>
// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !s64i, !cir.ptr<!s64i>

// LLVM: {{.*}}test_vstl1q_lane_p64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]])
// LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[SRC]] to <16 x i8>
// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// LLVM: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
// LLVM: store atomic i64 [[TMP2]], ptr [[PTR]] release, align 8

void test_vstl1_lane_u64(uint64_t *a, uint64x1_t b) {
vstl1_lane_u64(a, b, 0);
}

// CIR-LABEL: test_vstl1_lane_u64
// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i
// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector<!u64i x 1>
// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr<!void>), !cir.ptr<!u64i>
// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !u64i, !cir.ptr<!u64i>

// LLVM: {{.*}}test_vstl1_lane_u64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]])
// LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[SRC]] to <8 x i8>
// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
// LLVM: [[TMP2:%.*]] = extractelement <1 x i64> [[TMP1]], i32 0
// LLVM: store atomic i64 [[TMP2]], ptr [[PTR]] release, align 8

void test_vstl1_lane_s64(int64_t *a, int64x1_t b) {
vstl1_lane_s64(a, b, 0);
}

// CIR-LABEL: test_vstl1_lane_s64
// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i
// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector<!s64i x 1>
// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr<!void>), !cir.ptr<!s64i>
// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !s64i, !cir.ptr<!s64i>

// LLVM: {{.*}}test_vstl1_lane_s64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]])
// LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[SRC]] to <8 x i8>
// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
// LLVM: [[TMP2:%.*]] = extractelement <1 x i64> [[TMP1]], i32 0
// LLVM: store atomic i64 [[TMP2]], ptr [[PTR]] release, align 8

void test_vstl1_lane_f64(float64_t *a, float64x1_t b) {
vstl1_lane_f64(a, b, 0);
}

// CIR-LABEL: test_vstl1_lane_f64
// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i
// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector<!cir.double x 1>
// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr<!void>), !cir.ptr<!cir.double>
// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !cir.double, !cir.ptr<!cir.double>

// LLVM: {{.*}}test_vstl1_lane_f64(ptr{{.*}}[[PTR:%.*]], <1 x double>{{.*}}[[SRC:%.*]])
// LLVM: [[TMP0:%.*]] = bitcast <1 x double> [[SRC]] to <8 x i8>
// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
// LLVM: [[TMP2:%.*]] = extractelement <1 x double> [[TMP1]], i32 0
// LLVM: store atomic double [[TMP2]], ptr [[PTR]] release, align 8

void test_vstl1_lane_p64(poly64_t *a, poly64x1_t b) {
vstl1_lane_p64(a, b, 0);
}

// CIR-LABEL: test_vstl1_lane_p64
// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i
// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector<!s64i x 1>
// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr<!void>), !cir.ptr<!s64i>
// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !s64i, !cir.ptr<!s64i>

// LLVM: {{.*}}test_vstl1_lane_p64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]])
// LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[SRC]] to <8 x i8>
// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
// LLVM: [[TMP2:%.*]] = extractelement <1 x i64> [[TMP1]], i32 0
// LLVM: store atomic i64 [[TMP2]], ptr [[PTR]] release, align 8