Skip to content

Commit

Permalink
Some tweaks to the SVE2p1 load and store intrinsics
Browse files Browse the repository at this point in the history
The pre-SVE2p1 gather and scatter intrinsics allow vector displacements
(offsets or indices) to be either signed or unsigned.  svld1q and svst1q
instead required them to be unsigned.  This patch adds signed versions
too, for consistency.

Also, the SVE2p1 stores were specified to take pointers to const,
but they ought to be pointers to non-const instead.
  • Loading branch information
rsandifo-arm committed Nov 6, 2024
1 parent a897926 commit 521561d
Showing 1 changed file with 11 additions and 6 deletions.
17 changes: 11 additions & 6 deletions main/acle.md
Original file line number Diff line number Diff line change
Expand Up @@ -422,6 +422,7 @@ Armv8.4-A [[ARMARMv84]](#ARMARMv84). Support is added for the Dot Product intrin
* Removed Function Multi Versioning features ebf16, memtag3, and rpres.
* Fixed range of operand `o0` (too small) in AArch64 system register designations.
* Fixed SVE2.1 quadword gather load/scatter store intrinsics.
* Removed extraneous `const` from SVE2.1 store intrinsics.

### References

Expand Down Expand Up @@ -9181,11 +9182,13 @@ Gather Load Quadword.
// _mf8, _bf16, _f16, _f32, _f64
svint8_t svld1q_gather[_u64base]_s8(svbool_t pg, svuint64_t zn);
svint8_t svld1q_gather[_u64base]_offset_s8(svbool_t pg, svuint64_t zn, int64_t offset);
svint8_t svld1q_gather_[s64]offset[_s8](svbool_t pg, const int8_t *base, svint64_t offset);
svint8_t svld1q_gather_[u64]offset[_s8](svbool_t pg, const int8_t *base, svuint64_t offset);

// Variants are also available for:
// _u16, _u32, _s32, _u64, _s64
// _bf16, _f16, _f32, _f64
svint16_t svld1q_gather_[s64]index[_s16](svbool_t pg, const int16_t *base, svint64_t index);
svint16_t svld1q_gather_[u64]index[_s16](svbool_t pg, const int16_t *base, svuint64_t index);
svint16_t svld1q_gather[_u64base]_index_s16(svbool_t pg, svuint64_t zn, int64_t index);
```
Expand Down Expand Up @@ -9255,14 +9258,14 @@ Contiguous store of single vector operand, truncating from quadword.
``` c
// Variants are also available for:
// _u32, _s32
void svst1wq[_f32](svbool_t, const float32_t *ptr, svfloat32_t data);
void svst1wq_vnum[_f32](svbool_t, const float32_t *ptr, int64_t vnum, svfloat32_t data);
void svst1wq[_f32](svbool_t, float32_t *ptr, svfloat32_t data);
void svst1wq_vnum[_f32](svbool_t, float32_t *ptr, int64_t vnum, svfloat32_t data);


// Variants are also available for:
// _u64, _s64
void svst1dq[_f64](svbool_t, const float64_t *ptr, svfloat64_t data);
void svst1dq_vnum[_f64](svbool_t, const float64_t *ptr, int64_t vnum, svfloat64_t data);
void svst1dq[_f64](svbool_t, float64_t *ptr, svfloat64_t data);
void svst1dq_vnum[_f64](svbool_t, float64_t *ptr, int64_t vnum, svfloat64_t data);
```

#### ST1Q
Expand All @@ -9275,12 +9278,14 @@ Scatter store quadwords.
// _mf8, _bf16, _f16, _f32, _f64
void svst1q_scatter[_u64base][_s8](svbool_t pg, svuint64_t zn, svint8_t data);
void svst1q_scatter[_u64base]_offset[_s8](svbool_t pg, svuint64_t zn, int64_t offset, svint8_t data);
void svst1q_scatter_[u64]offset[_s8](svbool_t pg, const uint8_t *base, svuint64_t offset, svint8_t data);
void svst1q_scatter_[s64]offset[_s8](svbool_t pg, uint8_t *base, svint64_t offset, svint8_t data);
void svst1q_scatter_[u64]offset[_s8](svbool_t pg, uint8_t *base, svuint64_t offset, svint8_t data);

// Variants are also available for:
// _u16, _u32, _s32, _u64, _s64
// _bf16, _f16, _f32, _f64
void svst1q_scatter_[u64]index[_s16](svbool_t pg, const int16_t *base, svuint64_t index, svint16_t data);
void svst1q_scatter_[s64]index[_s16](svbool_t pg, int16_t *base, svint64_t index, svint16_t data);
void svst1q_scatter_[u64]index[_s16](svbool_t pg, int16_t *base, svuint64_t index, svint16_t data);
void svst1q_scatter[_u64base]_index[_s16](svbool_t pg, svuint64_t zn, int64_t index, svint16_t data);
```

Expand Down

0 comments on commit 521561d

Please sign in to comment.