diff --git a/gcc/config/riscv/constraints.md b/gcc/config/riscv/constraints.md index eb5a0bb75c7..901ded4b028 100644 --- a/gcc/config/riscv/constraints.md +++ b/gcc/config/riscv/constraints.md @@ -295,3 +295,13 @@ "Shifting immediate for SIMD shufflei3." (and (match_code "const_int") (match_test "IN_RANGE (ival, -64, -1)"))) + +(define_constraint "Ou01" + "A 1-bit unsigned immediate." + (and (match_code "const_int") + (match_test "IN_RANGE (ival, 0, 1)"))) + +(define_constraint "Ou02" + "A 2-bit unsigned immediate." + (and (match_code "const_int") + (match_test "IN_RANGE (ival, 0, 3)"))) diff --git a/gcc/config/riscv/generic-vector-ooo.md b/gcc/config/riscv/generic-vector-ooo.md index bcad36c1a36..020fbf7d4b9 100644 --- a/gcc/config/riscv/generic-vector-ooo.md +++ b/gcc/config/riscv/generic-vector-ooo.md @@ -141,3 +141,7 @@ (eq_attr "type" "rdvlenb,rdvl") "vxu_ooo_issue,vxu_ooo_issue") +;; Vector sf_vcp. +(define_insn_reservation "vec_sf_vcp" 2 + (eq_attr "type" "sf_vc,sf_vc_se") + "vxu_ooo_issue") diff --git a/gcc/config/riscv/genrvv-type-indexer.cc b/gcc/config/riscv/genrvv-type-indexer.cc index e1eee34237a..e3b845d156e 100644 --- a/gcc/config/riscv/genrvv-type-indexer.cc +++ b/gcc/config/riscv/genrvv-type-indexer.cc @@ -164,6 +164,18 @@ floattype (unsigned sew, int lmul_log2) return mode.str (); } +std::string +expand_floattype (unsigned sew, int lmul_log2, unsigned nf) +{ + if (sew != 8 || nf!= 1 + || (!valid_type (sew * 4, lmul_log2 + 2, /*float_t*/ true))) + return "INVALID"; + + std::stringstream mode; + mode << "vfloat" << sew * 4 << to_lmul (lmul_log2 + 2) << "_t"; + return mode.str (); +} + std::string floattype (unsigned sew, int lmul_log2, unsigned nf) { @@ -276,6 +288,7 @@ main (int argc, const char **argv) fprintf (fp, " /*QLMUL1*/ INVALID,\n"); fprintf (fp, " /*QLMUL1_SIGNED*/ INVALID,\n"); fprintf (fp, " /*QLMUL1_UNSIGNED*/ INVALID,\n"); + fprintf (fp, " /*XFQF*/ INVALID,\n"); for (unsigned eew : {8, 16, 32, 64}) fprintf (fp, " /*EEW%d_INTERPRET*/ 
INVALID,\n", eew); @@ -290,6 +303,8 @@ main (int argc, const char **argv) fprintf (fp, " /*UNSIGNED_EEW%d_LMUL1_INTERPRET*/ %s,\n", eew, inttype (eew, LMUL1_LOG2, /* unsigned_p */true).c_str ()); + fprintf (fp, " /*X2*/ INVALID,\n"); + for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6}) { unsigned multiple_of_lmul = 1 << lmul_log2_offset; @@ -384,6 +399,8 @@ main (int argc, const char **argv) inttype (8, /*lmul_log2*/ 0, false).c_str ()); fprintf (fp, " /*QLMUL1_UNSIGNED*/ %s,\n", inttype (8, /*lmul_log2*/ 0, true).c_str ()); + fprintf (fp, " /*XFQF*/ %s,\n", + expand_floattype (sew, lmul_log2, nf).c_str ()); for (unsigned eew : {8, 16, 32, 64}) { if (eew == sew) @@ -411,6 +428,9 @@ main (int argc, const char **argv) fprintf (fp, " /*UNSIGNED_EEW%d_LMUL1_INTERPRET*/ INVALID,\n", eew); + fprintf (fp, " /*X2*/ %s,\n", + inttype (sew * 2, lmul_log2 + 1, /*unsigned_p*/ true).c_str ()); + for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6}) { unsigned multiple_of_lmul = 1 << lmul_log2_offset; @@ -473,6 +493,7 @@ main (int argc, const char **argv) bfloat16_wide_type (/*lmul_log2*/ 0).c_str ()); fprintf (fp, " /*QLMUL1_SIGNED*/ INVALID,\n"); fprintf (fp, " /*QLMUL1_UNSIGNED*/ INVALID,\n"); + fprintf (fp, " /*XFQF*/ INVALID,\n"); for (unsigned eew : {8, 16, 32, 64}) fprintf (fp, " /*EEW%d_INTERPRET*/ INVALID,\n", eew); @@ -485,6 +506,8 @@ main (int argc, const char **argv) for (unsigned eew : EEW_SIZE_LIST) fprintf (fp, " /*UNSIGNED_EEW%d_LMUL1_INTERPRET*/ INVALID,\n", eew); + fprintf (fp, " /*X2*/ INVALID,\n"); + for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6}) { unsigned multiple_of_lmul = 1 << lmul_log2_offset; @@ -558,6 +581,7 @@ main (int argc, const char **argv) floattype (sew / 4, /*lmul_log2*/ 0).c_str ()); fprintf (fp, " /*QLMUL1_SIGNED*/ INVALID,\n"); fprintf (fp, " /*QLMUL1_UNSIGNED*/ INVALID,\n"); + fprintf (fp, " /*XFQF*/ INVALID,\n"); for (unsigned eew : {8, 16, 32, 64}) fprintf (fp, " /*EEW%d_INTERPRET*/ INVALID,\n", eew); @@ -571,6 +595,8 @@ main (int 
argc, const char **argv) fprintf (fp, " /*UNSIGNED_EEW%d_LMUL1_INTERPRET*/ INVALID,\n", eew); + fprintf (fp, " /*X2*/ INVALID,\n"); + for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6}) { unsigned multiple_of_lmul = 1 << lmul_log2_offset; diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.cc b/gcc/config/riscv/riscv-vector-builtins-shapes.cc index 0999a644388..d824485c396 100644 --- a/gcc/config/riscv/riscv-vector-builtins-shapes.cc +++ b/gcc/config/riscv/riscv-vector-builtins-shapes.cc @@ -1343,6 +1343,52 @@ struct sf_vfnrclip_def : public build_base } }; +/* sf_vcix_se_def class. */ +struct sf_vcix_se_def : public build_base +{ + char *get_name (function_builder &b, const function_instance &instance, + bool overloaded_p) const override + { + /* Return nullptr if it is overloaded. */ + if (overloaded_p) + return nullptr; + + b.append_base_name (instance.base_name); + + /* vop --> vop_se_. */ + if (!overloaded_p) + { + b.append_name (operand_suffixes[instance.op_info->op]); + b.append_name ("_se"); + b.append_name (type_suffixes[instance.type.index].vector); + } + return b.finish_name (); + } +}; + +/* sf_vcix_def class. */ +struct sf_vcix_def : public build_base +{ + char *get_name (function_builder &b, const function_instance &instance, + bool overloaded_p) const override + { + /* Return nullptr if it is overloaded. */ + if (overloaded_p) + return nullptr; + + b.append_base_name (instance.base_name); + + /* vop --> vop_. 
*/ + if (!overloaded_p) + { + b.append_name (operand_suffixes[instance.op_info->op]); + b.append_name (type_suffixes[instance.type.index].vector); + } + return b.finish_name (); + } +}; + + SHAPE(vsetvl, vsetvl) SHAPE(vsetvl, vsetvlmax) SHAPE(loadstore, loadstore) @@ -1379,4 +1425,6 @@ SHAPE(crypto_vi, crypto_vi) SHAPE(crypto_vv_no_op_type, crypto_vv_no_op_type) SHAPE (sf_vqmacc, sf_vqmacc) SHAPE (sf_vfnrclip, sf_vfnrclip) +SHAPE(sf_vcix_se, sf_vcix_se) +SHAPE(sf_vcix, sf_vcix) } // end namespace riscv_vector diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.h b/gcc/config/riscv/riscv-vector-builtins-shapes.h index 16049c46018..ebfe39d2b57 100644 --- a/gcc/config/riscv/riscv-vector-builtins-shapes.h +++ b/gcc/config/riscv/riscv-vector-builtins-shapes.h @@ -62,6 +62,8 @@ extern const function_shape *const crypto_vv_no_op_type; /* Sifive vendor extension. */ extern const function_shape *const sf_vqmacc; extern const function_shape *const sf_vfnrclip; +extern const function_shape *const sf_vcix_se; +extern const function_shape *const sf_vcix; } } // end namespace riscv_vector diff --git a/gcc/config/riscv/riscv-vector-builtins-types.def b/gcc/config/riscv/riscv-vector-builtins-types.def index 96412bfd1a5..b924565e18e 100644 --- a/gcc/config/riscv/riscv-vector-builtins-types.def +++ b/gcc/config/riscv/riscv-vector-builtins-types.def @@ -363,6 +363,24 @@ along with GCC; see the file COPYING3. If not see #define DEF_RVV_QMACC_OPS(TYPE, REQUIRE) #endif +/* Use "DEF_RVV_XFQF_OPS" macro include signed integer which will + be iterated and registered as intrinsic functions. */ +#ifndef DEF_RVV_XFQF_OPS +#define DEF_RVV_XFQF_OPS(TYPE, REQUIRE) +#endif + +/* Use "DEF_RVV_X2_U_OPS" macro include unsigned integer which will + be iterated and registered as intrinsic functions. 
*/ +#ifndef DEF_RVV_X2_U_OPS +#define DEF_RVV_X2_U_OPS(TYPE, REQUIRE) +#endif + +/* Use "DEF_RVV_X2_WU_OPS" macro include widen unsigned integer which will + be iterated and registered as intrinsic functions. */ +#ifndef DEF_RVV_X2_WU_OPS +#define DEF_RVV_X2_WU_OPS(TYPE, REQUIRE) +#endif + DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64) DEF_RVV_I_OPS (vint8mf4_t, 0) DEF_RVV_I_OPS (vint8mf2_t, 0) @@ -1451,6 +1469,38 @@ DEF_RVV_QMACC_OPS (vint32m2_t, 0) DEF_RVV_QMACC_OPS (vint32m4_t, 0) DEF_RVV_QMACC_OPS (vint32m8_t, 0) +DEF_RVV_XFQF_OPS (vint8mf8_t, 0) +DEF_RVV_XFQF_OPS (vint8mf4_t, 0) +DEF_RVV_XFQF_OPS (vint8mf2_t, 0) +DEF_RVV_XFQF_OPS (vint8m1_t, 0) +DEF_RVV_XFQF_OPS (vint8m2_t, 0) + +DEF_RVV_X2_U_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64) +DEF_RVV_X2_U_OPS (vuint8mf4_t, 0) +DEF_RVV_X2_U_OPS (vuint8mf2_t, 0) +DEF_RVV_X2_U_OPS (vuint8m1_t, 0) +DEF_RVV_X2_U_OPS (vuint8m2_t, 0) +DEF_RVV_X2_U_OPS (vuint8m4_t, 0) +DEF_RVV_X2_U_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64) +DEF_RVV_X2_U_OPS (vuint16mf2_t, 0) +DEF_RVV_X2_U_OPS (vuint16m1_t, 0) +DEF_RVV_X2_U_OPS (vuint16m2_t, 0) +DEF_RVV_X2_U_OPS (vuint16m4_t, 0) +DEF_RVV_X2_U_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64) +DEF_RVV_X2_U_OPS (vuint32m1_t, 0) +DEF_RVV_X2_U_OPS (vuint32m2_t, 0) +DEF_RVV_X2_U_OPS (vuint32m4_t, 0) + +DEF_RVV_X2_WU_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64) +DEF_RVV_X2_WU_OPS (vuint16mf2_t, 0) +DEF_RVV_X2_WU_OPS (vuint16m1_t, 0) +DEF_RVV_X2_WU_OPS (vuint16m2_t, 0) +DEF_RVV_X2_WU_OPS (vuint16m4_t, 0) +DEF_RVV_X2_WU_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64) +DEF_RVV_X2_WU_OPS (vuint32m1_t, 0) +DEF_RVV_X2_WU_OPS (vuint32m2_t, 0) +DEF_RVV_X2_WU_OPS (vuint32m4_t, 0) + #undef DEF_RVV_I_OPS #undef DEF_RVV_U_OPS #undef DEF_RVV_F_OPS @@ -1506,3 +1556,6 @@ DEF_RVV_QMACC_OPS (vint32m8_t, 0) #undef DEF_RVV_CRYPTO_SEW64_OPS #undef DEF_RVV_F32_OPS #undef DEF_RVV_QMACC_OPS +#undef DEF_RVV_XFQF_OPS +#undef DEF_RVV_X2_U_OPS +#undef DEF_RVV_X2_WU_OPS \ No newline at end of file diff --git 
a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc index b9b9d33adab..dc558fa5a7d 100644 --- a/gcc/config/riscv/riscv-vector-builtins.cc +++ b/gcc/config/riscv/riscv-vector-builtins.cc @@ -544,6 +544,20 @@ static const rvv_type_info crypto_sew64_ops[] = { #include "riscv-vector-builtins-types.def" {NUM_VECTOR_TYPES, 0}}; +/* A list of unsigned integer will be registered for Sifive Xsfvcp intrinsic */ +/* functions. */ +static const rvv_type_info x2_u_ops[] = { +#define DEF_RVV_X2_U_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE}, +#include "riscv-vector-builtins-types.def" + {NUM_VECTOR_TYPES, 0}}; + +/* A list of widen unsigned integer will be registered for Sifive Xsfvcp */ +/* intrinsic functions. */ +static const rvv_type_info x2_wu_ops[] = { +#define DEF_RVV_X2_WU_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE}, +#include "riscv-vector-builtins-types.def" + {NUM_VECTOR_TYPES, 0}}; + /* A list of signed integer will be registered for intrinsic * functions. */ static const rvv_type_info qmacc_ops[] = { @@ -551,6 +565,12 @@ static const rvv_type_info qmacc_ops[] = { #include "riscv-vector-builtins-types.def" {NUM_VECTOR_TYPES, 0}}; +/* A list of signed integer will be registered for intrinsic functions. */ +static const rvv_type_info xfqf_ops[] = { +#define DEF_RVV_XFQF_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE}, +#include "riscv-vector-builtins-types.def" + {NUM_VECTOR_TYPES, 0}}; + static CONSTEXPR const rvv_arg_type_info rvv_arg_type_info_end = rvv_arg_type_info (NUM_BASE_TYPES); @@ -720,7 +740,8 @@ static CONSTEXPR const rvv_arg_type_info shift_wv_args[] rvv_arg_type_info_end}; static CONSTEXPR const rvv_arg_type_info clip_args[] - = {rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info (RVV_BASE_scalar), + = {rvv_arg_type_info (RVV_BASE_xfqf_vector), + rvv_arg_type_info (RVV_BASE_xfqf_float), rvv_arg_type_info_end}; /* A list of args for vector_type func (vector_type) function.
*/ @@ -798,7 +819,7 @@ static CONSTEXPR const rvv_arg_type_info bf_wwxv_args[] static CONSTEXPR const rvv_arg_type_info m_args[] = {rvv_arg_type_info (RVV_BASE_mask), rvv_arg_type_info_end}; -/* A list of args for vector_type func (scalar_type) function. */ +/* A list of args for vector_type func (scalar_type/sf.vc) function. */ static CONSTEXPR const rvv_arg_type_info x_args[] = {rvv_arg_type_info (RVV_BASE_scalar), rvv_arg_type_info_end}; @@ -1048,6 +1069,161 @@ static CONSTEXPR const rvv_arg_type_info scalar_ptr_size_args[] rvv_arg_type_info (RVV_BASE_size), rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end}; +/* A list of args for vector_type func (sf.vc.x) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_x_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.v.x) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_v_x_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.i) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_i_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info_end}; + + +/* A list of args for vector_type func (sf.vc.i) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_v_i_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.vv) function. 
*/ +static CONSTEXPR const rvv_arg_type_info sf_vc_vv_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.v.vv) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_v_vv_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.xv) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_xv_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.v.xv) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_v_xv_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.iv) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_iv_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.v.iv) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_v_iv_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.fv) function. 
*/ +static CONSTEXPR const rvv_arg_type_info sf_vc_fv_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_scalar_float), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.v.fv) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_v_fv_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_scalar_float), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.vvv/sf.vc.v.vvv) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_vvv_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.xvv/sf.vc.v.xvv) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_xvv_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.ivv/sf.vc.v.ivv) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_ivv_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_unsigned_vector), + rvv_arg_type_info (RVV_BASE_unsigned_vector), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.fvv/sf.vc.v.fvv) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_fvv_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_scalar_float), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.vvw/sf.vc.v.vvw) function. 
*/ +static CONSTEXPR const rvv_arg_type_info sf_vc_vvw_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_x2_vector), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.xvw/sf.vc.v.xvw) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_xvw_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_x2_vector), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.ivw/sf.vc.v.ivw) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_ivw_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_x2_vector), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info_end}; + +/* A list of args for vector_type func (sf.vc.fvw/sf.vc.v.fvw) function. */ +static CONSTEXPR const rvv_arg_type_info sf_vc_fvw_args[] + = {rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info (RVV_BASE_x2_vector), + rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_scalar_float), + rvv_arg_type_info_end}; + /* A list of none preds that will be registered for intrinsic functions. */ static CONSTEXPR const predication_type_index none_preds[] = {PRED_TYPE_none, NUM_PRED_TYPES}; @@ -2549,17 +2725,17 @@ static CONSTEXPR const rvv_op_info i_narrow_shift_vwx_ops /* A static operand information for double demote type func (vector_type, * shift_type) function registration. */ static CONSTEXPR const rvv_op_info u_clip_qf_ops - = {f32_ops, /* Types */ + = {xfqf_ops, /* Types */ OP_TYPE_none, /* Suffix */ - rvv_arg_type_info (RVV_BASE_eew8_index), /* Return type */ + rvv_arg_type_info (RVV_BASE_unsigned_vector), /* Return type */ clip_args /* Args */}; /* A static operand information for double demote type func (vector_type, * shift_type) function registration. 
*/ static CONSTEXPR const rvv_op_info i_clip_qf_ops - = {f32_ops, /* Types */ + = {xfqf_ops, /* Types */ OP_TYPE_none, /* Suffix */ - rvv_arg_type_info (RVV_BASE_signed_eew8_index), /* Return type */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ clip_args /* Args */}; /* A static operand information for double demote type func (vector_type, @@ -2999,6 +3175,174 @@ static CONSTEXPR const rvv_op_info u_vvvv_crypto_sew64_ops rvv_arg_type_info (RVV_BASE_vector), /* Return type */ vvv_args /* Args */}; +static CONSTEXPR const rvv_op_info sf_vc_x_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_x, /* Suffix */ + rvv_arg_type_info (RVV_BASE_void), /* Return type */ + sf_vc_x_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_v_x_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_v_x, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + sf_vc_v_x_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_i_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_i, /* Suffix */ + rvv_arg_type_info (RVV_BASE_void), /* Return type */ + sf_vc_i_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_v_i_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_v_i, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + sf_vc_v_i_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_vv_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_vv, /* Suffix */ + rvv_arg_type_info (RVV_BASE_void), /* Return type */ + sf_vc_vv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_v_vv_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_v_vv, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + sf_vc_v_vv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_xv_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_xv, /* Suffix */ + rvv_arg_type_info (RVV_BASE_void), /* Return type */ + sf_vc_xv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_v_xv_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_v_xv, /* Suffix */ + 
rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + sf_vc_v_xv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_iv_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_iv, /* Suffix */ + rvv_arg_type_info (RVV_BASE_void), /* Return type */ + sf_vc_iv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_v_iv_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_v_iv, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + sf_vc_v_iv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_fv_ops + = {wextu_ops, /* Types */ + OP_TYPE_fv, /* Suffix */ + rvv_arg_type_info (RVV_BASE_void), /* Return type */ + sf_vc_fv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_v_fv_ops + = {wextu_ops, /* Types */ + OP_TYPE_v_fv, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + sf_vc_v_fv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_vvv_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_vvv, /* Suffix */ + rvv_arg_type_info (RVV_BASE_void), /* Return type */ + sf_vc_vvv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_v_vvv_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_v_vvv, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + sf_vc_vvv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_xvv_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_xvv, /* Suffix */ + rvv_arg_type_info (RVV_BASE_void), /* Return type */ + sf_vc_xvv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_v_xvv_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_v_xvv, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + sf_vc_xvv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_ivv_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_ivv, /* Suffix */ + rvv_arg_type_info (RVV_BASE_void), /* Return type */ + sf_vc_ivv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_v_ivv_ops + = {full_v_u_ops, /* Types */ + OP_TYPE_v_ivv, /* Suffix */ + rvv_arg_type_info 
(RVV_BASE_vector), /* Return type */ + sf_vc_ivv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_fvv_ops + = {wextu_ops, /* Types */ + OP_TYPE_fvv, /* Suffix */ + rvv_arg_type_info (RVV_BASE_void), /* Return type */ + sf_vc_fvv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_v_fvv_ops + = {wextu_ops, /* Types */ + OP_TYPE_v_fvv, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + sf_vc_fvv_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_vvw_ops + = {x2_u_ops, /* Types */ + OP_TYPE_vvw, /* Suffix */ + rvv_arg_type_info (RVV_BASE_void), /* Return type */ + sf_vc_vvw_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_v_vvw_ops + = {x2_u_ops, /* Types */ + OP_TYPE_v_vvw, /* Suffix */ + rvv_arg_type_info (RVV_BASE_x2_vector), /* Return type */ + sf_vc_vvw_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_xvw_ops + = {x2_u_ops, /* Types */ + OP_TYPE_xvw, /* Suffix */ + rvv_arg_type_info (RVV_BASE_void), /* Return type */ + sf_vc_xvw_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_v_xvw_ops + = {x2_u_ops, /* Types */ + OP_TYPE_v_xvw, /* Suffix */ + rvv_arg_type_info (RVV_BASE_x2_vector), /* Return type */ + sf_vc_xvw_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_ivw_ops + = {x2_u_ops, /* Types */ + OP_TYPE_ivw, /* Suffix */ + rvv_arg_type_info (RVV_BASE_void), /* Return type */ + sf_vc_ivw_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_v_ivw_ops + = {x2_u_ops, /* Types */ + OP_TYPE_v_ivw, /* Suffix */ + rvv_arg_type_info (RVV_BASE_x2_vector), /* Return type */ + sf_vc_ivw_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_fvw_ops + = {x2_wu_ops, /* Types */ + OP_TYPE_fvw, /* Suffix */ + rvv_arg_type_info (RVV_BASE_void), /* Return type */ + sf_vc_fvw_args /* Args */}; + +static CONSTEXPR const rvv_op_info sf_vc_v_fvw_ops + = {x2_wu_ops, /* Types */ + OP_TYPE_v_fvw, /* Suffix */ + rvv_arg_type_info (RVV_BASE_x2_vector), /* Return type 
*/ + sf_vc_fvw_args /* Args */}; + /* A list of all RVV base function types. */ static CONSTEXPR const function_type_info function_types[] = { #define DEF_RVV_TYPE_INDEX( \ @@ -3008,14 +3352,14 @@ static CONSTEXPR const function_type_info function_types[] = { QUAD_FIX_UNSIGNED, OCT_TRUNC, DOUBLE_TRUNC_SCALAR, DOUBLE_TRUNC_SIGNED, \ DOUBLE_TRUNC_UNSIGNED, DOUBLE_TRUNC_UNSIGNED_SCALAR, \ DOUBLE_TRUNC_BFLOAT_SCALAR, DOUBLE_TRUNC_BFLOAT, DOUBLE_TRUNC_FLOAT, FLOAT, \ - LMUL1, WLMUL1, QLMUL1, QLMUL1_SIGNED, QLMUL1_UNSIGNED, EEW8_INTERPRET, \ + LMUL1, WLMUL1, QLMUL1, QLMUL1_SIGNED, QLMUL1_UNSIGNED, XFQF, EEW8_INTERPRET, \ EEW16_INTERPRET, EEW32_INTERPRET, EEW64_INTERPRET, BOOL1_INTERPRET, \ BOOL2_INTERPRET, BOOL4_INTERPRET, BOOL8_INTERPRET, BOOL16_INTERPRET, \ BOOL32_INTERPRET, BOOL64_INTERPRET, SIGNED_EEW8_LMUL1_INTERPRET, \ SIGNED_EEW16_LMUL1_INTERPRET, SIGNED_EEW32_LMUL1_INTERPRET, \ SIGNED_EEW64_LMUL1_INTERPRET, UNSIGNED_EEW8_LMUL1_INTERPRET, \ UNSIGNED_EEW16_LMUL1_INTERPRET, UNSIGNED_EEW32_LMUL1_INTERPRET, \ - UNSIGNED_EEW64_LMUL1_INTERPRET, X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT, \ + UNSIGNED_EEW64_LMUL1_INTERPRET, X2, X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT,\ X16_VLMUL_EXT, X32_VLMUL_EXT, X64_VLMUL_EXT, TUPLE_SUBPART) \ { \ VECTOR_TYPE_##VECTOR, \ @@ -3060,6 +3404,7 @@ static CONSTEXPR const function_type_info function_types[] = { VECTOR_TYPE_##QLMUL1, \ VECTOR_TYPE_##QLMUL1_SIGNED, \ VECTOR_TYPE_##QLMUL1_UNSIGNED, \ + VECTOR_TYPE_##XFQF, \ VECTOR_TYPE_##EEW8_INTERPRET, \ VECTOR_TYPE_##EEW16_INTERPRET, \ VECTOR_TYPE_##EEW32_INTERPRET, \ @@ -3079,6 +3424,7 @@ static CONSTEXPR const function_type_info function_types[] = { VECTOR_TYPE_##UNSIGNED_EEW16_LMUL1_INTERPRET, \ VECTOR_TYPE_##UNSIGNED_EEW32_LMUL1_INTERPRET, \ VECTOR_TYPE_##UNSIGNED_EEW64_LMUL1_INTERPRET, \ + VECTOR_TYPE_##X2, \ VECTOR_TYPE_##X2_VLMUL_EXT, \ VECTOR_TYPE_##X4_VLMUL_EXT, \ VECTOR_TYPE_##X8_VLMUL_EXT, \ @@ -3579,6 +3925,37 @@ rvv_arg_type_info::get_scalar_const_ptr_type (vector_type_index 
type_idx) const return builtin_types[type_idx].scalar_const_ptr; } +tree +rvv_arg_type_info::get_xfqf_float_type (vector_type_index type_idx) const +{ + /* Convert vint8 types into float types. + Note: + - According to riscv-vector-builtins-types.def, the index of an unsigned + type is always one greater than its corresponding signed type. */ + if (type_idx >= VECTOR_TYPE_vint8mf8_t && type_idx <= VECTOR_TYPE_vuint8m2_t) + return builtin_types[VECTOR_TYPE_vfloat32m1_t].scalar; + else + return NULL_TREE; +} + +tree +rvv_arg_type_info::get_scalar_float_type (vector_type_index type_idx) const +{ + /* Convert vint types to their corresponding scalar float types. + Note: + - According to riscv-vector-builtins-types.def, the index of an unsigned + type is always one greater than its corresponding signed type. + - Conversion for vint8 types is not required. */ + if (type_idx >= VECTOR_TYPE_vint16mf4_t && type_idx <= VECTOR_TYPE_vuint16m8_t) + return builtin_types[VECTOR_TYPE_vfloat16m1_t].scalar; + else if (type_idx >= VECTOR_TYPE_vint32mf2_t && type_idx <= VECTOR_TYPE_vuint32m8_t) + return builtin_types[VECTOR_TYPE_vfloat32m1_t].scalar; + else if (type_idx >= VECTOR_TYPE_vint64m1_t && type_idx <= VECTOR_TYPE_vuint64m8_t) + return builtin_types[VECTOR_TYPE_vfloat64m1_t].scalar; + else + return NULL_TREE; +} + vector_type_index rvv_arg_type_info::get_function_type_index (vector_type_index type_idx) const { @@ -3737,7 +4114,7 @@ function_instance::modifies_global_state_p () const return true; /* Handle direct modifications of global state. */ - return flags & (CP_WRITE_MEMORY | CP_WRITE_CSR); + return flags & (CP_WRITE_MEMORY | CP_WRITE_CSR | CP_USE_COPROCESSORS); } /* Return true if calls to the function could raise a signal. 
*/ diff --git a/gcc/config/riscv/riscv-vector-builtins.def b/gcc/config/riscv/riscv-vector-builtins.def index 5359490d464..480406ac3f4 100644 --- a/gcc/config/riscv/riscv-vector-builtins.def +++ b/gcc/config/riscv/riscv-vector-builtins.def @@ -75,14 +75,14 @@ along with GCC; see the file COPYING3. If not see QUAD_FIX_UNSIGNED, OCT_TRUNC, DOUBLE_TRUNC_SCALAR, DOUBLE_TRUNC_SIGNED, \ DOUBLE_TRUNC_UNSIGNED, DOUBLE_TRUNC_UNSIGNED_SCALAR, \ DOUBLE_TRUNC_BFLOAT_SCALAR, DOUBLE_TRUNC_BFLOAT, DOUBLE_TRUNC_FLOAT, FLOAT, \ - LMUL1, WLMUL1, QLMUL1, QLMUL1_SIGNED, QLMUL1_UNSIGNED, EEW8_INTERPRET, \ + LMUL1, WLMUL1, QLMUL1, QLMUL1_SIGNED, QLMUL1_UNSIGNED, XFQF, EEW8_INTERPRET, \ EEW16_INTERPRET, EEW32_INTERPRET, EEW64_INTERPRET, BOOL1_INTERPRET, \ BOOL2_INTERPRET, BOOL4_INTERPRET, BOOL8_INTERPRET, BOOL16_INTERPRET, \ BOOL32_INTERPRET, BOOL64_INTERPRET, SIGNED_EEW8_LMUL1_INTERPRET, \ SIGNED_EEW16_LMUL1_INTERPRET, SIGNED_EEW32_LMUL1_INTERPRET, \ SIGNED_EEW64_LMUL1_INTERPRET, UNSIGNED_EEW8_LMUL1_INTERPRET, \ UNSIGNED_EEW16_LMUL1_INTERPRET, UNSIGNED_EEW32_LMUL1_INTERPRET, \ - UNSIGNED_EEW64_LMUL1_INTERPRET, X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT, \ + UNSIGNED_EEW64_LMUL1_INTERPRET, X2, X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT,\ X16_VLMUL_EXT, X32_VLMUL_EXT, X64_VLMUL_EXT, TUPLE_SUBPART) #endif @@ -637,6 +637,32 @@ DEF_RVV_OP_TYPE (xu_w) DEF_RVV_OP_TYPE (s) DEF_RVV_OP_TYPE (4x8x4) DEF_RVV_OP_TYPE (2x8x2) +DEF_RVV_OP_TYPE (v_x) +DEF_RVV_OP_TYPE (i) +DEF_RVV_OP_TYPE (v_i) +DEF_RVV_OP_TYPE (xv) +DEF_RVV_OP_TYPE (iv) +DEF_RVV_OP_TYPE (fv) +DEF_RVV_OP_TYPE (vvv) +DEF_RVV_OP_TYPE (xvv) +DEF_RVV_OP_TYPE (ivv) +DEF_RVV_OP_TYPE (fvv) +DEF_RVV_OP_TYPE (vvw) +DEF_RVV_OP_TYPE (xvw) +DEF_RVV_OP_TYPE (ivw) +DEF_RVV_OP_TYPE (fvw) +DEF_RVV_OP_TYPE (v_vv) +DEF_RVV_OP_TYPE (v_xv) +DEF_RVV_OP_TYPE (v_iv) +DEF_RVV_OP_TYPE (v_fv) +DEF_RVV_OP_TYPE (v_vvv) +DEF_RVV_OP_TYPE (v_xvv) +DEF_RVV_OP_TYPE (v_ivv) +DEF_RVV_OP_TYPE (v_fvv) +DEF_RVV_OP_TYPE (v_vvw) +DEF_RVV_OP_TYPE (v_xvw) +DEF_RVV_OP_TYPE (v_ivw) 
+DEF_RVV_OP_TYPE (v_fvw) DEF_RVV_PRED_TYPE (ta) DEF_RVV_PRED_TYPE (tu) @@ -700,6 +726,7 @@ DEF_RVV_BASE_TYPE (widen_lmul1_vector, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (quad_lmul1_vector, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (quad_lmul1_signed_vector, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (quad_lmul1_unsigned_vector, get_vector_type (type_idx)) +DEF_RVV_BASE_TYPE (xfqf_vector, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (eew8_interpret, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (eew16_interpret, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (eew32_interpret, get_vector_type (type_idx)) @@ -719,6 +746,7 @@ DEF_RVV_BASE_TYPE (unsigned_eew8_lmul1_interpret, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (unsigned_eew16_lmul1_interpret, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (unsigned_eew32_lmul1_interpret, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (unsigned_eew64_lmul1_interpret, get_vector_type (type_idx)) +DEF_RVV_BASE_TYPE (x2_vector, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (vlmul_ext_x2, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (vlmul_ext_x4, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (vlmul_ext_x8, get_vector_type (type_idx)) @@ -727,6 +755,8 @@ DEF_RVV_BASE_TYPE (vlmul_ext_x32, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (vlmul_ext_x64, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (size_ptr, build_pointer_type (size_type_node)) DEF_RVV_BASE_TYPE (tuple_subpart, get_tuple_subpart_type (type_idx)) +DEF_RVV_BASE_TYPE (xfqf_float, get_xfqf_float_type (type_idx)) +DEF_RVV_BASE_TYPE (scalar_float, get_scalar_float_type (type_idx)) DEF_RVV_VXRM_ENUM (RNU, VXRM_RNU) DEF_RVV_VXRM_ENUM (RNE, VXRM_RNE) diff --git a/gcc/config/riscv/riscv-vector-builtins.h b/gcc/config/riscv/riscv-vector-builtins.h index 2602f6c2aa1..a8b6a8a8571 100644 --- a/gcc/config/riscv/riscv-vector-builtins.h +++ b/gcc/config/riscv/riscv-vector-builtins.h @@ -130,6 +130,7 @@ enum required_ext XSFVQMACCQOQ_EXT, /* XSFVQMACCQOQ extension */ 
XSFVQMACCDOD_EXT, /* XSFVQMACCDOD extension */ XSFVFNRCLIPXFQF_EXT, /* XSFVFNRCLIPXFQF extension */ + XSFVCP_EXT, /* XSFVCP extension*/ /* Please update below to isa_name func when add or remove enum type(s). */ }; @@ -169,6 +170,8 @@ static inline const char * required_ext_to_isa_name (enum required_ext required) return "xsfvqmaccdod"; case XSFVFNRCLIPXFQF_EXT: return "xsfvfnrclipxfqf"; + case XSFVCP_EXT: + return "xsfvcp"; default: gcc_unreachable (); } @@ -212,6 +215,8 @@ static inline bool required_extensions_specified (enum required_ext required) return TARGET_XSFVQMACCDOD; case XSFVFNRCLIPXFQF_EXT: return TARGET_XSFVFNRCLIPXFQF; + case XSFVCP_EXT: + return TARGET_XSFVCP; default: gcc_unreachable (); } @@ -296,6 +301,8 @@ struct rvv_arg_type_info tree get_vector_type (vector_type_index) const; tree get_tree_type (vector_type_index) const; tree get_tuple_subpart_type (vector_type_index) const; + tree get_xfqf_float_type (vector_type_index) const; + tree get_scalar_float_type (vector_type_index) const; }; /* Static information for each operand. 
*/ @@ -358,6 +365,8 @@ struct function_group_info return TARGET_XSFVQMACCDOD; case XSFVFNRCLIPXFQF_EXT: return TARGET_XSFVFNRCLIPXFQF; + case XSFVCP_EXT: + return TARGET_XSFVCP; default: gcc_unreachable (); } diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md index 3a4cd1d93a0..9fdbac7c006 100644 --- a/gcc/config/riscv/riscv.md +++ b/gcc/config/riscv/riscv.md @@ -482,6 +482,8 @@ ;; SiFive custom extension instrctions ;; sf_vqmacc vector matrix integer multiply-add instructions ;; sf_vfnrclip vector fp32 to int8 ranged clip instructions +;; sf_vc vector coprocessor interface without side effect +;; sf_vc_se vector coprocessor interface with side effect (define_attr "type" "unknown,branch,jump,jalr,ret,call,load,fpload,store,fpstore, mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul, @@ -503,7 +505,8 @@ vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down, vgather,vcompress,vmov,vector,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vcpop,vrol,vror,vwsll, vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,vaeskf1,vaeskf2,vaesz, - vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,vfncvtbf16,vfwcvtbf16,vfwmaccbf16" + vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,vfncvtbf16,vfwcvtbf16,vfwmaccbf16, + sf_vc,sf_vc_se" (cond [(eq_attr "got" "load") (const_string "load") ;; If a doubleword move uses these expensive instructions, diff --git a/gcc/config/riscv/sifive-vector-builtins-bases.cc b/gcc/config/riscv/sifive-vector-builtins-bases.cc index 77048a98001..e7bbcf8d88c 100644 --- a/gcc/config/riscv/sifive-vector-builtins-bases.cc +++ b/gcc/config/riscv/sifive-vector-builtins-bases.cc @@ -195,12 +195,89 @@ class sf_vfnrclip_xu_f_qf : public function_base } }; +/* Implements SiFive sf.vc. 
*/ +class sf_vc : public function_base +{ +public: + + unsigned int call_properties (const function_instance &) const override + { + return CP_USE_COPROCESSORS; + } + + rtx expand (function_expander &e) const override + { + switch (e.op_info->op) + { + case OP_TYPE_x: + return e.use_exact_insn (code_for_sf_vc_x (e.vector_mode ())); + case OP_TYPE_i: + return e.use_exact_insn (code_for_sf_vc_i (e.vector_mode ())); + case OP_TYPE_vv: + return e.use_exact_insn (code_for_sf_vc_vv (e.vector_mode ())); + case OP_TYPE_xv: + return e.use_exact_insn (code_for_sf_vc_xv (e.vector_mode ())); + case OP_TYPE_iv: + return e.use_exact_insn (code_for_sf_vc_iv (e.vector_mode ())); + case OP_TYPE_fv: + return e.use_exact_insn (code_for_sf_vc_fv (e.vector_mode ())); + case OP_TYPE_v_x: + return e.use_exact_insn (code_for_sf_vc_v_x (e.vector_mode ())); + case OP_TYPE_v_i: + return e.use_exact_insn (code_for_sf_vc_v_i (e.vector_mode ())); + case OP_TYPE_v_vv: + return e.use_exact_insn (code_for_sf_vc_v_vv (e.vector_mode ())); + case OP_TYPE_v_xv: + return e.use_exact_insn (code_for_sf_vc_v_xv (e.vector_mode ())); + case OP_TYPE_v_iv: + return e.use_exact_insn (code_for_sf_vc_v_iv (e.vector_mode ())); + case OP_TYPE_v_fv: + return e.use_exact_insn (code_for_sf_vc_v_fv (e.vector_mode ())); + case OP_TYPE_vvv: + return e.use_exact_insn (code_for_sf_vc_vvv (e.vector_mode ())); + case OP_TYPE_xvv: + return e.use_exact_insn (code_for_sf_vc_xvv (e.vector_mode ())); + case OP_TYPE_ivv: + return e.use_exact_insn (code_for_sf_vc_ivv (e.vector_mode ())); + case OP_TYPE_fvv: + return e.use_exact_insn (code_for_sf_vc_fvv (e.vector_mode ())); + case OP_TYPE_vvw: + return e.use_exact_insn (code_for_sf_vc_vvw (e.vector_mode ())); + case OP_TYPE_xvw: + return e.use_exact_insn (code_for_sf_vc_xvw (e.vector_mode ())); + case OP_TYPE_ivw: + return e.use_exact_insn (code_for_sf_vc_ivw (e.vector_mode ())); + case OP_TYPE_fvw: + return e.use_exact_insn (code_for_sf_vc_fvw (e.vector_mode ())); + case 
OP_TYPE_v_vvv: + return e.use_exact_insn (code_for_sf_vc_v_vvv (e.vector_mode ())); + case OP_TYPE_v_xvv: + return e.use_exact_insn (code_for_sf_vc_v_xvv (e.vector_mode ())); + case OP_TYPE_v_ivv: + return e.use_exact_insn (code_for_sf_vc_v_ivv (e.vector_mode ())); + case OP_TYPE_v_fvv: + return e.use_exact_insn (code_for_sf_vc_v_fvv (e.vector_mode ())); + case OP_TYPE_v_vvw: + return e.use_exact_insn (code_for_sf_vc_v_vvw (e.vector_mode ())); + case OP_TYPE_v_xvw: + return e.use_exact_insn (code_for_sf_vc_v_xvw (e.vector_mode ())); + case OP_TYPE_v_ivw: + return e.use_exact_insn (code_for_sf_vc_v_ivw (e.vector_mode ())); + case OP_TYPE_v_fvw: + return e.use_exact_insn (code_for_sf_vc_v_fvw (e.vector_mode ())); + default: + gcc_unreachable (); + } + } +}; + static CONSTEXPR const sf_vqmacc sf_vqmacc_obj; static CONSTEXPR const sf_vqmaccu sf_vqmaccu_obj; static CONSTEXPR const sf_vqmaccsu sf_vqmaccsu_obj; static CONSTEXPR const sf_vqmaccus sf_vqmaccus_obj; static CONSTEXPR const sf_vfnrclip_x_f_qf sf_vfnrclip_x_f_qf_obj; static CONSTEXPR const sf_vfnrclip_xu_f_qf sf_vfnrclip_xu_f_qf_obj; +static CONSTEXPR const sf_vc sf_vc_obj; /* Declare the function base NAME, pointing it to an instance of class _obj. 
*/ @@ -213,4 +290,5 @@ BASE (sf_vqmaccsu) BASE (sf_vqmaccus) BASE (sf_vfnrclip_x_f_qf) BASE (sf_vfnrclip_xu_f_qf) +BASE (sf_vc) } // end namespace riscv_vector diff --git a/gcc/config/riscv/sifive-vector-builtins-bases.h b/gcc/config/riscv/sifive-vector-builtins-bases.h index 077f0713814..2fa7055dd55 100644 --- a/gcc/config/riscv/sifive-vector-builtins-bases.h +++ b/gcc/config/riscv/sifive-vector-builtins-bases.h @@ -23,6 +23,8 @@ namespace riscv_vector { +static const unsigned int CP_USE_COPROCESSORS = 1U << 6; + namespace bases { extern const function_base *const sf_vqmacc; extern const function_base *const sf_vqmaccu; @@ -30,6 +32,7 @@ extern const function_base *const sf_vqmaccsu; extern const function_base *const sf_vqmaccus; extern const function_base *const sf_vfnrclip_x_f_qf; extern const function_base *const sf_vfnrclip_xu_f_qf; +extern const function_base *const sf_vc; } } // end namespace riscv_vector diff --git a/gcc/config/riscv/sifive-vector-builtins-functions.def b/gcc/config/riscv/sifive-vector-builtins-functions.def index 5275d85a36e..607b866077d 100644 --- a/gcc/config/riscv/sifive-vector-builtins-functions.def +++ b/gcc/config/riscv/sifive-vector-builtins-functions.def @@ -55,4 +55,49 @@ DEF_RVV_FUNCTION (sf_vfnrclip_x_f_qf, sf_vfnrclip, full_preds, i_clip_qf_ops) DEF_RVV_FUNCTION (sf_vfnrclip_xu_f_qf, sf_vfnrclip, full_preds, u_clip_qf_ops) #undef REQUIRED_EXTENSIONS +#define REQUIRED_EXTENSIONS XSFVCP_EXT +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_x_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_i_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_vv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_xv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_iv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_fv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_vvv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_xvv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, 
none_preds, sf_vc_ivv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_fvv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_vvw_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_xvw_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_ivw_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_fvw_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_x_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_i_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_vv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_xv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_iv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_fv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_vvv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_xvv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_ivv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_fvv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_vvw_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_xvw_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_ivw_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_fvw_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_x_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_i_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_vv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_xv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_iv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_fv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_vvv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_xvv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_ivv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_fvv_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_vvw_ops) +DEF_RVV_FUNCTION 
(sf_vc, sf_vcix, none_preds, sf_vc_v_xvw_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_ivw_ops) +DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_fvw_ops) +#undef REQUIRED_EXTENSIONS + #undef DEF_RVV_FUNCTION diff --git a/gcc/config/riscv/sifive-vector.md b/gcc/config/riscv/sifive-vector.md index e2fba6a27fb..15c4eb0a087 100644 --- a/gcc/config/riscv/sifive-vector.md +++ b/gcc/config/riscv/sifive-vector.md @@ -164,8 +164,8 @@ (set_attr "mode" "")]) (define_insn "@pred_sf_vfnrclip_x_f_qf" - [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr") - (if_then_else: + [(set (match_operand:SF_XF 0 "register_operand" "=vd, vd, vr, vr") + (if_then_else:SF_XF (unspec: [(match_operand: 1 "vector_mask_operand" " vm, vm,Wc1,Wc1") (match_operand 5 "vector_length_operand" " rK, rK, rK, rK") @@ -174,11 +174,580 @@ (match_operand 8 "const_int_operand" " i, i, i, i") (reg:SI VL_REGNUM) (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) - (unspec: + (unspec:SF_XF [(match_operand:SF 4 "register_operand" " f, f, f, f") - (match_operand:SF_VF 3 "register_operand" " vr, vr, vr, vr")] SF_VFNRCLIP) - (match_operand: 2 "vector_merge_operand" " vu, 0, vu, 0")))] + (match_operand: 3 "register_operand" " vr, vr, vr, vr")] SF_VFNRCLIP) + (match_operand:SF_XF 2 "vector_merge_operand" " vu, 0, vu, 0")))] "TARGET_VECTOR && TARGET_XSFVFNRCLIPXFQF" "sf.vfnrclip.x.f.qf\t%0,%3,%4%p1" [(set_attr "type" "sf_vfnrclip") (set_attr "mode" "")]) + +;; SF_VCP +(define_insn "@sf_vc_x" + [(unspec: + [(match_operand: 0 "vector_mask_operand" " Wc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 1 "const_int_operand" "Ou02") + (match_operand:SI 2 "const_int_operand" "K") + (match_operand:SI 3 "const_int_operand" "K") + (match_operand: 4 "register_operand" 
"r")] UNSPEC_SF_CV)] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.x\t%1,%2,%3,%4" + [(set_attr "type" "sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_v_x" + [(set (match_operand:VFULLI 0 "register_operand" "=vr,vr") + (if_then_else:VFULLI + (unspec: + [(match_operand: 1 "vector_mask_operand" " Wc1,Wc1") + (match_operand 6 "vector_length_operand" " rK, rK") + (match_operand 7 "const_int_operand" " i, i") + (match_operand 8 "const_int_operand" " i, i") + (match_operand 9 "const_int_operand" " i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02") + (match_operand:SI 4 "const_int_operand" "K,K") + (match_operand: 5 "register_operand" "r,r")] UNSPEC_SF_CV) + (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.v.x\t%3,%4,%0,%5" + [(set_attr "type" "sf_vc,sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_i" + [(unspec: + [(match_operand: 0 "vector_mask_operand" " Wc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 1 "const_int_operand" "Ou02") + (match_operand:SI 2 "const_int_operand" "K") + (match_operand:SI 3 "const_int_operand" "K") + (match_operand:SI 4 "const_int_operand" "P")] UNSPEC_SF_CV)] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.i\t%1,%2,%3,%4" + [(set_attr "type" "sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_v_i" + [(set (match_operand:VFULLI 0 "register_operand" "=vr,vr") + (if_then_else:VFULLI + (unspec: + [(match_operand: 1 "vector_mask_operand" " Wc1,Wc1") + (match_operand 6 "vector_length_operand" " rK, rK") + (match_operand 7 "const_int_operand" " i, i") + (match_operand 8 "const_int_operand" " i, i") + (match_operand 9 "const_int_operand" 
" i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02") + (match_operand:SI 4 "const_int_operand" "K,K") + (match_operand:SI 5 "const_int_operand" "P,P")] UNSPEC_SF_CV) + (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.v.i\t%3,%4,%0,%5" + [(set_attr "type" "sf_vc,sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_vv" + [(unspec: + [(match_operand: 0 "vector_mask_operand" " Wc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 1 "const_int_operand" "Ou02") + (match_operand:SI 2 "const_int_operand" "K") + (match_operand:VFULLI 3 "register_operand" "vr") + (match_operand:VFULLI 4 "register_operand" "vr")] UNSPEC_SF_CV)] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.vv\t%1,%2,%3,%4" + [(set_attr "type" "sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_v_vv" + [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr") + (if_then_else:VFULLI + (unspec: + [(match_operand: 1 "vector_mask_operand" " Wc1,Wc1") + (match_operand 6 "vector_length_operand" " rK, rK") + (match_operand 7 "const_int_operand" " i, i") + (match_operand 8 "const_int_operand" " i, i") + (match_operand 9 "const_int_operand" " i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02") + (match_operand:VFULLI 4 "register_operand" "vr,vr") + (match_operand:VFULLI 5 "register_operand" "vr,vr")] UNSPEC_SF_CV) + (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.v.vv\t%3,%0,%4,%5" + [(set_attr "type" "sf_vc,sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_xv" + 
[(unspec: + [(match_operand: 0 "vector_mask_operand" " Wc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 1 "const_int_operand" "Ou02") + (match_operand:SI 2 "const_int_operand" "K") + (match_operand:VFULLI 3 "register_operand" "vr") + (match_operand: 4 "register_operand" "r")] UNSPEC_SF_CV)] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.xv\t%1,%2,%3,%4" + [(set_attr "type" "sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_v_xv" + [(set (match_operand:VFULLI 0 "register_operand" "=&vd,vd") + (if_then_else:VFULLI + (unspec: + [(match_operand: 1 "vector_mask_operand" " Wc1,Wc1") + (match_operand 6 "vector_length_operand" " rK, rK") + (match_operand 7 "const_int_operand" " i, i") + (match_operand 8 "const_int_operand" " i, i") + (match_operand 9 "const_int_operand" " i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02") + (match_operand:VFULLI 4 "register_operand" "vr,vr") + (match_operand: 5 "register_operand" "r,r")] UNSPEC_SF_CV) + (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.v.xv\t%3,%0,%4,%5" + [(set_attr "type" "sf_vc,sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_iv" + [(unspec: + [(match_operand: 0 "vector_mask_operand" " Wc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 1 "const_int_operand" "Ou02") + (match_operand:SI 2 "const_int_operand" "K") + (match_operand:VFULLI 3 "register_operand" "vr") + 
(match_operand:SI 4 "const_int_operand" "P")] UNSPEC_SF_CV)] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.iv\t%1,%2,%3,%4" + [(set_attr "type" "sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_v_iv" + [(set (match_operand:VFULLI 0 "register_operand" "=&vd,vd") + (if_then_else:VFULLI + (unspec: + [(match_operand: 1 "vector_mask_operand" " Wc1,Wc1") + (match_operand 6 "vector_length_operand" " rK, rK") + (match_operand 7 "const_int_operand" " i, i") + (match_operand 8 "const_int_operand" " i, i") + (match_operand 9 "const_int_operand" " i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02") + (match_operand:VFULLI 4 "register_operand" "vr,vr") + (match_operand:SI 5 "const_int_operand" "P,P")] UNSPEC_SF_CV) + (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.v.iv\t%3,%0,%4,%5" + [(set_attr "type" "sf_vc,sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_fv" + [(unspec: + [(match_operand: 0 "vector_mask_operand" " Wc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:SF_FV + [(match_operand:SI 1 "const_int_operand" "Ou01") + (match_operand:SI 2 "const_int_operand" "K") + (match_operand:SF_FV 3 "register_operand" "vr") + (match_operand: 4 "register_operand" "f")] UNSPEC_SF_CV)] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.fv\t%1,%2,%3,%4" + [(set_attr "type" "sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_v_fv" + [(set (match_operand:SF_FV 0 "register_operand" "=&vd,vd") + (if_then_else:SF_FV + (unspec: + [(match_operand: 1 "vector_mask_operand" " Wc1,Wc1") + (match_operand 6 "vector_length_operand" " rK, rK") + (match_operand 7 "const_int_operand" " i, i") + (match_operand 8 
"const_int_operand" " i, i") + (match_operand 9 "const_int_operand" " i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:SF_FV + [(match_operand:SI 3 "const_int_operand" "Ou01,Ou01") + (match_operand:SF_FV 4 "register_operand" "vr,vr") + (match_operand: 5 "register_operand" "f,f")] UNSPEC_SF_CV) + (match_operand:SF_FV 2 "vector_merge_operand" "vu,vu")))] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.v.fv\t%3,%0,%4,%5" + [(set_attr "type" "sf_vc,sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_vvv" + [(unspec: + [(match_operand: 0 "vector_mask_operand" "vmWc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 1 "const_int_operand" "Ou02") + (match_operand:VFULLI 2 "register_operand" "vd") + (match_operand:VFULLI 3 "register_operand" "vr") + (match_operand:VFULLI 4 "register_operand" "vr")] UNSPEC_SF_CV)] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.vvv\t%1,%2,%3,%4" + [(set_attr "type" "sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_v_vvv" + [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr") + (if_then_else:VFULLI + (unspec: + [(match_operand: 1 "vector_mask_operand" " Wc1,Wc1") + (match_operand 7 "vector_length_operand" " rK, rK") + (match_operand 8 "const_int_operand" " i, i") + (match_operand 9 "const_int_operand" " i, i") + (match_operand 10 "const_int_operand" " i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02") + (match_operand:VFULLI 4 "register_operand" "vd,vd") + (match_operand:VFULLI 5 "register_operand" "vr,vr") + (match_operand:VFULLI 6 "register_operand" "vr,vr")] UNSPEC_SF_CV) + (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))] + "TARGET_VECTOR && 
TARGET_XSFVCP" + "sf.vc.v.vvv\t%3,%4,%6,%5" + [(set_attr "type" "sf_vc,sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_xvv" + [(unspec: + [(match_operand: 0 "vector_mask_operand" " Wc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 1 "const_int_operand" "Ou02") + (match_operand:VFULLI 2 "register_operand" "vd") + (match_operand:VFULLI 3 "register_operand" "vr") + (match_operand: 4 "register_operand" "r")] UNSPEC_SF_CV)] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.xvv\t%1,%2,%3,%4" + [(set_attr "type" "sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_v_xvv" + [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr") + (if_then_else:VFULLI + (unspec: + [(match_operand: 1 "vector_mask_operand" " Wc1,Wc1") + (match_operand 7 "vector_length_operand" " rK, rK") + (match_operand 8 "const_int_operand" " i, i") + (match_operand 9 "const_int_operand" " i, i") + (match_operand 10 "const_int_operand" " i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02") + (match_operand:VFULLI 4 "register_operand" "vd,vd") + (match_operand:VFULLI 5 "register_operand" "vr,vr") + (match_operand: 6 "register_operand" "r,r")] UNSPEC_SF_CV) + (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.v.xvv\t%3,%4,%5,%6" + [(set_attr "type" "sf_vc,sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_ivv" + [(unspec: + [(match_operand: 0 "vector_mask_operand" " Wc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] 
UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 1 "const_int_operand" "Ou02") + (match_operand:VFULLI 2 "register_operand" "vd") + (match_operand:VFULLI 3 "register_operand" "vr") + (match_operand:SI 4 "const_int_operand" "P")] UNSPEC_SF_CV)] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.ivv\t%1,%2,%3,%4" + [(set_attr "type" "sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_v_ivv" + [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr") + (if_then_else:VFULLI + (unspec: + [(match_operand: 1 "vector_mask_operand" " Wc1,Wc1") + (match_operand 7 "vector_length_operand" " rK, rK") + (match_operand 8 "const_int_operand" " i, i") + (match_operand 9 "const_int_operand" " i, i") + (match_operand 10 "const_int_operand" " i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:VFULLI + [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02") + (match_operand:VFULLI 4 "register_operand" "vd,vd") + (match_operand:VFULLI 5 "register_operand" "vr,vr") + (match_operand:SI 6 "const_int_operand" "P,P")] UNSPEC_SF_CV) + (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.v.ivv\t%3,%4,%5,%6" + [(set_attr "type" "sf_vc,sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_fvv" + [(unspec: + [(match_operand: 0 "vector_mask_operand" " Wc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:SF_FV + [(match_operand:SI 1 "const_int_operand" "Ou01") + (match_operand:SF_FV 2 "register_operand" "vd") + (match_operand:SF_FV 3 "register_operand" "vr") + (match_operand: 4 "register_operand" "f")] UNSPEC_SF_CV)] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.fvv\t%1,%2,%3,%4" + [(set_attr "type" "sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_v_fvv" + [(set (match_operand:SF_FV 0 
"register_operand" "=&vr,vr") + (if_then_else:SF_FV + (unspec: + [(match_operand: 1 "vector_mask_operand" " Wc1,Wc1") + (match_operand 7 "vector_length_operand" " rK, rK") + (match_operand 8 "const_int_operand" " i, i") + (match_operand 9 "const_int_operand" " i, i") + (match_operand 10 "const_int_operand" " i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:SF_FV + [(match_operand:SI 3 "const_int_operand" "Ou01,Ou01") + (match_operand:SF_FV 4 "register_operand" "vd,vd") + (match_operand:SF_FV 5 "register_operand" "vr,vr") + (match_operand: 6 "register_operand" "f,f")] UNSPEC_SF_CV) + (match_operand:SF_FV 2 "vector_merge_operand" "vu,vu")))] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.v.fvv\t%3,%4,%5,%6" + [(set_attr "type" "sf_vc,sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_vvw" + [(unspec: + [(match_operand: 0 "vector_mask_operand" " Wc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec: + [(match_operand:SI 1 "const_int_operand" "Ou02") + (match_operand: 2 "register_operand" "vd") + (match_operand:SF_VC_W 3 "register_operand" "vr") + (match_operand:SF_VC_W 4 "register_operand" "vr")] UNSPEC_SF_CV)] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.vvw\t%1,%2,%3,%4" + [(set_attr "type" "sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_v_vvw" + [(set (match_operand: 0 "register_operand" "=&vr,vr") + (if_then_else: + (unspec: + [(match_operand: 1 "vector_mask_operand" " Wc1,Wc1") + (match_operand 7 "vector_length_operand" " rK, rK") + (match_operand 8 "const_int_operand" " i, i") + (match_operand 9 "const_int_operand" " i, i") + (match_operand 10 "const_int_operand" " i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec: + [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02") + 
(match_operand: 4 "register_operand" "vd,vd") + (match_operand:SF_VC_W 5 "register_operand" "vr,vr") + (match_operand:SF_VC_W 6 "register_operand" "vr,vr")] UNSPEC_SF_CV) + (match_operand: 2 "vector_merge_operand" "vu,vu")))] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.v.vvw\t%3,%4,%5,%6" + [(set_attr "type" "sf_vc,sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_xvw" + [(unspec: + [(match_operand: 0 "vector_mask_operand" " Wc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec: + [(match_operand:SI 1 "const_int_operand" "Ou02") + (match_operand: 2 "register_operand" "vd") + (match_operand:SF_VC_W 3 "register_operand" "vr") + (match_operand: 4 "register_operand" "r")] UNSPEC_SF_CV)] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.xvw\t%1,%2,%3,%4" + [(set_attr "type" "sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_v_xvw" + [(set (match_operand: 0 "register_operand" "=&vr,vr") + (if_then_else: + (unspec: + [(match_operand: 1 "vector_mask_operand" " Wc1,Wc1") + (match_operand 7 "vector_length_operand" " rK, rK") + (match_operand 8 "const_int_operand" " i, i") + (match_operand 9 "const_int_operand" " i, i") + (match_operand 10 "const_int_operand" " i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec: + [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02") + (match_operand: 4 "register_operand" "vd,vd") + (match_operand:SF_VC_W 5 "register_operand" "vr,vr") + (match_operand: 6 "register_operand" "r,r")] UNSPEC_SF_CV) + (match_operand: 2 "vector_merge_operand" "vu,vu")))] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.v.xvw\t%3,%4,%5,%6" + [(set_attr "type" "sf_vc,sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_ivw" + [(unspec: + [(match_operand: 0 "vector_mask_operand" " Wc1") + (match_operand 5 
"vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec: + [(match_operand:SI 1 "const_int_operand" "Ou02") + (match_operand: 2 "register_operand" "vd") + (match_operand:SF_VC_W 3 "register_operand" "vr") + (match_operand:SI 4 "immediate_operand" "P")] UNSPEC_SF_CV)] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.ivw\t%1,%2,%3,%4" + [(set_attr "type" "sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_v_ivw" + [(set (match_operand: 0 "register_operand" "=&vr,vr") + (if_then_else: + (unspec: + [(match_operand: 1 "vector_mask_operand" " Wc1,Wc1") + (match_operand 7 "vector_length_operand" " rK, rK") + (match_operand 8 "const_int_operand" " i, i") + (match_operand 9 "const_int_operand" " i, i") + (match_operand 10 "const_int_operand" " i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec: + [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02") + (match_operand: 4 "register_operand" "vd,vd") + (match_operand:SF_VC_W 5 "register_operand" "vr,vr") + (match_operand:SI 6 "immediate_operand" "P,P")] UNSPEC_SF_CV) + (match_operand: 2 "vector_merge_operand" "vu,vu")))] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.v.ivw\t%3,%4,%5,%6" + [(set_attr "type" "sf_vc,sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_fvw" + [(unspec: + [(match_operand: 0 "vector_mask_operand" " Wc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec: + [(match_operand:SI 1 "const_int_operand" "Ou01") + (match_operand: 2 "register_operand" "vd") + (match_operand:SF_VC_FW 3 "register_operand" "vr") + (match_operand: 4 "register_operand" "f")] UNSPEC_SF_CV)] + 
"TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.fvw\t%1,%2,%3,%4" + [(set_attr "type" "sf_vc_se") + (set_attr "mode" "")]) + +(define_insn "@sf_vc_v_fvw" + [(set (match_operand: 0 "register_operand" "=&vr,vr") + (if_then_else: + (unspec: + [(match_operand: 1 "vector_mask_operand" " Wc1,Wc1") + (match_operand 7 "vector_length_operand" " rK, rK") + (match_operand 8 "const_int_operand" " i, i") + (match_operand 9 "const_int_operand" " i, i") + (match_operand 10 "const_int_operand" " i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec: + [(match_operand:SI 3 "const_int_operand" "Ou01,Ou01") + (match_operand: 4 "register_operand" "vd,vd") + (match_operand:SF_VC_FW 5 "register_operand" "vr,vr") + (match_operand: 6 "register_operand" "f,f")] UNSPEC_SF_CV) + (match_operand: 2 "vector_merge_operand" "vu,vu")))] + "TARGET_VECTOR && TARGET_XSFVCP" + "sf.vc.v.fvw\t%3,%4,%5,%6" + [(set_attr "type" "sf_vc,sf_vc_se") + (set_attr "mode" "")]) diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md index 8e73022904b..395a1a791e5 100644 --- a/gcc/config/riscv/vector-iterators.md +++ b/gcc/config/riscv/vector-iterators.md @@ -106,6 +106,7 @@ UNSPEC_SF_VFNRCLIP UNSPEC_SF_VFNRCLIPU + UNSPEC_SF_CV ]) (define_c_enum "unspecv" [ @@ -4794,24 +4795,70 @@ (RVVM1SI "rvvm1qi") ]) -(define_mode_iterator SF_VF [ - (RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32") - (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32") +(define_mode_iterator SF_XF [ + RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32") ]) (define_mode_attr SF_XFQF [ - (RVVMF2SF "RVVMF8QI") - (RVVM1SF "RVVMF4QI") - (RVVM2SF "RVVMF2QI") - (RVVM4SF "RVVM1QI") - (RVVM8SF "RVVM2QI") + (RVVMF8QI "RVVMF2SF") + (RVVMF4QI "RVVM1SF") + (RVVMF2QI "RVVM2SF") + (RVVM1QI "RVVM4SF") + (RVVM2QI "RVVM8SF") ]) (define_mode_attr sf_xfqf [ - (RVVMF2SF "rvvmf8qi") - (RVVM1SF 
"rvvmf4qi") - (RVVM2SF "rvvmf2qi") - (RVVM4SF "rvvm1qi") - (RVVM8SF "rvvm2qi") + (RVVMF8QI "rvvmf2sf") + (RVVMF4QI "rvvm1sf") + (RVVMF2QI "rvvm2sf") + (RVVM1QI "rvvm4sf") + (RVVM2QI "rvvm8sf") +]) + + +(define_mode_iterator SF_VC_W [ + RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32") + RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32") + RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32") +]) + +(define_mode_attr SF_VW [ + (RVVM4QI "RVVM8HI") (RVVM2QI "RVVM4HI") (RVVM1QI "RVVM2HI") (RVVMF2QI "RVVM1HI") + (RVVMF4QI "RVVMF2HI") (RVVMF8QI "RVVMF4HI") + (RVVM4HI "RVVM8SI") (RVVM2HI "RVVM4SI") (RVVM1HI "RVVM2SI") (RVVMF2HI "RVVM1SI") + (RVVMF4HI "RVVMF2SI") + (RVVM4SI "RVVM8DI") (RVVM2SI "RVVM4DI") (RVVM1SI "RVVM2DI") (RVVMF2SI "RVVM1DI") +]) + +(define_mode_attr sf_vw [ + (RVVM4QI "rvvm8hi") (RVVM2QI "rvvm4hi") (RVVM1QI "rvvm2hi") (RVVMF2QI "rvvm1hi") + (RVVMF4QI "rvvmf2hi") (RVVMF8QI "rvvmf4hi") + (RVVM4HI "rvvm8si") (RVVM2HI "rvvm4si") (RVVM1HI "rvvm2si") (RVVMF2HI "rvvm1si") + (RVVMF4HI "rvvmf2si") + (RVVM4SI "rvvm8di") (RVVM2SI "rvvm4di") (RVVM1SI "rvvm2di") (RVVMF2SI "rvvm1di") +]) + +(define_mode_iterator SF_FV [ + RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32") + RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32") + (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64") + (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64") +]) + + +(define_mode_iterator SF_VC_FW [ + RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32") + RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32") +]) + +(define_mode_attr SF_XF [ + (RVVM8HI "HF") (RVVM4HI "HF") (RVVM2HI "HF") (RVVM1HI "HF") (RVVMF2HI "HF") (RVVMF4HI "HF") + (RVVM8SI "SF") (RVVM4SI "SF") (RVVM2SI "SF") (RVVM1SI "SF") (RVVMF2SI "SF") + (RVVM8DI "DF") (RVVM4DI "DF") (RVVM2DI "DF") (RVVM1DI "DF") +]) + +(define_mode_attr SF_XFW [ + (RVVM4HI "HF") (RVVM2HI "HF") (RVVM1HI "HF") (RVVMF2HI 
"HF") (RVVMF4HI "HF") + (RVVM4SI "SF") (RVVM2SI "SF") (RVVM1SI "SF") (RVVMF2SI "SF") ]) diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md index 58406f3d17c..403656d52f9 100644 --- a/gcc/config/riscv/vector.md +++ b/gcc/config/riscv/vector.md @@ -56,7 +56,7 @@ vssegtux,vssegtox,vlsegdff,vandn,vbrev,vbrev8,vrev8,vcpop,vclz,vctz,vrol,\ vror,vwsll,vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,\ vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,\ - vfncvtbf16,vfwcvtbf16,vfwmaccbf16") + vfncvtbf16,vfwcvtbf16,vfwmaccbf16,sf_vqmacc,sf_vfnrclip,sf_vc,sf_vc_se") (const_string "true")] (const_string "false"))) @@ -893,7 +893,7 @@ vfredo,vfwredu,vfwredo,vslideup,vslidedown,vislide1up,\ vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd,\ vlsegds,vlsegdux,vlsegdox,vandn,vrol,vror,vwsll,vclmul,vclmulh,\ - vfwmaccbf16") + vfwmaccbf16,sf_vqmacc,sf_vfnrclip") (symbol_ref "riscv_vector::get_ta(operands[6])") (eq_attr "type" "vimuladd,vfmuladd") @@ -924,7 +924,7 @@ vfwalu,vfwmul,vfsgnj,vfcmp,vslideup,vslidedown,\ vislide1up,vislide1down,vfslide1up,vfslide1down,vgather,\ viwmuladd,vfwmuladd,vlsegds,vlsegdux,vlsegdox,vandn,vrol,\ - vror,vwsll,vclmul,vclmulh,vfwmaccbf16") + vror,vwsll,vclmul,vclmulh,vfwmaccbf16,sf_vqmacc,sf_vfnrclip") (symbol_ref "riscv_vector::get_ma(operands[7])") (eq_attr "type" "vimuladd,vfmuladd") diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_f.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_f.c new file mode 100644 index 00000000000..ceba419b596 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_f.c @@ -0,0 +1,1286 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_xsfvcp -mabi=lp64d -O3" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + +#include "riscv_vector.h" + +typedef _Float16 float16_t; +typedef float float32_t; +typedef double float64_t; + +/* +** test_sf_vc_v_fv_u16mf4: +** ... 
+** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_fv_u16mf4(vuint16mf4_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_u16mf4(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_fv_se_u16mf4(vuint16mf4_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_se_u16mf4(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_fv_u16mf2(vuint16mf2_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_u16mf2(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_fv_se_u16mf2(vuint16mf2_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_se_u16mf2(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_fv_u16m1(vuint16m1_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_u16m1(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_fv_se_u16m1(vuint16m1_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_se_u16m1(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_fv_u16m2(vuint16m2_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_u16m2(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_se_u16m2: +** ... 
+** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_fv_se_u16m2(vuint16m2_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_se_u16m2(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_fv_u16m4(vuint16m4_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_u16m4(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_fv_se_u16m4(vuint16m4_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_se_u16m4(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_fv_u16m8(vuint16m8_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_u16m8(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_fv_se_u16m8(vuint16m8_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_se_u16m8(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_fv_u32mf2(vuint32mf2_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_u32mf2(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_fv_se_u32mf2(vuint32mf2_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_se_u32mf2(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_u32m1: +** ... 
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_fv_u32m1(vuint32m1_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_u32m1(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_fv_se_u32m1(vuint32m1_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_se_u32m1(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_fv_u32m2(vuint32m2_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_u32m2(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_fv_se_u32m2(vuint32m2_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_se_u32m2(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_fv_u32m4(vuint32m4_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_u32m4(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_fv_se_u32m4(vuint32m4_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_se_u32m4(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_fv_u32m8(vuint32m8_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_u32m8(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_se_u32m8: +** ... 
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_fv_se_u32m8(vuint32m8_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_se_u32m8(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_fv_u64m1(vuint64m1_t vs2, float64_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_u64m1(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_fv_se_u64m1(vuint64m1_t vs2, float64_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_se_u64m1(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_fv_u64m2(vuint64m2_t vs2, float64_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_u64m2(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_fv_se_u64m2(vuint64m2_t vs2, float64_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_se_u64m2(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_fv_u64m4(vuint64m4_t vs2, float64_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_u64m4(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_fv_se_u64m4(vuint64m4_t vs2, float64_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_se_u64m4(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fv_u64m8: +** ... 
+** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_fv_u64m8(vuint64m8_t vs2, float64_t fs1, size_t vl) { + return __riscv_sf_vc_v_fv_u64m8(1, vs2, fs1, vl); +} + +/* +** test_sf_vc_fv_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fv_se_u16mf4(vuint16mf4_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fv_se_u16mf4(1, 3, vs2, fs1, vl); +} + +/* +** test_sf_vc_fv_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fv_se_u16mf2(vuint16mf2_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fv_se_u16mf2(1, 3, vs2, fs1, vl); +} + +/* +** test_sf_vc_fv_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fv_se_u16m1(vuint16m1_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fv_se_u16m1(1, 3, vs2, fs1, vl); +} + +/* +** test_sf_vc_fv_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fv_se_u16m2(vuint16m2_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fv_se_u16m2(1, 3, vs2, fs1, vl); +} + +/* +** test_sf_vc_fv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fv_se_u16m4(vuint16m4_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fv_se_u16m4(1, 3, vs2, fs1, vl); +} + +/* +** test_sf_vc_fv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fv_se_u16m8(vuint16m8_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fv_se_u16m8(1, 3, vs2, fs1, vl); +} + +/* +** test_sf_vc_fv_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+ +** ... 
+*/ +void test_sf_vc_fv_se_u32mf2(vuint32mf2_t vs2, float32_t fs1, size_t vl) { + __riscv_sf_vc_fv_se_u32mf2(1, 3, vs2, fs1, vl); +} + +/* +** test_sf_vc_fv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fv_se_u32m1(vuint32m1_t vs2, float32_t fs1, size_t vl) { + __riscv_sf_vc_fv_se_u32m1(1, 3, vs2, fs1, vl); +} + +/* +** test_sf_vc_fv_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fv_se_u32m2(vuint32m2_t vs2, float32_t fs1, size_t vl) { + __riscv_sf_vc_fv_se_u32m2(1, 3, vs2, fs1, vl); +} + +/* +** test_sf_vc_fv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fv_se_u32m4(vuint32m4_t vs2, float32_t fs1, size_t vl) { + __riscv_sf_vc_fv_se_u32m4(1, 3, vs2, fs1, vl); +} + +/* +** test_sf_vc_fv_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fv_se_u32m8(vuint32m8_t vs2, float32_t fs1, size_t vl) { + __riscv_sf_vc_fv_se_u32m8(1, 3, vs2, fs1, vl); +} + +/* +** test_sf_vc_fv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fv_se_u64m1(vuint64m1_t vs2, float64_t fs1, size_t vl) { + __riscv_sf_vc_fv_se_u64m1(1, 3, vs2, fs1, vl); +} + +/* +** test_sf_vc_fv_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fv_se_u64m2(vuint64m2_t vs2, float64_t fs1, size_t vl) { + __riscv_sf_vc_fv_se_u64m2(1, 3, vs2, fs1, vl); +} + +/* +** test_sf_vc_fv_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+ +** ... 
+*/ +void test_sf_vc_fv_se_u64m4(vuint64m4_t vs2, float64_t fs1, size_t vl) { + __riscv_sf_vc_fv_se_u64m4(1, 3, vs2, fs1, vl); +} + +/* +** test_sf_vc_fv_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fv_se_u64m8(vuint64m8_t vs2, float64_t fs1, size_t vl) { + __riscv_sf_vc_fv_se_u64m8(1, 3, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_fvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_u16mf4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_fvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_se_u16mf4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_fvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_u16mf2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_fvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_se_u16mf2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_fvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_u16m1(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_se_u16m1: +** ... 
+** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_se_u16m1(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_fvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_u16m2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_fvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_se_u16m2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_fvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_u16m4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_fvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_se_u16m4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_fvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_u16m8(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... 
+*/ +vuint16m8_t test_sf_vc_v_fvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_se_u16m8(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_fvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_u32mf2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_fvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_se_u32mf2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_fvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_u32m1(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_fvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_se_u32m1(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_fvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_u32m2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... 
+*/ +vuint32m2_t test_sf_vc_v_fvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_se_u32m2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_fvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_u32m4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_fvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_se_u32m4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_fvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_u32m8(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_se_u32m8(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_fvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, float64_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_u64m1(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... 
+*/ +vuint64m1_t test_sf_vc_v_fvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, float64_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_se_u64m1(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_fvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, float64_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_u64m2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_fvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, float64_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_se_u64m2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_fvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, float64_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_u64m4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_fvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, float64_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_se_u64m4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_fvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, float64_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_u64m8(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvv_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... 
+*/ +vuint64m8_t test_sf_vc_v_fvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, float64_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvv_se_u64m8(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvv_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fvv_se_u16mf4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvv_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fvv_se_u16mf2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvv_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fvv_se_u16m1(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvv_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fvv_se_u16m2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fvv_se_u16m4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fvv_se_u16m8(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvv_se_u32mf2: +** ... 
+** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float32_t fs1, size_t vl) { + __riscv_sf_vc_fvv_se_u32mf2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float32_t fs1, size_t vl) { + __riscv_sf_vc_fvv_se_u32m1(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvv_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float32_t fs1, size_t vl) { + __riscv_sf_vc_fvv_se_u32m2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float32_t fs1, size_t vl) { + __riscv_sf_vc_fvv_se_u32m4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvv_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float32_t fs1, size_t vl) { + __riscv_sf_vc_fvv_se_u32m8(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, float64_t fs1, size_t vl) { + __riscv_sf_vc_fvv_se_u64m1(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvv_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... 
+*/ +void test_sf_vc_fvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, float64_t fs1, size_t vl) { + __riscv_sf_vc_fvv_se_u64m2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvv_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, float64_t fs1, size_t vl) { + __riscv_sf_vc_fvv_se_u64m4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvv_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, float64_t fs1, size_t vl) { + __riscv_sf_vc_fvv_se_u64m8(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_fvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_u16mf4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_fvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_se_u16mf4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_fvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_u16mf2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... 
+*/ +vuint32m1_t test_sf_vc_v_fvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_se_u16mf2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_fvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_u16m1(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_fvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_se_u16m1(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_fvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_u16m2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_fvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_se_u16m2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_fvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_u16m4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... 
+*/ +vuint32m8_t test_sf_vc_v_fvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, float16_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_se_u16m4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_fvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_u32mf2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_fvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_se_u32mf2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_fvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_u32m1(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_fvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_se_u32m1(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_fvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_u32m2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... 
+*/ +vuint64m4_t test_sf_vc_v_fvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_se_u32m2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_fvw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_u32m4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_v_fvw_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_fvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float32_t fs1, size_t vl) { + return __riscv_sf_vc_v_fvw_se_u32m4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvw_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fvw_se_u16mf4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvw_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fvw_se_u16mf2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvw_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fvw_se_u16m1(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvw_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fvw_se_u16m2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvw_se_u16m4: +** ... 
+** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, float16_t fs1, size_t vl) { + __riscv_sf_vc_fvw_se_u16m4(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvw_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float32_t fs1, size_t vl) { + __riscv_sf_vc_fvw_se_u32mf2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvw_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float32_t fs1, size_t vl) { + __riscv_sf_vc_fvw_se_u32m1(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvw_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float32_t fs1, size_t vl) { + __riscv_sf_vc_fvw_se_u32m2(1, vd, vs2, fs1, vl); +} + +/* +** test_sf_vc_fvw_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +void test_sf_vc_fvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float32_t fs1, size_t vl) { + __riscv_sf_vc_fvw_se_u32m4(1, vd, vs2, fs1, vl); +} + diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_i.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_i.c new file mode 100644 index 00000000000..7ec9da7504d --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_i.c @@ -0,0 +1,2682 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_xsfvcp -mabi=lp64d -O3" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + +#include "riscv_vector.h" + + +/* +** test_sf_vc_v_i_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint8mf8_t test_sf_vc_v_i_u8mf8(size_t vl) { + return __riscv_sf_vc_v_i_u8mf8(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf8_t test_sf_vc_v_i_se_u8mf8(size_t vl) { + return __riscv_sf_vc_v_i_se_u8mf8(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_i_u8mf4(size_t vl) { + return __riscv_sf_vc_v_i_u8mf4(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_i_se_u8mf4(size_t vl) { + return __riscv_sf_vc_v_i_se_u8mf4(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vc_v_i_u8mf2(size_t vl) { + return __riscv_sf_vc_v_i_u8mf2(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vc_v_i_se_u8mf2(size_t vl) { + return __riscv_sf_vc_v_i_se_u8mf2(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vc_v_i_u8m1(size_t vl) { + return __riscv_sf_vc_v_i_u8m1(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vc_v_i_se_u8m1(size_t vl) { + return __riscv_sf_vc_v_i_se_u8m1(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint8m2_t test_sf_vc_v_i_u8m2(size_t vl) { + return __riscv_sf_vc_v_i_u8m2(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vc_v_i_se_u8m2(size_t vl) { + return __riscv_sf_vc_v_i_se_u8m2(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m4_t test_sf_vc_v_i_u8m4(size_t vl) { + return __riscv_sf_vc_v_i_u8m4(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m4_t test_sf_vc_v_i_se_u8m4(size_t vl) { + return __riscv_sf_vc_v_i_se_u8m4(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m8_t test_sf_vc_v_i_u8m8(size_t vl) { + return __riscv_sf_vc_v_i_u8m8(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m8_t test_sf_vc_v_i_se_u8m8(size_t vl) { + return __riscv_sf_vc_v_i_se_u8m8(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_i_u16mf4(size_t vl) { + return __riscv_sf_vc_v_i_u16mf4(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_i_se_u16mf4(size_t vl) { + return __riscv_sf_vc_v_i_se_u16mf4(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint16mf2_t test_sf_vc_v_i_u16mf2(size_t vl) { + return __riscv_sf_vc_v_i_u16mf2(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_i_se_u16mf2(size_t vl) { + return __riscv_sf_vc_v_i_se_u16mf2(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_i_u16m1(size_t vl) { + return __riscv_sf_vc_v_i_u16m1(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_i_se_u16m1(size_t vl) { + return __riscv_sf_vc_v_i_se_u16m1(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_i_u16m2(size_t vl) { + return __riscv_sf_vc_v_i_u16m2(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_i_se_u16m2(size_t vl) { + return __riscv_sf_vc_v_i_se_u16m2(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_i_u16m4(size_t vl) { + return __riscv_sf_vc_v_i_u16m4(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_i_se_u16m4(size_t vl) { + return __riscv_sf_vc_v_i_se_u16m4(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint16m8_t test_sf_vc_v_i_u16m8(size_t vl) { + return __riscv_sf_vc_v_i_u16m8(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_i_se_u16m8(size_t vl) { + return __riscv_sf_vc_v_i_se_u16m8(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_i_u32mf2(size_t vl) { + return __riscv_sf_vc_v_i_u32mf2(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_i_se_u32mf2(size_t vl) { + return __riscv_sf_vc_v_i_se_u32mf2(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_i_u32m1(size_t vl) { + return __riscv_sf_vc_v_i_u32m1(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_i_se_u32m1(size_t vl) { + return __riscv_sf_vc_v_i_se_u32m1(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_i_u32m2(size_t vl) { + return __riscv_sf_vc_v_i_u32m2(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_i_se_u32m2(size_t vl) { + return __riscv_sf_vc_v_i_se_u32m2(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint32m4_t test_sf_vc_v_i_u32m4(size_t vl) { + return __riscv_sf_vc_v_i_u32m4(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_i_se_u32m4(size_t vl) { + return __riscv_sf_vc_v_i_se_u32m4(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_i_u32m8(size_t vl) { + return __riscv_sf_vc_v_i_u32m8(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_i_se_u32m8(size_t vl) { + return __riscv_sf_vc_v_i_se_u32m8(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_i_u64m1(size_t vl) { + return __riscv_sf_vc_v_i_u64m1(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_i_se_u64m1(size_t vl) { + return __riscv_sf_vc_v_i_se_u64m1(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_i_u64m2(size_t vl) { + return __riscv_sf_vc_v_i_u64m2(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_i_se_u64m2(size_t vl) { + return __riscv_sf_vc_v_i_se_u64m2(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint64m4_t test_sf_vc_v_i_u64m4(size_t vl) { + return __riscv_sf_vc_v_i_u64m4(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_i_se_u64m4(size_t vl) { + return __riscv_sf_vc_v_i_se_u64m4(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_i_u64m8(size_t vl) { + return __riscv_sf_vc_v_i_u64m8(1, 2, 4, vl); +} + +/* +** test_sf_vc_v_i_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_i_se_u64m8(size_t vl) { + return __riscv_sf_vc_v_i_se_u64m8(1, 2, 4, vl); +} + + +/* +** test_sf_vc_i_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u8mf8(size_t vl) { + __riscv_sf_vc_i_se_u8mf8(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u8mf4(size_t vl) { + __riscv_sf_vc_i_se_u8mf4(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u8mf2(size_t vl) { + __riscv_sf_vc_i_se_u8mf2(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u8m1(size_t vl) { + __riscv_sf_vc_i_se_u8m1(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u8m2(size_t vl) { + __riscv_sf_vc_i_se_u8m2(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u8m4: +** ... 
+** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u8m4(size_t vl) { + __riscv_sf_vc_i_se_u8m4(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u8m8(size_t vl) { + __riscv_sf_vc_i_se_u8m8(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u16mf4(size_t vl) { + __riscv_sf_vc_i_se_u16mf4(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u16mf2(size_t vl) { + __riscv_sf_vc_i_se_u16mf2(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u16m1(size_t vl) { + __riscv_sf_vc_i_se_u16m1(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u16m2(size_t vl) { + __riscv_sf_vc_i_se_u16m2(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u16m4(size_t vl) { + __riscv_sf_vc_i_se_u16m4(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u16m8(size_t vl) { + __riscv_sf_vc_i_se_u16m8(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... 
+*/ +void test_sf_vc_i_se_u32mf2(size_t vl) { + __riscv_sf_vc_i_se_u32mf2(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u32m1(size_t vl) { + __riscv_sf_vc_i_se_u32m1(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u32m2(size_t vl) { + __riscv_sf_vc_i_se_u32m2(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u32m4(size_t vl) { + __riscv_sf_vc_i_se_u32m4(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u32m8(size_t vl) { + __riscv_sf_vc_i_se_u32m8(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u64m1(size_t vl) { + __riscv_sf_vc_i_se_u64m1(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u64m2(size_t vl) { + __riscv_sf_vc_i_se_u64m2(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u64m4(size_t vl) { + __riscv_sf_vc_i_se_u64m4(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_i_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_i_se_u64m8(size_t vl) { + __riscv_sf_vc_i_se_u64m8(1, 2, 3, 4, vl); +} + +/* +** test_sf_vc_v_iv_u8mf8: +** ... 
+** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf8_t test_sf_vc_v_iv_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u8mf8(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf8_t test_sf_vc_v_iv_se_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u8mf8(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_iv_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u8mf4(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_iv_se_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u8mf4(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vc_v_iv_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u8mf2(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vc_v_iv_se_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u8mf2(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vc_v_iv_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u8m1(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint8m1_t test_sf_vc_v_iv_se_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u8m1(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vc_v_iv_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u8m2(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vc_v_iv_se_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u8m2(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m4_t test_sf_vc_v_iv_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u8m4(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m4_t test_sf_vc_v_iv_se_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u8m4(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m8_t test_sf_vc_v_iv_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u8m8(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m8_t test_sf_vc_v_iv_se_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u8m8(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_iv_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u16mf4(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u16mf4: +** ... 
+** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_iv_se_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u16mf4(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_iv_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u16mf2(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_iv_se_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u16mf2(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_iv_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u16m1(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_iv_se_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u16m1(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_iv_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u16m2(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_iv_se_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u16m2(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint16m4_t test_sf_vc_v_iv_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u16m4(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_iv_se_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u16m4(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_iv_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u16m8(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_iv_se_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u16m8(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_iv_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u32mf2(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_iv_se_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u32mf2(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_iv_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u32m1(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint32m1_t test_sf_vc_v_iv_se_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u32m1(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_iv_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u32m2(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_iv_se_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u32m2(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_iv_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u32m4(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_iv_se_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u32m4(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_iv_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u32m8(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_iv_se_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u32m8(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint64m1_t test_sf_vc_v_iv_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u64m1(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_iv_se_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u64m1(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_iv_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u64m2(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_iv_se_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u64m2(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_iv_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u64m4(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_iv_se_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u64m4(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_iv_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_u64m8(1, vs2, 4, vl); +} + +/* +** test_sf_vc_v_iv_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint64m8_t test_sf_vc_v_iv_se_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_iv_se_u64m8(1, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u8mf8(vuint8mf8_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u8mf8(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u8mf4(vuint8mf4_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u8mf4(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u8mf2(vuint8mf2_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u8mf2(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u8m1(vuint8m1_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u8m1(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u8m2(vuint8m2_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u8m2(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u8m4(vuint8m4_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u8m4(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u8m8(vuint8m8_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u8m8(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +void test_sf_vc_iv_se_u16mf4(vuint16mf4_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u16mf4(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u16mf2(vuint16mf2_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u16mf2(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u16m1(vuint16m1_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u16m1(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u16m2(vuint16m2_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u16m2(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u16m4(vuint16m4_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u16m4(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u16m8(vuint16m8_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u16m8(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u32mf2(vuint32mf2_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u32mf2(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u32m1(vuint32m1_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u32m1(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +void test_sf_vc_iv_se_u32m2(vuint32m2_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u32m2(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u32m4(vuint32m4_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u32m4(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u32m8(vuint32m8_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u32m8(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u64m1(vuint64m1_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u64m1(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u64m2(vuint64m2_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u64m2(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u64m4(vuint64m4_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u64m4(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_iv_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_iv_se_u64m8(vuint64m8_t vs2, size_t vl) { + __riscv_sf_vc_iv_se_u64m8(1, 3, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf8_t test_sf_vc_v_ivv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u8mf8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u8mf8: +** ... 
+** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf8_t test_sf_vc_v_ivv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u8mf8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_ivv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u8mf4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_ivv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u8mf4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vc_v_ivv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u8mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vc_v_ivv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u8mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vc_v_ivv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u8m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vc_v_ivv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u8m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u8m2: +** ... 
+** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vc_v_ivv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u8m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vc_v_ivv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u8m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m4_t test_sf_vc_v_ivv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u8m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m4_t test_sf_vc_v_ivv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u8m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m8_t test_sf_vc_v_ivv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u8m8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint8m8_t test_sf_vc_v_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u8m8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_ivv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u16mf4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u16mf4: +** ... 
+** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_ivv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u16mf4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_ivv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u16mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_ivv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u16mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_ivv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u16m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_ivv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u16m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_ivv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u16m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint16m2_t test_sf_vc_v_ivv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u16m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_ivv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u16m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_ivv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u16m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_ivv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u16m8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_ivv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u16m8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_ivv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u32mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_ivv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u32mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u32m1: +** ... 
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_ivv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u32m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_ivv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u32m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_ivv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u32m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_ivv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u32m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_ivv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u32m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_ivv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u32m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint32m8_t test_sf_vc_v_ivv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u32m8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_ivv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u32m8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_ivv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u64m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u64m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_ivv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u64m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_ivv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u64m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_ivv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u64m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u64m4: +** ... 
+** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_ivv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u64m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_ivv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_u64m8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivv_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_ivv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivv_se_u64m8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u8mf8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u8mf4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u8mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u8m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u8m2: +** ... 
+** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u8m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u8m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u8m8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u16mf4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u16mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u16m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u16m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +void test_sf_vc_ivv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u16m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u16m8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u32mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u32m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u32m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u32m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u32m8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +void test_sf_vc_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u64m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u64m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u64m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivv_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + __riscv_sf_vc_ivv_se_u64m8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_ivw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_u8mf8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_ivw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_se_u8mf8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_ivw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_u8mf4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint16mf2_t test_sf_vc_v_ivw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_se_u8mf4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_ivw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_u8mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_ivw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_se_u8mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_ivw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_u8m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_ivw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_se_u8m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_ivw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_u8m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_ivw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_se_u8m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint16m8_t test_sf_vc_v_ivw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_u8m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_ivw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_se_u8m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_ivw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_u16mf4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_ivw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_se_u16mf4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_ivw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_u16mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_ivw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_se_u16mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_ivw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_u16m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_se_u16m1: +** ... 
+** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_ivw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_se_u16m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_ivw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_u16m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_ivw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_se_u16m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_ivw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_u16m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_ivw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_se_u16m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_ivw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_u32mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +vuint64m1_t test_sf_vc_v_ivw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_se_u32mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_ivw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_u32m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_ivw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_se_u32m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ + +vuint64m4_t test_sf_vc_v_ivw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_u32m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_ivw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_se_u32m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_ivw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_u32m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_v_ivw_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_ivw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_sf_vc_v_ivw_se_u32m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivw_se_u8mf8: +** ... 
+** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + __riscv_sf_vc_ivw_se_u8mf8(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivw_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + __riscv_sf_vc_ivw_se_u8mf4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivw_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + __riscv_sf_vc_ivw_se_u8mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivw_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + __riscv_sf_vc_ivw_se_u8m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivw_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + __riscv_sf_vc_ivw_se_u8m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivw_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + __riscv_sf_vc_ivw_se_u8m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivw_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + __riscv_sf_vc_ivw_se_u16mf4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivw_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +void test_sf_vc_ivw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + __riscv_sf_vc_ivw_se_u16mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivw_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + __riscv_sf_vc_ivw_se_u16m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivw_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + __riscv_sf_vc_ivw_se_u16m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivw_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + __riscv_sf_vc_ivw_se_u16m4(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivw_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + __riscv_sf_vc_ivw_se_u32mf2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivw_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + __riscv_sf_vc_ivw_se_u32m1(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivw_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... +*/ +void test_sf_vc_ivw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + __riscv_sf_vc_ivw_se_u32m2(1, vd, vs2, 4, vl); +} + +/* +** test_sf_vc_ivw_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+ +** ... 
+*/ +void test_sf_vc_ivw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + __riscv_sf_vc_ivw_se_u32m4(1, vd, vs2, 4, vl); +} + diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_v.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_v.c new file mode 100644 index 00000000000..cbaaab9653b --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_v.c @@ -0,0 +1,1954 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_xsfvcp -mabi=lp64d -O3" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + +#include "riscv_vector.h" + + +/* +** test_sf_vc_v_vv_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8mf8_t test_sf_vc_v_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u8mf8(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8mf8_t test_sf_vc_v_vv_se_u8mf8(vuint8mf8_t vs2, vuint8mf8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u8mf8(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u8mf4(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_vv_se_u8mf4(vuint8mf4_t vs2, vuint8mf4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u8mf4(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint8mf2_t test_sf_vc_v_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u8mf2(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vc_v_vv_se_u8mf2(vuint8mf2_t vs2, vuint8mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u8mf2(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vc_v_vv_u8m1(vuint8m1_t vs2, vuint8m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u8m1(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vc_v_vv_se_u8m1(vuint8m1_t vs2, vuint8m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u8m1(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vc_v_vv_u8m2(vuint8m2_t vs2, vuint8m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u8m2(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vc_v_vv_se_u8m2(vuint8m2_t vs2, vuint8m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u8m2(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8m4_t test_sf_vc_v_vv_u8m4(vuint8m4_t vs2, vuint8m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u8m4(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint8m4_t test_sf_vc_v_vv_se_u8m4(vuint8m4_t vs2, vuint8m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u8m4(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8m8_t test_sf_vc_v_vv_u8m8(vuint8m8_t vs2, vuint8m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u8m8(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8m8_t test_sf_vc_v_vv_se_u8m8(vuint8m8_t vs2, vuint8m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u8m8(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u16mf4(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_vv_se_u16mf4(vuint16mf4_t vs2, vuint16mf4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u16mf4(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u16mf2(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_vv_se_u16mf2(vuint16mf2_t vs2, vuint16mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u16mf2(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint16m1_t test_sf_vc_v_vv_u16m1(vuint16m1_t vs2, vuint16m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u16m1(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_vv_se_u16m1(vuint16m1_t vs2, vuint16m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u16m1(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_vv_u16m2(vuint16m2_t vs2, vuint16m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u16m2(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_vv_se_u16m2(vuint16m2_t vs2, vuint16m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u16m2(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_vv_u16m4(vuint16m4_t vs2, vuint16m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u16m4(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_vv_se_u16m4(vuint16m4_t vs2, vuint16m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u16m4(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_vv_u16m8(vuint16m8_t vs2, vuint16m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u16m8(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint16m8_t test_sf_vc_v_vv_se_u16m8(vuint16m8_t vs2, vuint16m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u16m8(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u32mf2(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_vv_se_u32mf2(vuint32mf2_t vs2, vuint32mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u32mf2(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_vv_u32m1(vuint32m1_t vs2, vuint32m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u32m1(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_vv_se_u32m1(vuint32m1_t vs2, vuint32m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u32m1(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_vv_u32m2(vuint32m2_t vs2, vuint32m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u32m2(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_vv_se_u32m2(vuint32m2_t vs2, vuint32m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u32m2(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint32m4_t test_sf_vc_v_vv_u32m4(vuint32m4_t vs2, vuint32m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u32m4(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_vv_se_u32m4(vuint32m4_t vs2, vuint32m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u32m4(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_vv_u32m8(vuint32m8_t vs2, vuint32m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u32m8(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_vv_se_u32m8(vuint32m8_t vs2, vuint32m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u32m8(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_vv_u64m1(vuint64m1_t vs2, vuint64m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u64m1(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_vv_se_u64m1(vuint64m1_t vs2, vuint64m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u64m1(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_vv_u64m2(vuint64m2_t vs2, vuint64m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u64m2(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint64m2_t test_sf_vc_v_vv_se_u64m2(vuint64m2_t vs2, vuint64m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u64m2(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_vv_u64m4(vuint64m4_t vs2, vuint64m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u64m4(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_vv_se_u64m4(vuint64m4_t vs2, vuint64m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u64m4(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_vv_u64m8(vuint64m8_t vs2, vuint64m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_u64m8(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vv_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_vv_se_u64m8(vuint64m8_t vs2, vuint64m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vv_se_u64m8(1, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u8mf8(vuint8mf8_t vs2, vuint8mf8_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u8mf8(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u8mf4(vuint8mf4_t vs2, vuint8mf4_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u8mf4(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +void test_sf_vc_vv_se_u8mf2(vuint8mf2_t vs2, vuint8mf2_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u8mf2(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u8m1(vuint8m1_t vs2, vuint8m1_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u8m1(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u8m2(vuint8m2_t vs2, vuint8m2_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u8m2(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u8m4(vuint8m4_t vs2, vuint8m4_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u8m4(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u8m8(vuint8m8_t vs2, vuint8m8_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u8m8(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u16mf4(vuint16mf4_t vs2, vuint16mf4_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u16mf4(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u16mf2(vuint16mf2_t vs2, vuint16mf2_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u16mf2(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +void test_sf_vc_vv_se_u16m1(vuint16m1_t vs2, vuint16m1_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u16m1(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u16m2(vuint16m2_t vs2, vuint16m2_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u16m2(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u16m4(vuint16m4_t vs2, vuint16m4_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u16m4(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u16m8(vuint16m8_t vs2, vuint16m8_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u16m8(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u32mf2(vuint32mf2_t vs2, vuint32mf2_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u32mf2(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u32m1(vuint32m1_t vs2, vuint32m1_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u32m1(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u32m2(vuint32m2_t vs2, vuint32m2_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u32m2(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +void test_sf_vc_vv_se_u32m4(vuint32m4_t vs2, vuint32m4_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u32m4(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u32m8(vuint32m8_t vs2, vuint32m8_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u32m8(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u64m1(vuint64m1_t vs2, vuint64m1_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u64m1(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u64m2(vuint64m2_t vs2, vuint64m2_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u64m2(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u64m4(vuint64m4_t vs2, vuint64m4_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u64m4(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_vv_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vv_se_u64m8(vuint64m8_t vs2, vuint64m8_t rs1, size_t vl) { + __riscv_sf_vc_vv_se_u64m8(1, 3, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8mf8_t test_sf_vc_v_vvv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u8mf8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint8mf8_t test_sf_vc_v_vvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u8mf8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_vvv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u8mf4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_vvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u8mf4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vc_v_vvv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u8mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vc_v_vvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u8mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vc_v_vvv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u8m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint8m1_t test_sf_vc_v_vvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u8m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vc_v_vvv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u8m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vc_v_vvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u8m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8m4_t test_sf_vc_v_vvv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u8m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8m4_t test_sf_vc_v_vvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u8m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint8m8_t test_sf_vc_v_vvv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u8m8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint8m8_t test_sf_vc_v_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u8m8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_vvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u16mf4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_vvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u16mf4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_vvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u16mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_vvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u16mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_vvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u16m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint16m1_t test_sf_vc_v_vvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u16m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_vvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u16m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_vvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u16m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_vvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u16m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_vvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u16m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_vvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u16m8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint16m8_t test_sf_vc_v_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u16m8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_vvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u32mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u32mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_vvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u32m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_vvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u32m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_vvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u32m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint32m2_t test_sf_vc_v_vvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u32m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_vvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u32m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_vvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u32m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_vvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u32m8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u32m8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_vvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u64m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint64m1_t test_sf_vc_v_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u64m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_vvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u64m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_vvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u64m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_vvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u64m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_vvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u64m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_vvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_u64m8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvv_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint64m8_t test_sf_vc_v_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvv_se_u64m8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u8mf8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u8mf4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u8mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u8m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u8m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u8m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u8m8: +** ... 
+** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u8m8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u16mf4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u16mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u16m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u16m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u16m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +void test_sf_vc_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u16m8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u32mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u32m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u32m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u32m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u32m8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u64m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u64m2: +** ... 
+** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u64m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u64m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvv_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t rs1, size_t vl) { + __riscv_sf_vc_vvv_se_u64m8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_vvw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_u8mf8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_vvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_se_u8mf8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_vvw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_u8mf4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint16mf2_t test_sf_vc_v_vvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_se_u8mf4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_vvw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_u8mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_vvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_se_u8mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_vvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_u8m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_vvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_se_u8m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_vvw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_u8m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint16m4_t test_sf_vc_v_vvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_se_u8m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_vvw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_u8m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_vvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_se_u8m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_vvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_u16mf4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_vvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_se_u16mf4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_vvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_u16mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint32m1_t test_sf_vc_v_vvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_se_u16mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_vvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_u16m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_vvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_se_u16m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_vvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_u16m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_vvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_se_u16m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_vvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_u16m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint32m8_t test_sf_vc_v_vvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_se_u16m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_vvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_u32mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_vvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_se_u32mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_vvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_u32m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_vvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_se_u32m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_vvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_u32m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vuint64m4_t test_sf_vc_v_vvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_se_u32m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_vvw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_u32m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_v_vvw_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_vvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t rs1, size_t vl) { + return __riscv_sf_vc_v_vvw_se_u32m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvw_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t rs1, size_t vl) { + __riscv_sf_vc_vvw_se_u8mf8(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvw_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t rs1, size_t vl) { + __riscv_sf_vc_vvw_se_u8mf4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvw_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t rs1, size_t vl) { + __riscv_sf_vc_vvw_se_u8mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvw_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t rs1, size_t vl) { + __riscv_sf_vc_vvw_se_u8m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvw_se_u8m2: +** ... 
+** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t rs1, size_t vl) { + __riscv_sf_vc_vvw_se_u8m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvw_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t rs1, size_t vl) { + __riscv_sf_vc_vvw_se_u8m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvw_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t rs1, size_t vl) { + __riscv_sf_vc_vvw_se_u16mf4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvw_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t rs1, size_t vl) { + __riscv_sf_vc_vvw_se_u16mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvw_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t rs1, size_t vl) { + __riscv_sf_vc_vvw_se_u16m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvw_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t rs1, size_t vl) { + __riscv_sf_vc_vvw_se_u16m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvw_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +void test_sf_vc_vvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t rs1, size_t vl) { + __riscv_sf_vc_vvw_se_u16m4(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvw_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t rs1, size_t vl) { + __riscv_sf_vc_vvw_se_u32mf2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvw_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t rs1, size_t vl) { + __riscv_sf_vc_vvw_se_u32m1(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvw_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t rs1, size_t vl) { + __riscv_sf_vc_vvw_se_u32m2(1, vd, vs2, rs1, vl); +} + +/* +** test_sf_vc_vvw_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +void test_sf_vc_vvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t rs1, size_t vl) { + __riscv_sf_vc_vvw_se_u32m4(1, vd, vs2, rs1, vl); +} + diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_x.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_x.c new file mode 100644 index 00000000000..182a2d008ca --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_x.c @@ -0,0 +1,2679 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_xsfvcp -mabi=lp64d -O3" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + +#include "riscv_vector.h" + +/* +** test_sf_vc_v_x_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +vuint8mf8_t test_sf_vc_v_x_u8mf8(uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u8mf8(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf8_t test_sf_vc_v_x_se_u8mf8(uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u8mf8(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_x_u8mf4(uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u8mf4(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_x_se_u8mf4(uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u8mf4(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vc_v_x_u8mf2(uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u8mf2(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vc_v_x_se_u8mf2(uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u8mf2(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vc_v_x_u8m1(uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u8m1(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vc_v_x_se_u8m1(uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u8m1(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u8m2: +** ... 
+** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vc_v_x_u8m2(uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u8m2(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vc_v_x_se_u8m2(uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u8m2(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m4_t test_sf_vc_v_x_u8m4(uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u8m4(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m4_t test_sf_vc_v_x_se_u8m4(uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u8m4(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m8_t test_sf_vc_v_x_u8m8(uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u8m8(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m8_t test_sf_vc_v_x_se_u8m8(uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u8m8(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_x_u16mf4(uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u16mf4(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +vuint16mf4_t test_sf_vc_v_x_se_u16mf4(uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u16mf4(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_x_u16mf2(uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u16mf2(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_x_se_u16mf2(uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u16mf2(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_x_u16m1(uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u16m1(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_x_se_u16m1(uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u16m1(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_x_u16m2(uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u16m2(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_x_se_u16m2(uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u16m2(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_x_u16m4(uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u16m4(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u16m4: +** ... 
+** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_x_se_u16m4(uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u16m4(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_x_u16m8(uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u16m8(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_x_se_u16m8(uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u16m8(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_x_u32mf2(uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u32mf2(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_x_se_u32mf2(uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u32mf2(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_x_u32m1(uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u32m1(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_x_se_u32m1(uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u32m1(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +vuint32m2_t test_sf_vc_v_x_u32m2(uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u32m2(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_x_se_u32m2(uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u32m2(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_x_u32m4(uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u32m4(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_x_se_u32m4(uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u32m4(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_x_u32m8(uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u32m8(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_x_se_u32m8(uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u32m8(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_x_u64m1(uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u64m1(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_x_se_u64m1(uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u64m1(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u64m2: +** ... 
+** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_x_u64m2(uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u64m2(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_x_se_u64m2(uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u64m2(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_x_u64m4(uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u64m4(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_x_se_u64m4(uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u64m4(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_x_u64m8(uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_u64m8(1, 2, xs1, vl); +} + +/* +** test_sf_vc_v_x_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_x_se_u64m8(uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_x_se_u64m8(1, 2, xs1, vl); +} + +/* +** test_sf_vc_x_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u8mf8(uint8_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u8mf8(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... 
+*/ +void test_sf_vc_x_se_u8mf4(uint8_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u8mf4(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u8mf2(uint8_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u8mf2(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u8m1(uint8_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u8m1(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u8m2(uint8_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u8m2(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u8m4(uint8_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u8m4(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u8m8(uint8_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u8m8(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u16mf4(uint16_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u16mf4(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u16mf2(uint16_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u16mf2(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... 
+*/ +void test_sf_vc_x_se_u16m1(uint16_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u16m1(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u16m2(uint16_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u16m2(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u16m4(uint16_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u16m4(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u16m8(uint16_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u16m8(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u32mf2(uint32_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u32mf2(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u32m1(uint32_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u32m1(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u32m2(uint32_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u32m2(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u32m4(uint32_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u32m4(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... 
+*/ +void test_sf_vc_x_se_u32m8(uint32_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u32m8(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u64m1(uint64_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u64m1(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u64m2(uint64_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u64m2(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u64m4(uint64_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u64m4(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_x_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_x_se_u64m8(uint64_t xs1, size_t vl) { + __riscv_sf_vc_x_se_u64m8(1, 2, 3, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf8_t test_sf_vc_v_xv_u8mf8(vuint8mf8_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u8mf8(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf8_t test_sf_vc_v_xv_se_u8mf8(vuint8mf8_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u8mf8(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_xv_u8mf4(vuint8mf4_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u8mf4(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u8mf4: +** ... 
+** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_xv_se_u8mf4(vuint8mf4_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u8mf4(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vc_v_xv_u8mf2(vuint8mf2_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u8mf2(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vc_v_xv_se_u8mf2(vuint8mf2_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u8mf2(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vc_v_xv_u8m1(vuint8m1_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u8m1(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vc_v_xv_se_u8m1(vuint8m1_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u8m1(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vc_v_xv_u8m2(vuint8m2_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u8m2(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vc_v_xv_se_u8m2(vuint8m2_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u8m2(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u8m4: +** ... 
+** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m4_t test_sf_vc_v_xv_u8m4(vuint8m4_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u8m4(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m4_t test_sf_vc_v_xv_se_u8m4(vuint8m4_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u8m4(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m8_t test_sf_vc_v_xv_u8m8(vuint8m8_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u8m8(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m8_t test_sf_vc_v_xv_se_u8m8(vuint8m8_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u8m8(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_xv_u16mf4(vuint16mf4_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u16mf4(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_xv_se_u16mf4(vuint16mf4_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u16mf4(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_xv_u16mf2(vuint16mf2_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u16mf2(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u16mf2: +** ... 
+** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_xv_se_u16mf2(vuint16mf2_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u16mf2(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_xv_u16m1(vuint16m1_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u16m1(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_xv_se_u16m1(vuint16m1_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u16m1(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_xv_u16m2(vuint16m2_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u16m2(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_xv_se_u16m2(vuint16m2_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u16m2(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_xv_u16m4(vuint16m4_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u16m4(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_xv_se_u16m4(vuint16m4_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u16m4(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u16m8: +** ... 
+** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_xv_u16m8(vuint16m8_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u16m8(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_xv_se_u16m8(vuint16m8_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u16m8(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_xv_u32mf2(vuint32mf2_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u32mf2(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_xv_se_u32mf2(vuint32mf2_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u32mf2(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_xv_u32m1(vuint32m1_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u32m1(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_xv_se_u32m1(vuint32m1_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u32m1(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_xv_u32m2(vuint32m2_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u32m2(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u32m2: +** ... 
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_xv_se_u32m2(vuint32m2_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u32m2(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_xv_u32m4(vuint32m4_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u32m4(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_xv_se_u32m4(vuint32m4_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u32m4(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_xv_u32m8(vuint32m8_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u32m8(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_xv_se_u32m8(vuint32m8_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u32m8(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_xv_u64m1(vuint64m1_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u64m1(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_xv_se_u64m1(vuint64m1_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u64m1(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u64m2: +** ... 
+** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_xv_u64m2(vuint64m2_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u64m2(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_xv_se_u64m2(vuint64m2_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u64m2(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_xv_u64m4(vuint64m4_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u64m4(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_xv_se_u64m4(vuint64m4_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u64m4(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_xv_u64m8(vuint64m8_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_u64m8(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xv_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_xv_se_u64m8(vuint64m8_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xv_se_u64m8(1, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u8mf8(vuint8mf8_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u8mf8(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u8mf4: +** ... 
+** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u8mf4(vuint8mf4_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u8mf4(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u8mf2(vuint8mf2_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u8mf2(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u8m1(vuint8m1_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u8m1(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u8m2(vuint8m2_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u8m2(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u8m4(vuint8m4_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u8m4(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u8m8(vuint8m8_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u8m8(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u16mf4(vuint16mf4_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u16mf4(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +void test_sf_vc_xv_se_u16mf2(vuint16mf2_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u16mf2(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u16m1(vuint16m1_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u16m1(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u16m2(vuint16m2_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u16m2(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u16m4(vuint16m4_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u16m4(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u16m8(vuint16m8_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u16m8(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u32mf2(vuint32mf2_t vs2, uint32_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u32mf2(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u32m1(vuint32m1_t vs2, uint32_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u32m1(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +void test_sf_vc_xv_se_u32m2(vuint32m2_t vs2, uint32_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u32m2(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u32m4(vuint32m4_t vs2, uint32_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u32m4(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u32m8(vuint32m8_t vs2, uint32_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u32m8(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u64m1(vuint64m1_t vs2, uint64_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u64m1(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u64m2(vuint64m2_t vs2, uint64_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u64m2(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u64m4(vuint64m4_t vs2, uint64_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u64m4(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_xv_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xv_se_u64m8(vuint64m8_t vs2, uint64_t xs1, size_t vl) { + __riscv_sf_vc_xv_se_u64m8(1, 3, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +vuint8mf8_t test_sf_vc_v_xvv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u8mf8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf8_t test_sf_vc_v_xvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u8mf8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_xvv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u8mf4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vc_v_xvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u8mf4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vc_v_xvv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u8mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vc_v_xvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u8mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +vuint8m1_t test_sf_vc_v_xvv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u8m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vc_v_xvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u8m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vc_v_xvv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u8m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vc_v_xvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u8m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m4_t test_sf_vc_v_xvv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u8m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m4_t test_sf_vc_v_xvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u8m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m8_t test_sf_vc_v_xvv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u8m8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u8m8: +** ... 
+** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint8m8_t test_sf_vc_v_xvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u8m8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_xvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u16mf4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_xvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u16mf4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_xvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u16mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_xvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u16mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_xvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u16m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +vuint16m1_t test_sf_vc_v_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u16m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_xvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u16m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_xvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u16m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_xvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u16m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_xvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u16m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_xvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u16m8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +vuint16m8_t test_sf_vc_v_xvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u16m8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_xvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u32mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_xvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u32mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_xvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u32m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_xvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u32m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_xvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u32m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +vuint32m2_t test_sf_vc_v_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u32m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_xvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u32m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_xvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u32m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_xvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u32m8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_xvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u32m8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_xvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u64m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +vuint64m1_t test_sf_vc_v_xvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u64m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_xvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u64m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_xvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u64m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_xvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u64m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_xvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u64m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_xvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_u64m8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvv_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +vuint64m8_t test_sf_vc_v_xvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvv_se_u64m8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u8mf8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u8mf4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u8mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u8m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u8m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u8m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u8m8: +** ... +** vsetivli\s+zero+,0+,e8+,m8,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +void test_sf_vc_xvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u8m8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u16mf4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u16mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u16m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u16m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u16m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u16m8: +** ... +** vsetivli\s+zero+,0+,e16+,m8,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u16m8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u32mf2: +** ... 
+** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u32mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u32m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u32m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u32m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u32m8: +** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u32m8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u64m1: +** ... +** vsetivli\s+zero+,0+,e64+,m1,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u64m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u64m2: +** ... +** vsetivli\s+zero+,0+,e64+,m2,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +void test_sf_vc_xvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u64m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u64m4: +** ... +** vsetivli\s+zero+,0+,e64+,m4,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u64m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvv_se_u64m8: +** ... +** vsetivli\s+zero+,0+,e64+,m8,ta,ma+ +** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t xs1, size_t vl) { + __riscv_sf_vc_xvv_se_u64m8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_xvw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_u8mf8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16mf4_t test_sf_vc_v_xvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_se_u8mf8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_xvw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_u8mf4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16mf2_t test_sf_vc_v_xvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_se_u8mf4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_u8mf2: +** ... 
+** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_xvw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_u8mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m1_t test_sf_vc_v_xvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_se_u8mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_xvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_u8m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m2_t test_sf_vc_v_xvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_se_u8m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_xvw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_u8m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m4_t test_sf_vc_v_xvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_se_u8m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +vuint16m8_t test_sf_vc_v_xvw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_u8m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_se_u8m4: +** ... +** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint16m8_t test_sf_vc_v_xvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_se_u8m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_xvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_u16mf4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32mf2_t test_sf_vc_v_xvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_se_u16mf4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_xvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_u16mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m1_t test_sf_vc_v_xvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_se_u16mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +vuint32m2_t test_sf_vc_v_xvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_u16m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m2_t test_sf_vc_v_xvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_se_u16m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_xvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_u16m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m4_t test_sf_vc_v_xvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_se_u16m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_xvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_u16m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint32m8_t test_sf_vc_v_xvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, uint16_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_se_u16m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +vuint64m1_t test_sf_vc_v_xvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_u32mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m1_t test_sf_vc_v_xvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_se_u32mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_xvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_u32m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m2_t test_sf_vc_v_xvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_se_u32m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_xvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_u32m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m4_t test_sf_vc_v_xvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_se_u32m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +vuint64m8_t test_sf_vc_v_xvw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_u32m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_v_xvw_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +vuint64m8_t test_sf_vc_v_xvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t xs1, size_t vl) { + return __riscv_sf_vc_v_xvw_se_u32m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvw_se_u8mf8: +** ... +** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+ +** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xvw_se_u8mf8(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvw_se_u8mf4: +** ... +** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+ +** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xvw_se_u8mf4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvw_se_u8mf2: +** ... +** vsetivli\s+zero+,0+,e8+,mf2,ta,ma+ +** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xvw_se_u8mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvw_se_u8m1: +** ... +** vsetivli\s+zero+,0+,e8+,m1,ta,ma+ +** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xvw_se_u8m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvw_se_u8m2: +** ... +** vsetivli\s+zero+,0+,e8+,m2,ta,ma+ +** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xvw_se_u8m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvw_se_u8m4: +** ... 
+** vsetivli\s+zero+,0+,e8+,m4,ta,ma+ +** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t xs1, size_t vl) { + __riscv_sf_vc_xvw_se_u8m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvw_se_u16mf4: +** ... +** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+ +** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xvw_se_u16mf4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvw_se_u16mf2: +** ... +** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+ +** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xvw_se_u16mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvw_se_u16m1: +** ... +** vsetivli\s+zero+,0+,e16+,m1,ta,ma+ +** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xvw_se_u16m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvw_se_u16m2: +** ... +** vsetivli\s+zero+,0+,e16+,m2,ta,ma+ +** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xvw_se_u16m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvw_se_u16m4: +** ... +** vsetivli\s+zero+,0+,e16+,m4,ta,ma+ +** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, uint16_t xs1, size_t vl) { + __riscv_sf_vc_xvw_se_u16m4(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvw_se_u32mf2: +** ... +** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+ +** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... 
+*/ +void test_sf_vc_xvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t xs1, size_t vl) { + __riscv_sf_vc_xvw_se_u32mf2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvw_se_u32m1: +** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ +** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t xs1, size_t vl) { + __riscv_sf_vc_xvw_se_u32m1(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvw_se_u32m2: +** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ +** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, uint32_t xs1, size_t vl) { + __riscv_sf_vc_xvw_se_u32m2(1, vd, vs2, xs1, vl); +} + +/* +** test_sf_vc_xvw_se_u32m4: +** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ +** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+ +** ... +*/ +void test_sf_vc_xvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t xs1, size_t vl) { + __riscv_sf_vc_xvw_se_u32m4(1, vd, vs2, xs1, vl); +} + diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_x_f_qf.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_x_f_qf.c index 813f7860f64..a4193b5aea9 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_x_f_qf.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_x_f_qf.c @@ -7,6 +7,7 @@ /* ** test_sf_vfnrclip_x_f_qf_i8mf8_vint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -17,6 +18,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_vint8mf8_t(vfloat32mf2_t vs2, float rs1 /* ** test_sf_vfnrclip_x_f_qf_i8mf4_vint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -27,6 +29,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_vint8mf4_t(vfloat32m1_t vs2, float rs1, /* ** test_sf_vfnrclip_x_f_qf_i8mf2_vint8mf2_t: ** ... 
+** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -37,6 +40,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_vint8mf2_t(vfloat32m2_t vs2, float rs1, /* ** test_sf_vfnrclip_x_f_qf_i8m1_vint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -47,6 +51,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_vint8m1_t(vfloat32m4_t vs2, float rs1, si /* ** test_sf_vfnrclip_x_f_qf_i8m2_vint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -57,6 +62,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_vint8m2_t(vfloat32m8_t vs2, float rs1, si /* ** test_sf_vfnrclip_x_f_qf_i8mf8_m_vint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -67,6 +73,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_m_vint8mf8_t(vbool64_t mask, vfloat32mf /* ** test_sf_vfnrclip_x_f_qf_i8mf4_m_vint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -77,6 +84,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_m_vint8mf4_t(vbool32_t mask, vfloat32m1 /* ** test_sf_vfnrclip_x_f_qf_i8mf2_m_vint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -87,6 +95,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_m_vint8mf2_t(vbool16_t mask, vfloat32m2 /* ** test_sf_vfnrclip_x_f_qf_i8m1_m_vint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -97,6 +106,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_m_vint8m1_t(vbool8_t mask, vfloat32m4_t v /* ** test_sf_vfnrclip_x_f_qf_i8m2_m_vint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... 
*/ @@ -107,6 +117,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_m_vint8m2_t(vbool4_t mask, vfloat32m8_t v /* ** test_sf_vfnrclip_x_f_qf_vint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -117,6 +128,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_vint8mf8_t(vfloat32mf2_t vs2, float rs1, size /* ** test_sf_vfnrclip_x_f_qf_vint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -127,6 +139,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_vint8mf4_t(vfloat32m1_t vs2, float rs1, size_ /* ** test_sf_vfnrclip_x_f_qf_vint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -137,6 +150,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_vint8mf2_t(vfloat32m2_t vs2, float rs1, size_ /* ** test_sf_vfnrclip_x_f_qf_vint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -147,6 +161,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_vint8m1_t(vfloat32m4_t vs2, float rs1, size_t /* ** test_sf_vfnrclip_x_f_qf_vint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -157,6 +172,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_vint8m2_t(vfloat32m8_t vs2, float rs1, size_t /* ** test_sf_vfnrclip_x_f_qf_mask_vint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -167,6 +183,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_mask_vint8mf8_t(vbool64_t mask, vfloat32mf2_t /* ** test_sf_vfnrclip_x_f_qf_mask_vint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -177,6 +194,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_mask_vint8mf4_t(vbool32_t mask, vfloat32m1_t /* ** test_sf_vfnrclip_x_f_qf_mask_vint8mf2_t: ** ... 
+** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -187,6 +205,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_mask_vint8mf2_t(vbool16_t mask, vfloat32m2_t /* ** test_sf_vfnrclip_x_f_qf_mask_vint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -197,6 +216,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_mask_vint8m1_t(vbool8_t mask, vfloat32m4_t vs2 /* ** test_sf_vfnrclip_x_f_qf_mask_vint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -207,6 +227,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_mask_vint8m2_t(vbool4_t mask,vfloat32m8_t vs2, /* ** test_sf_vfnrclip_x_f_qf_i8mf8_tu_vint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -217,6 +238,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tu_vint8mf8_t(vint8mf8_t maskedoff, vfl /* ** test_sf_vfnrclip_x_f_qf_i8mf4_tu_vint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -227,6 +249,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tu_vint8mf4_t(vint8mf4_t maskedoff, vfl /* ** test_sf_vfnrclip_x_f_qf_i8mf2_tu_vint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -237,6 +260,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tu_vint8mf2_t(vint8mf2_t maskedoff, vfl /* ** test_sf_vfnrclip_x_f_qf_i8m1_tu_vint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -247,6 +271,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tu_vint8m1_t(vint8m1_t maskedoff, vfloat3 /* ** test_sf_vfnrclip_x_f_qf_i8m2_tu_vint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... 
*/ @@ -257,6 +282,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tu_vint8m2_t(vint8m2_t maskedoff, vfloat3 /* ** test_sf_vfnrclip_x_f_qf_i8mf8_tum_vint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -267,6 +293,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tum_vint8mf8_t(vbool64_t mask, vint8mf8 /* ** test_sf_vfnrclip_x_f_qf_i8mf4_tum_vint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -277,6 +304,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tum_vint8mf4_t(vbool32_t mask, vint8mf4 /* ** test_sf_vfnrclip_x_f_qf_i8mf2_tum_vint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -287,6 +315,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tum_vint8mf2_t(vbool16_t mask, vint8mf2 /* ** test_sf_vfnrclip_x_f_qf_i8m1_tum_vint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -297,6 +326,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tum_vint8m1_t(vbool8_t mask, vint8m1_t ma /* ** test_sf_vfnrclip_x_f_qf_i8m2_tum_vint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -307,6 +337,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tum_vint8m2_t(vbool4_t mask, vint8m2_t ma /* ** test_sf_vfnrclip_x_f_qf_i8mf8_tumu_vint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,tu+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -317,6 +348,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tumu_vint8mf8_t(vbool64_t mask, vint8mf /* ** test_sf_vfnrclip_x_f_qf_i8mf4_tumu_vint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,tu+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... 
*/ @@ -327,6 +359,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tumu_vint8mf4_t(vbool32_t mask, vint8mf /* ** test_sf_vfnrclip_x_f_qf_i8mf2_tumu_vint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,tu+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -337,6 +370,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tumu_vint8mf2_t(vbool16_t mask, vint8mf /* ** test_sf_vfnrclip_x_f_qf_i8m1_tumu_vint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,tu+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -347,6 +381,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tumu_vint8m1_t(vbool8_t mask, vint8m1_t m /* ** test_sf_vfnrclip_x_f_qf_i8m2_tumu_vint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,tu+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -357,6 +392,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tumu_vint8m2_t(vbool4_t mask, vint8m2_t m /* ** test_sf_vfnrclip_x_f_qf_i8mf8_mu_vint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,ta+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -367,6 +403,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_mu_vint8mf8_t(vbool64_t mask, vint8mf8_ /* ** test_sf_vfnrclip_x_f_qf_i8mf4_mu_vint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,ta+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -377,6 +414,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_mu_vint8mf4_t(vbool32_t mask, vint8mf4_ /* ** test_sf_vfnrclip_x_f_qf_i8mf2_mu_vint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,ta+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -387,6 +425,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_mu_vint8mf2_t(vbool16_t mask, vint8mf2_ /* ** test_sf_vfnrclip_x_f_qf_i8m1_mu_vint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,ta+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... 
*/ @@ -397,6 +436,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_mu_vint8m1_t(vbool8_t mask, vint8m1_t mas /* ** test_sf_vfnrclip_x_f_qf_i8m2_mu_vint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,ta+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -407,6 +447,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_mu_vint8m2_t(vbool4_t mask, vint8m2_t mas /* ** test_sf_vfnrclip_x_f_qf_tu_vint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -417,6 +458,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_tu_vint8mf8_t(vint8mf8_t maskedoff, vfloat32m /* ** test_sf_vfnrclip_x_f_qf_tu_vint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -427,6 +469,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_tu_vint8mf4_t(vint8mf4_t maskedoff, vfloat32m /* ** test_sf_vfnrclip_x_f_qf_tu_vint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -437,6 +480,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_tu_vint8mf2_t(vint8mf2_t maskedoff, vfloat32m /* ** test_sf_vfnrclip_x_f_qf_tu_vint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -447,6 +491,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_tu_vint8m1_t(vint8m1_t maskedoff, vfloat32m4_t /* ** test_sf_vfnrclip_x_f_qf_tu_vint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -457,6 +502,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_tu_vint8m2_t(vint8m2_t maskedoff, vfloat32m8_t /* ** test_sf_vfnrclip_x_f_qf_tum_vint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -467,6 +513,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_tum_vint8mf8_t(vbool64_t mask, vint8mf8_t mas /* ** test_sf_vfnrclip_x_f_qf_tum_vint8mf4_t: ** ... 
+** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -477,6 +524,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_tum_vint8mf4_t(vbool32_t mask, vint8mf4_t mas /* ** test_sf_vfnrclip_x_f_qf_tum_vint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -487,6 +535,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_tum_vint8mf2_t(vbool16_t mask, vint8mf2_t mas /* ** test_sf_vfnrclip_x_f_qf_tum_vint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -497,6 +546,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_tum_vint8m1_t(vbool8_t mask, vint8m1_t maskedo /* ** test_sf_vfnrclip_x_f_qf_tum_vint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -508,6 +558,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_tum_vint8m2_t(vbool4_t mask, vint8m2_t maskedo /* ** test_sf_vfnrclip_x_f_qf_tumu_vint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,tu+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -518,6 +569,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_tumu_vint8mf8_t(vbool64_t mask, vint8mf8_t ma /* ** test_sf_vfnrclip_x_f_qf_i8mf4_tumu_vint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,tu+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -528,6 +580,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_tumu_vint8mf4_t(vbool32_t mask, vint8mf4_t ma /* ** test_sf_vfnrclip_x_f_qf_tumu_vint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,tu+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -538,6 +591,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_tumu_vint8mf2_t(vbool16_t mask, vint8mf2_t ma /* ** test_sf_vfnrclip_x_f_qf_tumu_vint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,tu+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... 
*/ @@ -548,6 +602,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_tumu_vint8m1_t(vbool8_t mask, vint8m1_t masked /* ** test_sf_vfnrclip_x_f_qf_tumu_vint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,tu+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -558,6 +613,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_tumu_vint8m2_t(vbool4_t mask, vint8m2_t masked /* ** test_sf_vfnrclip_x_f_qf_mu_vint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,ta+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -568,6 +624,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_mu_vint8mf8_t(vbool64_t mask, vint8mf8_t mask /* ** test_sf_vfnrclip_x_f_qf_mu_vint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,ta+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -578,6 +635,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_mu_vint8mf4_t(vbool32_t mask, vint8mf4_t mask /* ** test_sf_vfnrclip_x_f_qf_mu_vint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,ta+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -588,6 +646,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_mu_vint8mf2_t(vbool16_t mask, vint8mf2_t mask /* ** test_sf_vfnrclip_x_f_qf_mu_vint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,ta+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -598,6 +657,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_mu_vint8m1_t(vbool8_t mask, vint8m1_t maskedof /* ** test_sf_vfnrclip_x_f_qf_mu_vint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,ta+,mu+ ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_xu_f_qf.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_xu_f_qf.c index f5a22966a99..c126746d581 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_xu_f_qf.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_xu_f_qf.c @@ -7,6 +7,7 @@ /* ** test_sf_vfnrclip_xu_f_qf_u8mf8_vuint8mf8_t: ** ... 
+** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -17,6 +18,7 @@ vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_vuint8mf8_t(vfloat32mf2_t vs2, float /* ** test_sf_vfnrclip_xu_f_qf_u8mf4_vuint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -27,6 +29,7 @@ vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_vuint8mf4_t(vfloat32m1_t vs2, float r /* ** test_sf_vfnrclip_xu_f_qf_u8mf2_vuint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -37,6 +40,7 @@ vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_vuint8mf2_t(vfloat32m2_t vs2, float r /* ** test_sf_vfnrclip_xu_f_qf_u8m1_vuint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -47,6 +51,7 @@ vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_vuint8m1_t(vfloat32m4_t vs2, float rs1, /* ** test_sf_vfnrclip_xu_f_qf_u8m2_vuint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -57,6 +62,7 @@ vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_vuint8m2_t(vfloat32m8_t vs2, float rs1, /* ** test_sf_vfnrclip_xu_f_qf_u8mf8_m_vuint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -67,6 +73,7 @@ vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_m_vuint8mf8_t(vbool64_t mask, vfloat3 /* ** test_sf_vfnrclip_xu_f_qf_u8mf4_m_vuint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -77,6 +84,7 @@ vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_m_vuint8mf4_t(vbool32_t mask, vfloat3 /* ** test_sf_vfnrclip_xu_f_qf_u8mf2_m_vuint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... 
*/ @@ -87,6 +95,7 @@ vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_m_vuint8mf2_t(vbool16_t mask, vfloat3 /* ** test_sf_vfnrclip_xu_f_qf_u8m1_m_vuint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -97,6 +106,7 @@ vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_m_vuint8m1_t(vbool8_t mask, vfloat32m4_ /* ** test_sf_vfnrclip_xu_f_qf_u8m2_m_vuint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -107,6 +117,7 @@ vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_m_vuint8m2_t(vbool4_t mask, vfloat32m8_ /* ** test_sf_vfnrclip_xu_f_qf_vuint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -117,6 +128,7 @@ vuint8mf8_t test_sf_vfnrclip_xu_f_qf_vuint8mf8_t(vfloat32mf2_t vs2, float rs1, s /* ** test_sf_vfnrclip_xu_f_qf_vuint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -127,6 +139,7 @@ vuint8mf4_t test_sf_vfnrclip_xu_f_qf_vuint8mf4_t(vfloat32m1_t vs2, float rs1, si /* ** test_sf_vfnrclip_xu_f_qf_vuint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -137,6 +150,7 @@ vuint8mf2_t test_sf_vfnrclip_xu_f_qf_vuint8mf2_t(vfloat32m2_t vs2, float rs1, si /* ** test_sf_vfnrclip_xu_f_qf_vuint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -147,6 +161,7 @@ vuint8m1_t test_sf_vfnrclip_xu_f_qf_vuint8m1_t(vfloat32m4_t vs2, float rs1, size /* ** test_sf_vfnrclip_xu_f_qf_vuint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -157,6 +172,7 @@ vuint8m2_t test_sf_vfnrclip_xu_f_qf_vuint8m2_t(vfloat32m8_t vs2, float rs1, size /* ** test_sf_vfnrclip_xu_f_qf_mask_vuint8mf8_t: ** ... 
+** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -167,6 +183,7 @@ vuint8mf8_t test_sf_vfnrclip_xu_f_qf_mask_vuint8mf8_t(vbool64_t mask, vfloat32mf /* ** test_sf_vfnrclip_xu_f_qf_mask_vuint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -177,6 +194,7 @@ vuint8mf4_t test_sf_vfnrclip_xu_f_qf_mask_vuint8mf4_t(vbool32_t mask, vfloat32m1 /* ** test_sf_vfnrclip_xu_f_qf_mask_vuint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -187,6 +205,7 @@ vuint8mf2_t test_sf_vfnrclip_xu_f_qf_mask_vuint8mf2_t(vbool16_t mask, vfloat32m2 /* ** test_sf_vfnrclip_xu_f_qf_mask_vuint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -197,16 +216,18 @@ vuint8m1_t test_sf_vfnrclip_xu_f_qf_mask_vuint8m1_t(vbool8_t mask, vfloat32m4_t /* ** test_sf_vfnrclip_xu_f_qf_mask_vuint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ -vuint8m2_t test_sf_vfnrclip_xu_f_qf_mask_vuint8m2_t(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) { +vuint8m2_t test_sf_vfnrclip_xu_f_qf_mask_vuint8m2_t(vbool4_t mask,vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, vl); } /* ** test_sf_vfnrclip_xu_f_qf_u8mf8_tu_vuint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -217,6 +238,7 @@ vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tu_vuint8mf8_t(vuint8mf8_t maskedoff, /* ** test_sf_vfnrclip_xu_f_qf_u8mf4_tu_vuint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... 
*/ @@ -227,6 +249,7 @@ vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tu_vuint8mf4_t(vuint8mf4_t maskedoff, /* ** test_sf_vfnrclip_xu_f_qf_u8mf2_tu_vuint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -237,6 +260,7 @@ vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tu_vuint8mf2_t(vuint8mf2_t maskedoff, /* ** test_sf_vfnrclip_xu_f_qf_u8m1_tu_vuint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -247,6 +271,7 @@ vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tu_vuint8m1_t(vuint8m1_t maskedoff, vfl /* ** test_sf_vfnrclip_xu_f_qf_u8m2_tu_vuint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -257,6 +282,7 @@ vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tu_vuint8m2_t(vuint8m2_t maskedoff, vfl /* ** test_sf_vfnrclip_xu_f_qf_u8mf8_tum_vuint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -267,6 +293,7 @@ vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tum_vuint8mf8_t(vbool64_t mask, vuint /* ** test_sf_vfnrclip_xu_f_qf_u8mf4_tum_vuint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -277,6 +304,7 @@ vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tum_vuint8mf4_t(vbool32_t mask, vuint /* ** test_sf_vfnrclip_xu_f_qf_u8mf2_tum_vuint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -287,6 +315,7 @@ vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tum_vuint8mf2_t(vbool16_t mask, vuint /* ** test_sf_vfnrclip_xu_f_qf_u8m1_tum_vuint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... 
*/ @@ -297,6 +326,7 @@ vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tum_vuint8m1_t(vbool8_t mask, vuint8m1_ /* ** test_sf_vfnrclip_xu_f_qf_u8m2_tum_vuint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -307,6 +337,7 @@ vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tum_vuint8m2_t(vbool4_t mask, vuint8m2_ /* ** test_sf_vfnrclip_xu_f_qf_u8mf8_tumu_vuint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,tu+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -317,6 +348,7 @@ vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tumu_vuint8mf8_t(vbool64_t mask, vuin /* ** test_sf_vfnrclip_xu_f_qf_u8mf4_tumu_vuint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,tu+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -327,6 +359,7 @@ vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tumu_vuint8mf4_t(vbool32_t mask, vuin /* ** test_sf_vfnrclip_xu_f_qf_u8mf2_tumu_vuint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,tu+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -337,6 +370,7 @@ vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tumu_vuint8mf2_t(vbool16_t mask, vuin /* ** test_sf_vfnrclip_xu_f_qf_u8m1_tumu_vuint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,tu+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -347,6 +381,7 @@ vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tumu_vuint8m1_t(vbool8_t mask, vuint8m1 /* ** test_sf_vfnrclip_xu_f_qf_u8m2_tumu_vuint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,tu+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -357,6 +392,7 @@ vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tumu_vuint8m2_t(vbool4_t mask, vuint8m2 /* ** test_sf_vfnrclip_xu_f_qf_u8mf8_mu_vuint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,ta+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... 
*/ @@ -367,6 +403,7 @@ vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_mu_vuint8mf8_t(vbool64_t mask, vuint8 /* ** test_sf_vfnrclip_xu_f_qf_u8mf4_mu_vuint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,ta+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -377,6 +414,7 @@ vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_mu_vuint8mf4_t(vbool32_t mask, vuint8 /* ** test_sf_vfnrclip_xu_f_qf_u8mf2_mu_vuint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,ta+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -387,6 +425,7 @@ vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_mu_vuint8mf2_t(vbool16_t mask, vuint8 /* ** test_sf_vfnrclip_xu_f_qf_u8m1_mu_vuint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,ta+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -397,6 +436,7 @@ vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_mu_vuint8m1_t(vbool8_t mask, vuint8m1_t /* ** test_sf_vfnrclip_xu_f_qf_u8m2_mu_vuint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,ta+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -407,6 +447,7 @@ vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_mu_vuint8m2_t(vbool4_t mask, vuint8m2_t /* ** test_sf_vfnrclip_xu_f_qf_tu_vuint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -417,6 +458,7 @@ vuint8mf8_t test_sf_vfnrclip_xu_f_qf_tu_vuint8mf8_t(vuint8mf8_t maskedoff, vfloa /* ** test_sf_vfnrclip_xu_f_qf_tu_vuint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -427,6 +469,7 @@ vuint8mf4_t test_sf_vfnrclip_xu_f_qf_tu_vuint8mf4_t(vuint8mf4_t maskedoff, vfloa /* ** test_sf_vfnrclip_xu_f_qf_tu_vuint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... 
*/ @@ -437,6 +480,7 @@ vuint8mf2_t test_sf_vfnrclip_xu_f_qf_tu_vuint8mf2_t(vuint8mf2_t maskedoff, vfloa /* ** test_sf_vfnrclip_xu_f_qf_tu_vuint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -447,6 +491,7 @@ vuint8m1_t test_sf_vfnrclip_xu_f_qf_tu_vuint8m1_t(vuint8m1_t maskedoff, vfloat32 /* ** test_sf_vfnrclip_xu_f_qf_tu_vuint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ ** ... */ @@ -457,6 +502,7 @@ vuint8m2_t test_sf_vfnrclip_xu_f_qf_tu_vuint8m2_t(vuint8m2_t maskedoff, vfloat32 /* ** test_sf_vfnrclip_xu_f_qf_tum_vuint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -467,6 +513,7 @@ vuint8mf8_t test_sf_vfnrclip_xu_f_qf_tum_vuint8mf8_t(vbool64_t mask, vuint8mf8_t /* ** test_sf_vfnrclip_xu_f_qf_tum_vuint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -477,6 +524,7 @@ vuint8mf4_t test_sf_vfnrclip_xu_f_qf_tum_vuint8mf4_t(vbool32_t mask, vuint8mf4_t /* ** test_sf_vfnrclip_xu_f_qf_tum_vuint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -487,6 +535,7 @@ vuint8mf2_t test_sf_vfnrclip_xu_f_qf_tum_vuint8mf2_t(vbool16_t mask, vuint8mf2_t /* ** test_sf_vfnrclip_xu_f_qf_tum_vuint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -497,6 +546,7 @@ vuint8m1_t test_sf_vfnrclip_xu_f_qf_tum_vuint8m1_t(vbool8_t mask, vuint8m1_t mas /* ** test_sf_vfnrclip_xu_f_qf_tum_vuint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... 
*/ @@ -504,9 +554,11 @@ vuint8m2_t test_sf_vfnrclip_xu_f_qf_tum_vuint8m2_t(vbool4_t mask, vuint8m2_t mas return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl); } + /* ** test_sf_vfnrclip_xu_f_qf_tumu_vuint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,tu+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -517,6 +569,7 @@ vuint8mf8_t test_sf_vfnrclip_xu_f_qf_tumu_vuint8mf8_t(vbool64_t mask, vuint8mf8_ /* ** test_sf_vfnrclip_xu_f_qf_u8mf4_tumu_vuint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,tu+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -527,6 +580,7 @@ vuint8mf4_t test_sf_vfnrclip_xu_f_qf_tumu_vuint8mf4_t(vbool32_t mask, vuint8mf4_ /* ** test_sf_vfnrclip_xu_f_qf_tumu_vuint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,tu+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -537,6 +591,7 @@ vuint8mf2_t test_sf_vfnrclip_xu_f_qf_tumu_vuint8mf2_t(vbool16_t mask, vuint8mf2_ /* ** test_sf_vfnrclip_xu_f_qf_tumu_vuint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,tu+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -547,6 +602,7 @@ vuint8m1_t test_sf_vfnrclip_xu_f_qf_tumu_vuint8m1_t(vbool8_t mask, vuint8m1_t ma /* ** test_sf_vfnrclip_xu_f_qf_tumu_vuint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,tu+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -557,6 +613,7 @@ vuint8m2_t test_sf_vfnrclip_xu_f_qf_tumu_vuint8m2_t(vbool4_t mask, vuint8m2_t ma /* ** test_sf_vfnrclip_xu_f_qf_mu_vuint8mf8_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf8+,ta+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -567,6 +624,7 @@ vuint8mf8_t test_sf_vfnrclip_xu_f_qf_mu_vuint8mf8_t(vbool64_t mask, vuint8mf8_t /* ** test_sf_vfnrclip_xu_f_qf_mu_vuint8mf4_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf4+,ta+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... 
*/ @@ -577,6 +635,7 @@ vuint8mf4_t test_sf_vfnrclip_xu_f_qf_mu_vuint8mf4_t(vbool32_t mask, vuint8mf4_t /* ** test_sf_vfnrclip_xu_f_qf_mu_vuint8mf2_t: ** ... +** vsetivli\s+zero+,0+,e8+,mf2+,ta+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -587,6 +646,7 @@ vuint8mf2_t test_sf_vfnrclip_xu_f_qf_mu_vuint8mf2_t(vbool16_t mask, vuint8mf2_t /* ** test_sf_vfnrclip_xu_f_qf_mu_vuint8m1_t: ** ... +** vsetivli\s+zero+,0+,e8+,m1+,ta+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ @@ -597,6 +657,7 @@ vuint8m1_t test_sf_vfnrclip_xu_f_qf_mu_vuint8m1_t(vbool8_t mask, vuint8m1_t mask /* ** test_sf_vfnrclip_xu_f_qf_mu_vuint8m2_t: ** ... +** vsetivli\s+zero+,0+,e8+,m2+,ta+,mu+ ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t ** ... */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_2x8x2.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_2x8x2.c index f2058a14779..6bb659b5d23 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_2x8x2.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_2x8x2.c @@ -7,6 +7,7 @@ /* ** test_sf_vqmacc_2x8x2_i32m1_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -20,6 +21,7 @@ test_sf_vqmacc_2x8x2_i32m1_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_2x8x2_i32m2_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -33,6 +35,7 @@ test_sf_vqmacc_2x8x2_i32m2_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_2x8x2_i32m4_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -46,6 +49,7 @@ test_sf_vqmacc_2x8x2_i32m4_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_2x8x2_i32m8_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... 
*/ @@ -59,6 +63,7 @@ test_sf_vqmacc_2x8x2_i32m8_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_2x8x2_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -72,6 +77,7 @@ test_sf_vqmacc_2x8x2_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, /* ** test_sf_vqmacc_2x8x2_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -85,6 +91,7 @@ test_sf_vqmacc_2x8x2_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, vint8m2_t vs2, /* ** test_sf_vqmacc_2x8x2_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -98,6 +105,7 @@ test_sf_vqmacc_2x8x2_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, vint8m4_t vs2, /* ** test_sf_vqmacc_2x8x2_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -111,6 +119,7 @@ test_sf_vqmacc_2x8x2_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, vint8m8_t vs2, /* ** test_sf_vqmacc_2x8x2_i32m1_tu_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -124,6 +133,7 @@ test_sf_vqmacc_2x8x2_i32m1_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_2x8x2_i32m2_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -137,6 +147,7 @@ test_sf_vqmacc_2x8x2_i32m2_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_2x8x2_i32m4_tu_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -150,6 +161,7 @@ test_sf_vqmacc_2x8x2_i32m4_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_2x8x2_i32m8_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... 
*/ @@ -163,6 +175,7 @@ test_sf_vqmacc_2x8x2_i32m8_tu_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_2x8x2_tu_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -176,6 +189,7 @@ test_sf_vqmacc_2x8x2_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, /* ** test_sf_vqmacc_2x8x2_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -189,6 +203,7 @@ test_sf_vqmacc_2x8x2_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, vint8m2_t vs2, /* ** test_sf_vqmacc_2x8x2_tu_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -202,6 +217,7 @@ test_sf_vqmacc_2x8x2_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, vint8m4_t vs2, /* ** test_sf_vqmacc_2x8x2_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_4x8x4.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_4x8x4.c index 3bd6f1c273c..8106d0dbbab 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_4x8x4.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_4x8x4.c @@ -7,6 +7,7 @@ /* ** test_sf_vqmacc_4x8x4_i32m1_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -20,6 +21,7 @@ test_sf_vqmacc_4x8x4_i32m1_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_4x8x4_i32m2_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -33,6 +35,7 @@ test_sf_vqmacc_4x8x4_i32m2_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_4x8x4_i32m4_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... 
*/ @@ -46,6 +49,7 @@ test_sf_vqmacc_4x8x4_i32m4_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_4x8x4_i32m8_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -59,6 +63,7 @@ test_sf_vqmacc_4x8x4_i32m8_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_4x8x4_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -72,6 +77,7 @@ test_sf_vqmacc_4x8x4_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, vint8mf2_t vs2, /* ** test_sf_vqmacc_4x8x4_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -85,6 +91,7 @@ test_sf_vqmacc_4x8x4_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, vint8m1_t vs2, /* ** test_sf_vqmacc_4x8x4_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -98,6 +105,7 @@ test_sf_vqmacc_4x8x4_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, vint8m2_t vs2, /* ** test_sf_vqmacc_4x8x4_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -111,6 +119,7 @@ test_sf_vqmacc_4x8x4_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, vint8m4_t vs2, /* ** test_sf_vqmacc_4x8x4_i32m1_tu_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -124,6 +133,7 @@ test_sf_vqmacc_4x8x4_i32m1_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_4x8x4_i32m2_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -137,6 +147,7 @@ test_sf_vqmacc_4x8x4_i32m2_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_4x8x4_i32m4_tu_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... 
*/ @@ -150,6 +161,7 @@ test_sf_vqmacc_4x8x4_i32m4_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_4x8x4_i32m8_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -163,6 +175,7 @@ test_sf_vqmacc_4x8x4_i32m8_tu_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_4x8x4_tu_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -176,6 +189,7 @@ test_sf_vqmacc_4x8x4_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, /* ** test_sf_vqmacc_4x8x4_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -189,6 +203,7 @@ test_sf_vqmacc_4x8x4_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, vint8m1_t vs2, /* ** test_sf_vqmacc_4x8x4_tu_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -202,6 +217,7 @@ test_sf_vqmacc_4x8x4_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, vint8m2_t vs2, /* ** test_sf_vqmacc_4x8x4_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_2x8x2.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_2x8x2.c index 663c7634ebf..c51b53f7b17 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_2x8x2.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_2x8x2.c @@ -7,6 +7,7 @@ /* ** test_sf_vqmaccsu_2x8x2_i32m1_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -20,6 +21,7 @@ test_sf_vqmaccsu_2x8x2_i32m1_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_2x8x2_i32m2_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... 
*/ @@ -33,6 +35,7 @@ test_sf_vqmaccsu_2x8x2_i32m2_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_2x8x2_i32m4_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -46,6 +49,7 @@ test_sf_vqmaccsu_2x8x2_i32m4_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_2x8x2_i32m8_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -59,6 +63,7 @@ test_sf_vqmaccsu_2x8x2_i32m8_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_2x8x2_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -72,6 +77,7 @@ test_sf_vqmaccsu_2x8x2_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, /* ** test_sf_vqmaccsu_2x8x2_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -85,6 +91,7 @@ test_sf_vqmaccsu_2x8x2_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, vuint8m2_t vs2, /* ** test_sf_vqmaccsu_2x8x2_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -98,6 +105,7 @@ test_sf_vqmaccsu_2x8x2_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, vuint8m4_t vs2, /* ** test_sf_vqmaccsu_2x8x2_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -111,6 +119,7 @@ test_sf_vqmaccsu_2x8x2_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, vuint8m8_t vs2, /* ** test_sf_vqmaccsu_2x8x2_i32m1_tu_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -124,6 +133,7 @@ test_sf_vqmaccsu_2x8x2_i32m1_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_2x8x2_i32m2_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... 
*/ @@ -137,6 +147,7 @@ test_sf_vqmaccsu_2x8x2_i32m2_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_2x8x2_i32m4_tu_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -150,6 +161,7 @@ test_sf_vqmaccsu_2x8x2_i32m4_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_2x8x2_i32m8_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -163,6 +175,7 @@ test_sf_vqmaccsu_2x8x2_i32m8_tu_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_2x8x2_tu_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -176,6 +189,7 @@ test_sf_vqmaccsu_2x8x2_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_2x8x2_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -189,6 +203,7 @@ test_sf_vqmaccsu_2x8x2_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_2x8x2_tu_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -202,6 +217,7 @@ test_sf_vqmaccsu_2x8x2_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_2x8x2_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -211,3 +227,4 @@ test_sf_vqmaccsu_2x8x2_tu_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, { return __riscv_sf_vqmaccsu_2x8x2_tu (vd, vs1, vs2, vl); } + diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_4x8x4.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_4x8x4.c index 0554e564253..6625af7886b 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_4x8x4.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_4x8x4.c @@ -7,6 +7,7 @@ /* ** test_sf_vqmaccsu_4x8x4_i32m1_vint32m1_t: ** ... 
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -20,6 +21,7 @@ test_sf_vqmaccsu_4x8x4_i32m1_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_4x8x4_i32m2_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -33,6 +35,7 @@ test_sf_vqmaccsu_4x8x4_i32m2_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_4x8x4_i32m4_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -46,6 +49,7 @@ test_sf_vqmaccsu_4x8x4_i32m4_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_4x8x4_i32m8_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -59,6 +63,7 @@ test_sf_vqmaccsu_4x8x4_i32m8_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_4x8x4_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -72,6 +77,7 @@ test_sf_vqmaccsu_4x8x4_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_4x8x4_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -85,6 +91,7 @@ test_sf_vqmaccsu_4x8x4_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, /* ** test_sf_vqmaccsu_4x8x4_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -98,6 +105,7 @@ test_sf_vqmaccsu_4x8x4_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, vuint8m2_t vs2, /* ** test_sf_vqmaccsu_4x8x4_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -111,6 +119,7 @@ test_sf_vqmaccsu_4x8x4_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, vuint8m4_t vs2, /* ** test_sf_vqmaccsu_4x8x4_i32m1_tu_vint32m1_t: ** ... 
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -124,6 +133,7 @@ test_sf_vqmaccsu_4x8x4_i32m1_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_4x8x4_i32m2_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -137,6 +147,7 @@ test_sf_vqmaccsu_4x8x4_i32m2_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_4x8x4_i32m4_tu_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -150,6 +161,7 @@ test_sf_vqmaccsu_4x8x4_i32m4_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_4x8x4_i32m8_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -163,6 +175,7 @@ test_sf_vqmaccsu_4x8x4_i32m8_tu_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_4x8x4_tu_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -176,6 +189,7 @@ test_sf_vqmaccsu_4x8x4_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_4x8x4_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -189,6 +203,7 @@ test_sf_vqmaccsu_4x8x4_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_4x8x4_tu_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -202,6 +217,7 @@ test_sf_vqmaccsu_4x8x4_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, /* ** test_sf_vqmaccsu_4x8x4_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... 
*/ @@ -211,3 +227,4 @@ test_sf_vqmaccsu_4x8x4_tu_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, { return __riscv_sf_vqmaccsu_4x8x4_tu (vd, vs1, vs2, vl); } + diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_2x8x2.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_2x8x2.c index dd15cc2d544..46cbc0c0b09 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_2x8x2.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_2x8x2.c @@ -7,6 +7,7 @@ /* ** test_sf_vqmaccu_2x8x2_i32m1_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -20,6 +21,7 @@ test_sf_vqmaccu_2x8x2_i32m1_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_2x8x2_i32m2_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -33,6 +35,7 @@ test_sf_vqmaccu_2x8x2_i32m2_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_2x8x2_i32m4_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -46,6 +49,7 @@ test_sf_vqmaccu_2x8x2_i32m4_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_2x8x2_i32m8_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -59,6 +63,7 @@ test_sf_vqmaccu_2x8x2_i32m8_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_2x8x2_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -72,6 +77,7 @@ test_sf_vqmaccu_2x8x2_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, /* ** test_sf_vqmaccu_2x8x2_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -85,6 +91,7 @@ test_sf_vqmaccu_2x8x2_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, vuint8m2_t vs2, /* ** test_sf_vqmaccu_2x8x2_vint32m4_t: ** ... 
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -98,6 +105,7 @@ test_sf_vqmaccu_2x8x2_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, vuint8m4_t vs2, /* ** test_sf_vqmaccu_2x8x2_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -111,6 +119,7 @@ test_sf_vqmaccu_2x8x2_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, vuint8m8_t vs2, /* ** test_sf_vqmaccu_2x8x2_i32m1_tu_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -124,6 +133,7 @@ test_sf_vqmaccu_2x8x2_i32m1_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_2x8x2_i32m2_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -137,6 +147,7 @@ test_sf_vqmaccu_2x8x2_i32m2_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_2x8x2_i32m4_tu_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -150,6 +161,7 @@ test_sf_vqmaccu_2x8x2_i32m4_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_2x8x2_i32m8_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -163,6 +175,7 @@ test_sf_vqmaccu_2x8x2_i32m8_tu_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_2x8x2_tu_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -176,6 +189,7 @@ test_sf_vqmaccu_2x8x2_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_2x8x2_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -189,6 +203,7 @@ test_sf_vqmaccu_2x8x2_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_2x8x2_tu_vint32m4_t: ** ... 
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -202,6 +217,7 @@ test_sf_vqmaccu_2x8x2_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_2x8x2_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_4x8x4.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_4x8x4.c index c386b4ee79e..fb20e460da8 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_4x8x4.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_4x8x4.c @@ -7,6 +7,7 @@ /* ** test_sf_vqmaccu_4x8x4_i32m1_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -20,6 +21,7 @@ test_sf_vqmaccu_4x8x4_i32m1_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_4x8x4_i32m2_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -33,6 +35,7 @@ test_sf_vqmaccu_4x8x4_i32m2_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_4x8x4_i32m4_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -46,6 +49,7 @@ test_sf_vqmaccu_4x8x4_i32m4_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_4x8x4_i32m8_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -59,6 +63,7 @@ test_sf_vqmaccu_4x8x4_i32m8_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_4x8x4_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -72,6 +77,7 @@ test_sf_vqmaccu_4x8x4_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_4x8x4_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... 
*/ @@ -85,6 +91,7 @@ test_sf_vqmaccu_4x8x4_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, /* ** test_sf_vqmaccu_4x8x4_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -98,6 +105,7 @@ test_sf_vqmaccu_4x8x4_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, vuint8m2_t vs2, /* ** test_sf_vqmaccu_4x8x4_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -111,6 +119,7 @@ test_sf_vqmaccu_4x8x4_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, vuint8m4_t vs2, /* ** test_sf_vqmaccu_4x8x4_i32m1_tu_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -124,6 +133,7 @@ test_sf_vqmaccu_4x8x4_i32m1_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_4x8x4_i32m2_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -137,6 +147,7 @@ test_sf_vqmaccu_4x8x4_i32m2_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_4x8x4_i32m4_tu_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -150,6 +161,7 @@ test_sf_vqmaccu_4x8x4_i32m4_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_4x8x4_i32m8_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -163,6 +175,7 @@ test_sf_vqmaccu_4x8x4_i32m8_tu_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_4x8x4_tu_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -176,6 +189,7 @@ test_sf_vqmaccu_4x8x4_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_4x8x4_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... 
*/ @@ -189,6 +203,7 @@ test_sf_vqmaccu_4x8x4_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_4x8x4_tu_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -202,6 +217,7 @@ test_sf_vqmaccu_4x8x4_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccu_4x8x4_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -211,3 +227,4 @@ test_sf_vqmaccu_4x8x4_tu_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, { return __riscv_sf_vqmaccu_4x8x4_tu (vd, vs1, vs2, vl); } + diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_2x8x2.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_2x8x2.c index db1650eb6ad..4a25b1a598a 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_2x8x2.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_2x8x2.c @@ -7,6 +7,7 @@ /* ** test_sf_vqmaccus_2x8x2_i32m1_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -20,6 +21,7 @@ test_sf_vqmaccus_2x8x2_i32m1_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_2x8x2_i32m2_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -33,6 +35,7 @@ test_sf_vqmaccus_2x8x2_i32m2_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_2x8x2_i32m4_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -46,6 +49,7 @@ test_sf_vqmaccus_2x8x2_i32m4_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_2x8x2_i32m8_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -59,6 +63,7 @@ test_sf_vqmaccus_2x8x2_i32m8_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_2x8x2_vint32m1_t: ** ... 
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -72,6 +77,7 @@ test_sf_vqmaccus_2x8x2_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, vint8m1_t vs2, /* ** test_sf_vqmaccus_2x8x2_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -85,6 +91,7 @@ test_sf_vqmaccus_2x8x2_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, vint8m2_t vs2, /* ** test_sf_vqmaccus_2x8x2_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -98,6 +105,7 @@ test_sf_vqmaccus_2x8x2_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, vint8m4_t vs2, /* ** test_sf_vqmaccus_2x8x2_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -111,6 +119,7 @@ test_sf_vqmaccus_2x8x2_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, vint8m8_t vs2, /* ** test_sf_vqmaccus_2x8x2_i32m1_tu_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -124,6 +133,7 @@ test_sf_vqmaccus_2x8x2_i32m1_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_2x8x2_i32m2_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -137,6 +147,7 @@ test_sf_vqmaccus_2x8x2_i32m2_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_2x8x2_i32m4_tu_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -150,6 +161,7 @@ test_sf_vqmaccus_2x8x2_i32m4_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_2x8x2_i32m8_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -163,6 +175,7 @@ test_sf_vqmaccus_2x8x2_i32m8_tu_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_2x8x2_tu_vint32m1_t: ** ... 
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -176,6 +189,7 @@ test_sf_vqmaccus_2x8x2_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_2x8x2_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -189,6 +203,7 @@ test_sf_vqmaccus_2x8x2_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_2x8x2_tu_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -202,6 +217,7 @@ test_sf_vqmaccus_2x8x2_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_2x8x2_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -211,3 +227,4 @@ test_sf_vqmaccus_2x8x2_tu_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, { return __riscv_sf_vqmaccus_2x8x2_tu (vd, vs1, vs2, vl); } + diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_4x8x4.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_4x8x4.c index 5c5e1a043bc..c82621cbe6e 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_4x8x4.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_4x8x4.c @@ -7,6 +7,7 @@ /* ** test_sf_vqmaccus_4x8x4_i32m1_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -20,6 +21,7 @@ test_sf_vqmaccus_4x8x4_i32m1_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_4x8x4_i32m2_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -33,6 +35,7 @@ test_sf_vqmaccus_4x8x4_i32m2_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_4x8x4_i32m4_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... 
*/ @@ -46,6 +49,7 @@ test_sf_vqmaccus_4x8x4_i32m4_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_4x8x4_i32m8_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -59,6 +63,7 @@ test_sf_vqmaccus_4x8x4_i32m8_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_4x8x4_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,ta,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -72,6 +77,7 @@ test_sf_vqmaccus_4x8x4_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_4x8x4_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,ta,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -85,6 +91,7 @@ test_sf_vqmaccus_4x8x4_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, vint8m1_t vs2, /* ** test_sf_vqmaccus_4x8x4_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,ta,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -98,6 +105,7 @@ test_sf_vqmaccus_4x8x4_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, vint8m2_t vs2, /* ** test_sf_vqmaccus_4x8x4_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,ta,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -111,6 +119,7 @@ test_sf_vqmaccus_4x8x4_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, vint8m4_t vs2, /* ** test_sf_vqmaccus_4x8x4_i32m1_tu_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -124,6 +133,7 @@ test_sf_vqmaccus_4x8x4_i32m1_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_4x8x4_i32m2_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -137,6 +147,7 @@ test_sf_vqmaccus_4x8x4_i32m2_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_4x8x4_i32m4_tu_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... 
*/ @@ -150,6 +161,7 @@ test_sf_vqmaccus_4x8x4_i32m4_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_4x8x4_i32m8_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -163,6 +175,7 @@ test_sf_vqmaccus_4x8x4_i32m8_tu_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_4x8x4_tu_vint32m1_t: ** ... +** vsetivli\s+zero+,0+,e32+,m1,tu,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -176,6 +189,7 @@ test_sf_vqmaccus_4x8x4_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_4x8x4_tu_vint32m2_t: ** ... +** vsetivli\s+zero+,0+,e32+,m2,tu,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -189,6 +203,7 @@ test_sf_vqmaccus_4x8x4_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_4x8x4_tu_vint32m4_t: ** ... +** vsetivli\s+zero+,0+,e32+,m4,tu,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -202,6 +217,7 @@ test_sf_vqmaccus_4x8x4_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, /* ** test_sf_vqmaccus_4x8x4_tu_vint32m8_t: ** ... +** vsetivli\s+zero+,0+,e32+,m8,tu,ma+ ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ ** ... */ @@ -211,3 +227,4 @@ test_sf_vqmaccus_4x8x4_tu_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, { return __riscv_sf_vqmaccus_4x8x4_tu (vd, vs1, vs2, vl); } +