author     Jonathan Wright <jonathan.wright@arm.com>   2021-11-10 10:49:19 +0000
committer  Jonathan Wright <jonathan.wright@arm.com>   2021-11-11 15:34:51 +0000
commit     1716ddd1e9e7a5c63062e07ac3ca41364fea9917
tree       4ee1e74f00ddf2b1a7e13a00cb766850bdda638e
parent     6eca10aa761c56b9a26763e036c72475337194f8
aarch64: Use type-qualified builtins for LD1/ST1 Neon intrinsics
Declare unsigned and polynomial type-qualified builtins for LD1/ST1
Neon intrinsics. Using these builtins removes the need for many casts
in arm_neon.h.

The new type-qualified builtins are also lowered to gimple - as the
unqualified builtins are already.

gcc/ChangeLog:

2021-11-10  Jonathan Wright  <jonathan.wright@arm.com>

        * config/aarch64/aarch64-builtins.c (TYPES_LOAD1_U): Define.
        (TYPES_LOAD1_P): Define.
        (TYPES_STORE1_U): Define.
        (TYPES_STORE1P): Rename to...
        (TYPES_STORE1_P): This.
        (get_mem_type_for_load_store): Add unsigned and poly types.
        (aarch64_general_gimple_fold_builtin): Add unsigned and poly
        type-qualified builtin declarations.
        * config/aarch64/aarch64-simd-builtins.def: Declare type-
        qualified builtins for LD1/ST1.
        * config/aarch64/arm_neon.h (vld1_p8): Use type-qualified
        builtin and remove cast.
        (vld1_p16): Likewise.
        (vld1_u8): Likewise.
        (vld1_u16): Likewise.
        (vld1_u32): Likewise.
        (vld1q_p8): Likewise.
        (vld1q_p16): Likewise.
        (vld1q_p64): Likewise.
        (vld1q_u8): Likewise.
        (vld1q_u16): Likewise.
        (vld1q_u32): Likewise.
        (vld1q_u64): Likewise.
        (vst1_p8): Likewise.
        (vst1_p16): Likewise.
        (vst1_u8): Likewise.
        (vst1_u16): Likewise.
        (vst1_u32): Likewise.
        (vst1q_p8): Likewise.
        (vst1q_p16): Likewise.
        (vst1q_p64): Likewise.
        (vst1q_u8): Likewise.
        (vst1q_u16): Likewise.
        (vst1q_u32): Likewise.
        (vst1q_u64): Likewise.
        * config/aarch64/iterators.md (VALLP_NO_DI): New iterator.
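To make the cast removal concrete, here is the vld1_u8 change in
isolation, abridged from the arm_neon.h hunk below (the
__extension__/__attribute__ boilerplate is omitted):

  /* Before: only a signed builtin exists, so the result must be cast.  */
  uint8x8_t
  vld1_u8 (const uint8_t *__a)
  {
    return (uint8x8_t)
      __builtin_aarch64_ld1v8qi ((const __builtin_aarch64_simd_qi *) __a);
  }

  /* After: the unsigned-qualified _us builtin returns uint8x8_t directly.  */
  uint8x8_t
  vld1_u8 (const uint8_t *__a)
  {
    return __builtin_aarch64_ld1v8qi_us (
      (const __builtin_aarch64_simd_qi *) __a);
  }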
 gcc/config/aarch64/aarch64-builtins.c        | 101
 gcc/config/aarch64/aarch64-simd-builtins.def |   6
 gcc/config/aarch64/arm_neon.h                |  84
 gcc/config/aarch64/iterators.md              |   3
 4 files changed, 115 insertions, 79 deletions
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index 03f09f64ae5..5d8af627bcf 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -372,10 +372,12 @@ aarch64_types_load1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
static enum aarch64_type_qualifiers
aarch64_types_load1_u_qualifiers[SIMD_MAX_BUILTIN_ARGS]
= { qualifier_unsigned, qualifier_const_pointer_map_mode };
+#define TYPES_LOAD1_U (aarch64_types_load1_u_qualifiers)
#define TYPES_LOADSTRUCT_U (aarch64_types_load1_u_qualifiers)
static enum aarch64_type_qualifiers
aarch64_types_load1_p_qualifiers[SIMD_MAX_BUILTIN_ARGS]
= { qualifier_poly, qualifier_const_pointer_map_mode };
+#define TYPES_LOAD1_P (aarch64_types_load1_p_qualifiers)
#define TYPES_LOADSTRUCT_P (aarch64_types_load1_p_qualifiers)
static enum aarch64_type_qualifiers
@@ -423,11 +425,12 @@ aarch64_types_store1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
static enum aarch64_type_qualifiers
aarch64_types_store1_u_qualifiers[SIMD_MAX_BUILTIN_ARGS]
= { qualifier_void, qualifier_pointer_map_mode, qualifier_unsigned };
+#define TYPES_STORE1_U (aarch64_types_store1_u_qualifiers)
#define TYPES_STORESTRUCT_U (aarch64_types_store1_u_qualifiers)
static enum aarch64_type_qualifiers
aarch64_types_store1_p_qualifiers[SIMD_MAX_BUILTIN_ARGS]
= { qualifier_void, qualifier_pointer_map_mode, qualifier_poly };
-#define TYPES_STORE1P (aarch64_types_store1_p_qualifiers)
+#define TYPES_STORE1_P (aarch64_types_store1_p_qualifiers)
#define TYPES_STORESTRUCT_P (aarch64_types_store1_p_qualifiers)
static enum aarch64_type_qualifiers
@@ -2590,47 +2593,83 @@ get_mem_type_for_load_store (unsigned int fcode)
{
switch (fcode)
{
- VAR1 (LOAD1, ld1 , 0, LOAD, v8qi)
- VAR1 (STORE1, st1 , 0, STORE, v8qi)
+ VAR1 (LOAD1, ld1, 0, LOAD, v8qi)
+ VAR1 (STORE1, st1, 0, STORE, v8qi)
return Int8x8_t;
- VAR1 (LOAD1, ld1 , 0, LOAD, v16qi)
- VAR1 (STORE1, st1 , 0, STORE, v16qi)
+ VAR1 (LOAD1, ld1, 0, LOAD, v16qi)
+ VAR1 (STORE1, st1, 0, STORE, v16qi)
return Int8x16_t;
- VAR1 (LOAD1, ld1 , 0, LOAD, v4hi)
- VAR1 (STORE1, st1 , 0, STORE, v4hi)
+ VAR1 (LOAD1, ld1, 0, LOAD, v4hi)
+ VAR1 (STORE1, st1, 0, STORE, v4hi)
return Int16x4_t;
- VAR1 (LOAD1, ld1 , 0, LOAD, v8hi)
- VAR1 (STORE1, st1 , 0, STORE, v8hi)
+ VAR1 (LOAD1, ld1, 0, LOAD, v8hi)
+ VAR1 (STORE1, st1, 0, STORE, v8hi)
return Int16x8_t;
- VAR1 (LOAD1, ld1 , 0, LOAD, v2si)
- VAR1 (STORE1, st1 , 0, STORE, v2si)
+ VAR1 (LOAD1, ld1, 0, LOAD, v2si)
+ VAR1 (STORE1, st1, 0, STORE, v2si)
return Int32x2_t;
- VAR1 (LOAD1, ld1 , 0, LOAD, v4si)
- VAR1 (STORE1, st1 , 0, STORE, v4si)
+ VAR1 (LOAD1, ld1, 0, LOAD, v4si)
+ VAR1 (STORE1, st1, 0, STORE, v4si)
return Int32x4_t;
- VAR1 (LOAD1, ld1 , 0, LOAD, v2di)
- VAR1 (STORE1, st1 , 0, STORE, v2di)
+ VAR1 (LOAD1, ld1, 0, LOAD, v2di)
+ VAR1 (STORE1, st1, 0, STORE, v2di)
return Int64x2_t;
- VAR1 (LOAD1, ld1 , 0, LOAD, v4hf)
- VAR1 (STORE1, st1 , 0, STORE, v4hf)
+ VAR1 (LOAD1_U, ld1, 0, LOAD, v8qi)
+ VAR1 (STORE1_U, st1, 0, STORE, v8qi)
+ return Uint8x8_t;
+ VAR1 (LOAD1_U, ld1, 0, LOAD, v16qi)
+ VAR1 (STORE1_U, st1, 0, STORE, v16qi)
+ return Uint8x16_t;
+ VAR1 (LOAD1_U, ld1, 0, LOAD, v4hi)
+ VAR1 (STORE1_U, st1, 0, STORE, v4hi)
+ return Uint16x4_t;
+ VAR1 (LOAD1_U, ld1, 0, LOAD, v8hi)
+ VAR1 (STORE1_U, st1, 0, STORE, v8hi)
+ return Uint16x8_t;
+ VAR1 (LOAD1_U, ld1, 0, LOAD, v2si)
+ VAR1 (STORE1_U, st1, 0, STORE, v2si)
+ return Uint32x2_t;
+ VAR1 (LOAD1_U, ld1, 0, LOAD, v4si)
+ VAR1 (STORE1_U, st1, 0, STORE, v4si)
+ return Uint32x4_t;
+ VAR1 (LOAD1_U, ld1, 0, LOAD, v2di)
+ VAR1 (STORE1_U, st1, 0, STORE, v2di)
+ return Uint64x2_t;
+ VAR1 (LOAD1_P, ld1, 0, LOAD, v8qi)
+ VAR1 (STORE1_P, st1, 0, STORE, v8qi)
+ return Poly8x8_t;
+ VAR1 (LOAD1_P, ld1, 0, LOAD, v16qi)
+ VAR1 (STORE1_P, st1, 0, STORE, v16qi)
+ return Poly8x16_t;
+ VAR1 (LOAD1_P, ld1, 0, LOAD, v4hi)
+ VAR1 (STORE1_P, st1, 0, STORE, v4hi)
+ return Poly16x4_t;
+ VAR1 (LOAD1_P, ld1, 0, LOAD, v8hi)
+ VAR1 (STORE1_P, st1, 0, STORE, v8hi)
+ return Poly16x8_t;
+ VAR1 (LOAD1_P, ld1, 0, LOAD, v2di)
+ VAR1 (STORE1_P, st1, 0, STORE, v2di)
+ return Poly64x2_t;
+ VAR1 (LOAD1, ld1, 0, LOAD, v4hf)
+ VAR1 (STORE1, st1, 0, STORE, v4hf)
return Float16x4_t;
- VAR1 (LOAD1, ld1 , 0, LOAD, v8hf)
- VAR1 (STORE1, st1 , 0, STORE, v8hf)
+ VAR1 (LOAD1, ld1, 0, LOAD, v8hf)
+ VAR1 (STORE1, st1, 0, STORE, v8hf)
return Float16x8_t;
- VAR1 (LOAD1, ld1 , 0, LOAD, v4bf)
- VAR1 (STORE1, st1 , 0, STORE, v4bf)
+ VAR1 (LOAD1, ld1, 0, LOAD, v4bf)
+ VAR1 (STORE1, st1, 0, STORE, v4bf)
return Bfloat16x4_t;
- VAR1 (LOAD1, ld1 , 0, LOAD, v8bf)
- VAR1 (STORE1, st1 , 0, STORE, v8bf)
+ VAR1 (LOAD1, ld1, 0, LOAD, v8bf)
+ VAR1 (STORE1, st1, 0, STORE, v8bf)
return Bfloat16x8_t;
- VAR1 (LOAD1, ld1 , 0, LOAD, v2sf)
- VAR1 (STORE1, st1 , 0, STORE, v2sf)
+ VAR1 (LOAD1, ld1, 0, LOAD, v2sf)
+ VAR1 (STORE1, st1, 0, STORE, v2sf)
return Float32x2_t;
- VAR1 (LOAD1, ld1 , 0, LOAD, v4sf)
- VAR1 (STORE1, st1 , 0, STORE, v4sf)
+ VAR1 (LOAD1, ld1, 0, LOAD, v4sf)
+ VAR1 (STORE1, st1, 0, STORE, v4sf)
return Float32x4_t;
- VAR1 (LOAD1, ld1 , 0, LOAD, v2df)
- VAR1 (STORE1, st1 , 0, STORE, v2df)
+ VAR1 (LOAD1, ld1, 0, LOAD, v2df)
+ VAR1 (STORE1, st1, 0, STORE, v2df)
return Float64x2_t;
default:
gcc_unreachable ();
@@ -2664,6 +2703,8 @@ aarch64_general_gimple_fold_builtin (unsigned int fcode, gcall *stmt,
/*lower store and load neon builtins to gimple. */
BUILTIN_VALL_F16 (LOAD1, ld1, 0, LOAD)
+ BUILTIN_VDQ_I (LOAD1_U, ld1, 0, LOAD)
+ BUILTIN_VALLP_NO_DI (LOAD1_P, ld1, 0, LOAD)
if (!BYTES_BIG_ENDIAN)
{
enum aarch64_simd_type mem_type
@@ -2686,6 +2727,8 @@ aarch64_general_gimple_fold_builtin (unsigned int fcode, gcall *stmt,
break;
BUILTIN_VALL_F16 (STORE1, st1, 0, STORE)
+ BUILTIN_VDQ_I (STORE1_U, st1, 0, STORE)
+ BUILTIN_VALLP_NO_DI (STORE1_P, st1, 0, STORE)
if (!BYTES_BIG_ENDIAN)
{
enum aarch64_simd_type mem_type
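A note on the gimple lowering above: because LOAD1_U and LOAD1_P (and
their STORE1 counterparts) are now listed in
aarch64_general_gimple_fold_builtin, unsigned and polynomial loads and
stores fold to plain vector accesses on little-endian targets, as the
signed and floating-point variants already did. A minimal sketch of
code that exercises the new path (the described fold is an expectation
drawn from the hunks above, not a dump shown in this commit):

  #include <arm_neon.h>

  uint8x8_t
  load_u8 (const uint8_t *p)
  {
    /* get_mem_type_for_load_store maps this LOAD1_U builtin to
       Uint8x8_t, so on little-endian targets the call should be
       folded to a vector MEM_REF in gimple rather than remaining an
       opaque builtin call.  */
    return vld1_u8 (p);
  }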
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index d9e9e115874..3a376310b4d 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -697,11 +697,13 @@
/* Implemented by aarch64_ld1<VALL_F16:mode>. */
BUILTIN_VALL_F16 (LOAD1, ld1, 0, LOAD)
- VAR1(STORE1P, ld1, 0, ALL, v2di)
+ BUILTIN_VDQ_I (LOAD1_U, ld1, 0, LOAD)
+ BUILTIN_VALLP_NO_DI (LOAD1_P, ld1, 0, LOAD)
/* Implemented by aarch64_st1<VALL_F16:mode>. */
BUILTIN_VALL_F16 (STORE1, st1, 0, STORE)
- VAR1 (STORE1P, st1, 0, STORE, v2di)
+ BUILTIN_VDQ_I (STORE1_U, st1, 0, STORE)
+ BUILTIN_VALLP_NO_DI (STORE1_P, st1, 0, STORE)
/* Implemented by fma<mode>4. */
BUILTIN_VHSDF (TERNOP, fma, 4, FP)
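Each BUILTIN_* line in the .def file expands once per mode of the
named iterator, encoding the qualifier as a suffix on the builtin
name. A sketch of what the new unsigned load line implies, matching
the seven Uint*_t cases added to get_mem_type_for_load_store:

  /* BUILTIN_VDQ_I (LOAD1_U, ld1, 0, LOAD) declares one builtin per
     integer mode:
       __builtin_aarch64_ld1v8qi_us   __builtin_aarch64_ld1v16qi_us
       __builtin_aarch64_ld1v4hi_us   __builtin_aarch64_ld1v8hi_us
       __builtin_aarch64_ld1v2si_us   __builtin_aarch64_ld1v4si_us
       __builtin_aarch64_ld1v2di_us  */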
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 743907e1741..3b00d940a76 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -14960,16 +14960,16 @@ __extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_p8 (const poly8_t *__a)
{
- return (poly8x8_t)
- __builtin_aarch64_ld1v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ return __builtin_aarch64_ld1v8qi_ps (
+ (const __builtin_aarch64_simd_qi *) __a);
}
__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_p16 (const poly16_t *__a)
{
- return (poly16x4_t)
- __builtin_aarch64_ld1v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ return __builtin_aarch64_ld1v4hi_ps (
+ (const __builtin_aarch64_simd_hi *) __a);
}
__extension__ extern __inline poly64x1_t
@@ -15011,24 +15011,24 @@ __extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_u8 (const uint8_t *__a)
{
- return (uint8x8_t)
- __builtin_aarch64_ld1v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ return __builtin_aarch64_ld1v8qi_us (
+ (const __builtin_aarch64_simd_qi *) __a);
}
__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_u16 (const uint16_t *__a)
{
- return (uint16x4_t)
- __builtin_aarch64_ld1v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ return __builtin_aarch64_ld1v4hi_us (
+ (const __builtin_aarch64_simd_hi *) __a);
}
__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_u32 (const uint32_t *__a)
{
- return (uint32x2_t)
- __builtin_aarch64_ld1v2si ((const __builtin_aarch64_simd_si *) __a);
+ return __builtin_aarch64_ld1v2si_us (
+ (const __builtin_aarch64_simd_si *) __a);
}
__extension__ extern __inline uint64x1_t
@@ -15278,24 +15278,24 @@ __extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_p8 (const poly8_t *__a)
{
- return (poly8x16_t)
- __builtin_aarch64_ld1v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ return __builtin_aarch64_ld1v16qi_ps (
+ (const __builtin_aarch64_simd_qi *) __a);
}
__extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_p16 (const poly16_t *__a)
{
- return (poly16x8_t)
- __builtin_aarch64_ld1v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ return __builtin_aarch64_ld1v8hi_ps (
+ (const __builtin_aarch64_simd_hi *) __a);
}
__extension__ extern __inline poly64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_p64 (const poly64_t *__a)
{
- return (poly64x2_t)
- __builtin_aarch64_ld1v2di ((const __builtin_aarch64_simd_di *) __a);
+ return __builtin_aarch64_ld1v2di_ps (
+ (const __builtin_aarch64_simd_di *) __a);
}
__extension__ extern __inline int8x16_t
@@ -15330,8 +15330,8 @@ __extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_u8 (const uint8_t *__a)
{
- return (uint8x16_t)
- __builtin_aarch64_ld1v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ return __builtin_aarch64_ld1v16qi_us (
+ (const __builtin_aarch64_simd_qi *) __a);
}
__extension__ extern __inline uint8x8x2_t
@@ -15549,24 +15549,24 @@ __extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_u16 (const uint16_t *__a)
{
- return (uint16x8_t)
- __builtin_aarch64_ld1v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ return __builtin_aarch64_ld1v8hi_us (
+ (const __builtin_aarch64_simd_hi *) __a);
}
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_u32 (const uint32_t *__a)
{
- return (uint32x4_t)
- __builtin_aarch64_ld1v4si ((const __builtin_aarch64_simd_si *) __a);
+ return __builtin_aarch64_ld1v4si_us (
+ (const __builtin_aarch64_simd_si *) __a);
}
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_u64 (const uint64_t *__a)
{
- return (uint64x2_t)
- __builtin_aarch64_ld1v2di ((const __builtin_aarch64_simd_di *) __a);
+ return __builtin_aarch64_ld1v2di_us (
+ (const __builtin_aarch64_simd_di *) __a);
}
/* vld1(q)_x4. */
@@ -24624,16 +24624,14 @@ __extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_p8 (poly8_t *__a, poly8x8_t __b)
{
- __builtin_aarch64_st1v8qi ((__builtin_aarch64_simd_qi *) __a,
- (int8x8_t) __b);
+ __builtin_aarch64_st1v8qi_sp ((__builtin_aarch64_simd_qi *) __a, __b);
}
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_p16 (poly16_t *__a, poly16x4_t __b)
{
- __builtin_aarch64_st1v4hi ((__builtin_aarch64_simd_hi *) __a,
- (int16x4_t) __b);
+ __builtin_aarch64_st1v4hi_sp ((__builtin_aarch64_simd_hi *) __a, __b);
}
__extension__ extern __inline void
@@ -24675,24 +24673,21 @@ __extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_u8 (uint8_t *__a, uint8x8_t __b)
{
- __builtin_aarch64_st1v8qi ((__builtin_aarch64_simd_qi *) __a,
- (int8x8_t) __b);
+ __builtin_aarch64_st1v8qi_su ((__builtin_aarch64_simd_qi *) __a, __b);
}
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_u16 (uint16_t *__a, uint16x4_t __b)
{
- __builtin_aarch64_st1v4hi ((__builtin_aarch64_simd_hi *) __a,
- (int16x4_t) __b);
+ __builtin_aarch64_st1v4hi_su ((__builtin_aarch64_simd_hi *) __a, __b);
}
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_u32 (uint32_t *__a, uint32x2_t __b)
{
- __builtin_aarch64_st1v2si ((__builtin_aarch64_simd_si *) __a,
- (int32x2_t) __b);
+ __builtin_aarch64_st1v2si_su ((__builtin_aarch64_simd_si *) __a, __b);
}
__extension__ extern __inline void
@@ -24729,24 +24724,21 @@ __extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_p8 (poly8_t *__a, poly8x16_t __b)
{
- __builtin_aarch64_st1v16qi ((__builtin_aarch64_simd_qi *) __a,
- (int8x16_t) __b);
+ __builtin_aarch64_st1v16qi_sp ((__builtin_aarch64_simd_qi *) __a, __b);
}
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_p16 (poly16_t *__a, poly16x8_t __b)
{
- __builtin_aarch64_st1v8hi ((__builtin_aarch64_simd_hi *) __a,
- (int16x8_t) __b);
+ __builtin_aarch64_st1v8hi_sp ((__builtin_aarch64_simd_hi *) __a, __b);
}
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_p64 (poly64_t *__a, poly64x2_t __b)
{
- __builtin_aarch64_st1v2di_sp ((__builtin_aarch64_simd_di *) __a,
- (poly64x2_t) __b);
+ __builtin_aarch64_st1v2di_sp ((__builtin_aarch64_simd_di *) __a, __b);
}
__extension__ extern __inline void
@@ -24781,32 +24773,28 @@ __extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_u8 (uint8_t *__a, uint8x16_t __b)
{
- __builtin_aarch64_st1v16qi ((__builtin_aarch64_simd_qi *) __a,
- (int8x16_t) __b);
+ __builtin_aarch64_st1v16qi_su ((__builtin_aarch64_simd_qi *) __a, __b);
}
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_u16 (uint16_t *__a, uint16x8_t __b)
{
- __builtin_aarch64_st1v8hi ((__builtin_aarch64_simd_hi *) __a,
- (int16x8_t) __b);
+ __builtin_aarch64_st1v8hi_su ((__builtin_aarch64_simd_hi *) __a, __b);
}
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_u32 (uint32_t *__a, uint32x4_t __b)
{
- __builtin_aarch64_st1v4si ((__builtin_aarch64_simd_si *) __a,
- (int32x4_t) __b);
+ __builtin_aarch64_st1v4si_su ((__builtin_aarch64_simd_si *) __a, __b);
}
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_u64 (uint64_t *__a, uint64x2_t __b)
{
- __builtin_aarch64_st1v2di ((__builtin_aarch64_simd_di *) __a,
- (int64x2_t) __b);
+ __builtin_aarch64_st1v2di_su ((__builtin_aarch64_simd_di *) __a, __b);
}
/* vst1_lane */
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index e8eebd863a6..b19dc5c2177 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -207,6 +207,9 @@
;; All Advanced SIMD polynomial modes and DI.
(define_mode_iterator VALLP [V8QI V16QI V4HI V8HI V2DI DI])
+;; All Advanced SIMD polynomial modes.
+(define_mode_iterator VALLP_NO_DI [V8QI V16QI V4HI V8HI V2DI])
+
;; Advanced SIMD modes for Integer reduction across lanes.
(define_mode_iterator VDQV [V8QI V16QI V4HI V8HI V4SI V2DI])
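For reference, the polynomial declarations expand over this new
iterator; the mode-to-builtin mapping implied by the arm_neon.h hunk
above is (a sketch):

;; BUILTIN_VALLP_NO_DI (LOAD1_P, ld1, 0, LOAD) yields one builtin per mode:
;;   V8QI  -> __builtin_aarch64_ld1v8qi_ps   (poly8x8_t)
;;   V16QI -> __builtin_aarch64_ld1v16qi_ps  (poly8x16_t)
;;   V4HI  -> __builtin_aarch64_ld1v4hi_ps   (poly16x4_t)
;;   V8HI  -> __builtin_aarch64_ld1v8hi_ps   (poly16x8_t)
;;   V2DI  -> __builtin_aarch64_ld1v2di_ps   (poly64x2_t)

DI is excluded from the iterator: the poly64x1_t intrinsics
(vld1_p64/vst1_p64) are left unchanged by this patch.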