author     Yvan Roux <yvan.roux@linaro.org>              2015-10-26 13:36:18 +0100
committer  Linaro Code Review <review@review.linaro.org>  2015-10-27 19:49:00 +0000
commit     d2c0c9b163372623969620283187c5848e479c4e (patch)
tree       036a9b66b3b9c86ed5b893a4bc8fb7797b551783 /gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vldX_lane.c
parent     16d999dc7dfa8ddde9598eb0564f881fa9e0a8aa (diff)
gcc/
Backport from trunk r226346.
2015-07-29  Alan Lawrence  <alan.lawrence@arm.com>

* config/aarch64/aarch64-builtins.c (aarch64_fp16_type_node): New.
(aarch64_init_builtins): Make aarch64_fp16_type_node, use for __fp16.
* config/aarch64/aarch64-modes.def: Add HFmode.
* config/aarch64/aarch64.h (TARGET_CPU_CPP_BUILTINS): Define __ARM_FP16_FORMAT_IEEE and __ARM_FP16_ARGS. Set bit 1 of __ARM_FP.
* config/aarch64/aarch64.c (aarch64_init_libfuncs, aarch64_promoted_type): New.
(aarch64_float_const_representable_p): Disable HFmode.
(aarch64_mangle_type): Mangle half-precision floats to "Dh".
(TARGET_PROMOTED_TYPE): Define to aarch64_promoted_type.
(TARGET_INIT_LIBFUNCS): Define to aarch64_init_libfuncs.
* config/aarch64/aarch64.md (mov<mode>): Include HFmode using GPF_F16.
(movhf_aarch64, extendhfsf2, extendhfdf2, truncsfhf2, truncdfhf2): New.
* config/aarch64/iterators.md (GPF_F16): New.

gcc/testsuite/
Backport from trunk r226346.
2015-07-29  Alan Lawrence  <alan.lawrence@arm.com>

* gcc.target/aarch64/f16_movs_1.c: New test.

gcc/testsuite/
Backport from trunk r226350.
2015-07-29  Alan Lawrence  <alan.lawrence@arm.com>

* gcc.target/aarch64/fp16/fp16.exp: New.
* gcc.target/aarch64/fp16/f16_convs_1.c: New.
* gcc.target/aarch64/fp16/f16_convs_2.c: New.

gcc/
Backport from trunk r227033.
2015-08-20  Alan Lawrence  <alan.lawrence@arm.com>

* config/arm/arm-builtins.c (arm_init_simd_builtin_types): Move initialization of HFmode scalar type (float16_t) to...
(arm_init_fp16_builtins): ...here. Combine with __fp16 initialization code.
(arm_init_builtins): Call arm_init_fp16_builtins earlier and always.
* config/arm/arm_neon.h (vcvt_f16_f32, vcvt_f32_f16): Condition on having an -mfp16-format.

gcc/testsuite/
Backport from trunk r227033.
2015-08-20  Alan Lawrence  <alan.lawrence@arm.com>

* lib/target-supports.exp (check_effective_target_arm_neon_fp16_ok_nocache): Add flag variants with -mfp16-format=ieee.

gcc/
Backport from trunk r227535, r227558.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* config/arm/arm_neon.h (float16_t, vget_lane_f16, vset_lane_f16, vcreate_f16, vld1_lane_f16, vld1_dup_f16, vreinterpret_p8_f16, vreinterpret_p16_f16, vreinterpret_f16_p8, vreinterpret_f16_p16, vreinterpret_f16_f32, vreinterpret_f16_p64, vreinterpret_f16_s64, vreinterpret_f16_u64, vreinterpret_f16_s8, vreinterpret_f16_s16, vreinterpret_f16_s32, vreinterpret_f16_u8, vreinterpret_f16_u16, vreinterpret_f16_u32, vreinterpret_f32_f16, vreinterpret_p64_f16, vreinterpret_s64_f16, vreinterpret_u64_f16, vreinterpret_s8_f16, vreinterpret_s16_f16, vreinterpret_s32_f16, vreinterpret_u8_f16, vreinterpret_u16_f16, vreinterpret_u32_f16): New.

gcc/
Backport from trunk r227536, r227558.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* config/arm/arm.h (VALID_NEON_QREG_MODE): Add V8HFmode.
* config/arm/arm.c (arm_vector_mode_supported_p): Support V8HFmode.
* config/arm/arm-builtins.c (v8hf_UP): New.
(arm_init_simd_builtin_types): Initialise Float16x8_t.
* config/arm/arm-simd-builtin-types.def (Float16x8_t): New.
* config/arm/arm_neon.h (float16x8_t): New typedef.

gcc/
Backport from trunk r227538, r227558.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* config/arm/arm_neon.h (vgetq_lane_f16, vsetq_lane_f16, vld1q_lane_f16, vld1q_dup_f16, vreinterpretq_p8_f16, vreinterpretq_p16_f16, vreinterpretq_f16_p8, vreinterpretq_f16_p16, vreinterpretq_f16_f32, vreinterpretq_f16_p64, vreinterpretq_f16_p128, vreinterpretq_f16_s64, vreinterpretq_f16_u64, vreinterpretq_f16_s8, vreinterpretq_f16_s16, vreinterpretq_f16_s32, vreinterpretq_f16_u8, vreinterpretq_f16_u16, vreinterpretq_f16_u32, vreinterpretq_f32_f16, vreinterpretq_p64_f16, vreinterpretq_p128_f16, vreinterpretq_s64_f16, vreinterpretq_u64_f16, vreinterpretq_s8_f16, vreinterpretq_s16_f16, vreinterpretq_s32_f16, vreinterpretq_u8_f16, vreinterpretq_u16_f16, vreinterpretq_u32_f16): New.

gcc/
Backport from trunk r227541.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* config/arm/arm-builtins.c (VAR11, VAR12): New.
* config/arm/arm_neon_builtins.def (vcombine, vld2_dup, vld3_dup, vld4_dup): Add v4hf variant.
(vget_high, vget_low): Add v8hf variant.
(vld1, vst1, vst1_lane, vld2, vld2_lane, vst2, vst2_lane, vld3, vld3_lane, vst3, vst3_lane, vld4, vld4_lane, vst4, vst4_lane): Add v4hf and v8hf variants.
* config/arm/iterators.md (VD_LANE, VD_RE, VQ2, VQ_HS): New.
(VDX): Add V4HF.
(V_DOUBLE): Add case for V4HF.
(VQX): Add V8HF.
(V_HALF): Add case for V8HF.
(VDQX): Add V4HF, V8HF.
(V_elem, V_two_elem, V_three_elem, V_four_elem, V_cmp_result, V_uf_sclr, V_sz_elem, V_mode_nunits, q): Add cases for V4HF & V8HF.
* config/arm/neon.md (vec_set<mode>internal, vec_extract<mode>, neon_vget_lane<mode>_sext_internal, neon_vget_lane<mode>_zext_internal, vec_load_lanesoi<mode>, neon_vld2<mode>, vec_store_lanesoi<mode>, neon_vst2<mode>, vec_load_lanesci<mode>, neon_vld3<mode>, neon_vld3qa<mode>, neon_vld3qb<mode>, vec_store_lanesci<mode>, neon_vst3<mode>, neon_vst3qa<mode>, neon_vst3qb<mode>, vec_load_lanesxi<mode>, neon_vld4<mode>, neon_vld4qa<mode>, neon_vld4qb<mode>, vec_store_lanesxi<mode>, neon_vst4<mode>, neon_vst4qa<mode>, neon_vst4qb<mode>): Change VQ iterator to VQ2.
(neon_vcreate, neon_vreinterpretv8qi<mode>, neon_vreinterpretv4hi<mode>, neon_vreinterpretv2si<mode>, neon_vreinterpretv2sf<mode>, neon_vreinterpretdi<mode>): Change VDX to VD_RE.
(neon_vld2_lane<mode>, neon_vst2_lane<mode>, neon_vld3_lane<mode>, neon_vst3_lane<mode>, neon_vld4_lane<mode>, neon_vst4_lane<mode>): Change VD iterator to VD_LANE, and VMQ iterator to VQ_HS.
* config/arm/arm_neon.h (float16x4x2_t, float16x8x2_t, float16x4x3_t, float16x8x3_t, float16x4x4_t, float16x8x4_t, vcombine_f16, vget_high_f16, vget_low_f16, vld1_f16, vld1q_f16, vst1_f16, vst1q_f16, vst1_lane_f16, vst1q_lane_f16, vld2_f16, vld2q_f16, vld2_lane_f16, vld2q_lane_f16, vld2_dup_f16, vst2_f16, vst2q_f16, vst2_lane_f16, vst2q_lane_f16, vld3_f16, vld3q_f16, vld3_lane_f16, vld3q_lane_f16, vld3_dup_f16, vst3_f16, vst3q_f16, vst3_lane_f16, vst3q_lane_f16, vld4_f16, vld4q_f16, vld4_lane_f16, vld4q_lane_f16, vld4_dup_f16, vst4_f16, vst4q_f16, vst4_lane_f16, vst4q_lane_f16): New.

gcc/
Backport from trunk r227542.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* config/aarch64/aarch64.c (aarch64_vector_mode_supported_p): Support V4HFmode and V8HFmode.
(aarch64_split_simd_move): Add case for V8HFmode.
* config/aarch64/aarch64-builtins.c (v4hf_UP, v8hf_UP): Define.
(aarch64_simd_builtin_std_type): Handle HFmode.
(aarch64_init_simd_builtin_types): Include Float16x4_t and Float16x8_t.
* config/aarch64/aarch64-simd.md (mov<mode>, aarch64_get_lane<mode>, aarch64_ld1<VALL:mode>, aarch64_st1<VALL:mode>): Use VALL_F16 iterator.
(aarch64_be_ld1<mode>, aarch64_be_st1<mode>): Use VALLDI_F16 iterator.
* config/aarch64/aarch64-simd-builtin-types.def: Add Float16x4_t, Float16x8_t.
* config/aarch64/aarch64-simd-builtins.def (ld1, st1): Use VALL_F16.
* config/aarch64/arm_neon.h (float16x4_t, float16x8_t, float16_t): New typedefs.
(vget_lane_f16, vgetq_lane_f16, vset_lane_f16, vsetq_lane_f16, vld1_f16, vld1q_f16, vst1_f16, vst1q_f16, vst1_lane_f16, vst1q_lane_f16): New.
* config/aarch64/iterators.md (VD, VQ, VQ_NO2E): Add vectors of HFmode.
(VALLDI_F16, VALL_F16): New.
(Vmtype, VEL, VCONQ, VHALF, V_TWO_ELEM, V_THREE_ELEM, V_FOUR_ELEM, q): Add cases for V4HF and V8HF.
(VDBL, VRL2, VRL3, VRL4): Add V4HF case.

gcc/testsuite/
Backport from trunk r227542.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* g++.dg/abi/mangle-neon-aarch64.C: Add cases for float16x4_t and float16x8_t.
* gcc.target/aarch64/vset_lane_1.c: Likewise.
* gcc.target/aarch64/vld1-vst1_1.c: Likewise.
* gcc.target/aarch64/vld1_lane.c: Likewise.

gcc/
Backport from trunk r227543.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* config/aarch64/aarch64.c (aarch64_split_simd_combine): Add V4HFmode.
* config/aarch64/aarch64-builtins.c (VAR13, VAR14): New.
(aarch64_scalar_builtin_types, aarch64_init_simd_builtin_scalar_types): Add __builtin_aarch64_simd_hf.
* config/aarch64/arm_neon.h (float16x4x2_t, float16x8x2_t, float16x4x3_t, float16x8x3_t, float16x4x4_t, float16x8x4_t, vcombine_f16, vst2_lane_f16, vst2q_lane_f16, vst3_lane_f16, vst3q_lane_f16, vst4_lane_f16, vst4q_lane_f16, vld2_f16, vld2q_f16, vld3_f16, vld3q_f16, vld4_f16, vld4q_f16, vld2_dup_f16, vld2q_dup_f16, vld3_dup_f16, vld3q_dup_f16, vld4_dup_f16, vld4q_dup_f16, vld2_lane_f16, vld2q_lane_f16, vld3_lane_f16, vld3q_lane_f16, vld4_lane_f16, vld4q_lane_f16, vst2_f16, vst2q_f16, vst3_f16, vst3q_f16, vst4_f16, vst4q_f16, vcreate_f16): New.
* config/aarch64/iterators.md (VALLDIF, Vtype, Vetype, Vbtype, V_cmp_result, v_cmp_result): Add cases for V4HF and V8HF.
(VDC, Vdbl): Add V4HF.

gcc/testsuite/
Backport from trunk r227543.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* gcc.target/aarch64/vldN_1.c: Add float16x4_t and float16x8_t cases.
* gcc.target/aarch64/vldN_dup_1.c: Likewise.
* gcc.target/aarch64/vldN_lane_1.c: Likewise.
(main): Update orig_data to avoid float16 NaN on bigendian.

gcc/
Backport from trunk r227545.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* config/aarch64/aarch64-simd.md (aarch64_float_truncate_lo_v2sf): Reparameterize to...
(aarch64_float_truncate_lo_<mode>): ...this, for both V2SF and V4HF.
(aarch64_float_truncate_hi_v4sf): Reparameterize to...
(aarch64_float_truncate_hi_<Vdbl>): ...this, for both V4SF and V8HF.
* config/aarch64/aarch64-simd-builtins.def (float_truncate_hi_): Add v8hf variant.
(float_truncate_lo_): Use BUILTIN_VDF iterator.
* config/aarch64/arm_neon.h (vcvt_f16_f32, vcvt_high_f16_f32): New.
* config/aarch64/iterators.md (VDF, Vdtype): New.
(VWIDE, Vmwtype): Add cases for V4HF and V2SF.

gcc/
Backport from trunk r227546.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* config/aarch64/arm_neon.h (vreinterpret_p8_f16, vreinterpret_p16_f16, vreinterpret_f16_f64, vreinterpret_f16_s8, vreinterpret_f16_s16, vreinterpret_f16_s32, vreinterpret_f16_s64, vreinterpret_f16_f32, vreinterpret_f16_u8, vreinterpret_f16_u16, vreinterpret_f16_u32, vreinterpret_f16_u64, vreinterpret_f16_p8, vreinterpret_f16_p16, vreinterpretq_f16_f64, vreinterpretq_f16_s8, vreinterpretq_f16_s16, vreinterpretq_f16_s32, vreinterpretq_f16_s64, vreinterpretq_f16_f32, vreinterpretq_f16_u8, vreinterpretq_f16_u16, vreinterpretq_f16_u32, vreinterpretq_f16_u64, vreinterpretq_f16_p8, vreinterpretq_f16_p16, vreinterpret_f32_f16, vreinterpret_f64_f16, vreinterpret_s64_f16, vreinterpret_u64_f16, vreinterpretq_u64_f16, vreinterpret_s8_f16, vreinterpret_s16_f16, vreinterpret_s32_f16, vreinterpret_u8_f16, vreinterpret_u16_f16, vreinterpret_u32_f16, vreinterpretq_p8_f16, vreinterpretq_p16_f16, vreinterpretq_f32_f16, vreinterpretq_f64_f16, vreinterpretq_s64_f16, vreinterpretq_s8_f16, vreinterpretq_s16_f16, vreinterpretq_s32_f16, vreinterpretq_u8_f16, vreinterpretq_u16_f16, vreinterpretq_u32_f16, vget_low_f16, vget_high_f16, vld1_dup_f16, vld1q_dup_f16): New.

gcc/testsuite/
Backport from trunk r227546.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* gcc.target/aarch64/vget_high_1.c: Add float16x8->float16x4 case.
* gcc.target/aarch64/vget_low_1.c: Likewise.

gcc/
Backport from trunk r227550.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* config/aarch64/aarch64-simd.md (aarch64_simd_dup<mode>, aarch64_dup_lane<mode>, aarch64_dup_lane_<vswap_width_name><mode>, aarch64_simd_vec_set<mode>, vec_set<mode>, vec_perm_const<mode>, vec_init<mode>, *aarch64_simd_ld1r<mode>, vec_extract<mode>): Add V4HF and V8HF variants to iterator.
* config/aarch64/aarch64.c (aarch64_evpc_dup): Add V4HF and V8HF cases.
* config/aarch64/iterators.md (VDQF_F16): New.
(VSWAP_WIDTH, vswap_width_name): Add V4HF and V8HF cases.

gcc/
Backport from trunk r227551.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* config/aarch64/aarch64-simd.md (aarch64_simd_vec_unpacks_lo_<mode>, aarch64_simd_vec_unpacks_hi_<mode>): New insn.
(vec_unpacks_lo_v4sf, vec_unpacks_hi_v4sf): Delete insn.
(vec_unpacks_lo_<mode>, vec_unpacks_hi_<mode>): New expand.
(aarch64_float_extend_lo_v2df): Rename to...
(aarch64_float_extend_lo_<Vwide>): ...this, using VDF and so adding V4SF.
* config/aarch64/aarch64-simd-builtins.def (vec_unpacks_hi): Add v8hf.
(float_extend_lo): Add v4sf.
* config/aarch64/arm_neon.h (vcvt_f32_f16, vcvt_high_f32_f16): New.
* config/aarch64/iterators.md (VQ_HSF): New iterator.
(VWIDE, Vwtype, Vhalftype): Add V8HF, V4SF.
(Vwide): New mode_attr.

gcc/
Backport from trunk r227552.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* fold-const.c (native_interpret_real): Fix HFmode for bigendian where UNITS_PER_WORD >= 4.

gcc/testsuite/
Backport from trunk r227554.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* gcc.target/aarch64/advsimd-intrinsics/advsimd-intrinsics.exp: Set additional_flags for neon-fp16 if supported, else fall back to neon.
* gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h (hfloat16_t): New.
(result, expected, clean_results, DECL_VARIABLE_64BITS_VARIANTS, DECL_VARIABLE_128BITS_VARIANTS): Add float16x4_t and float16x8_t cases if supported.
(CHECK_RESULTS): Redefine using CHECK_RESULTS_NAMED.
(CHECK_RESULTS_NAMED): Move body to CHECK_RESULTS_NAMED_NO_FP16; redefine in terms of CHECK_RESULTS_NAMED_NO_FP16 with float16 variants when those are supported.
(CHECK_RESULTS_NAMED_NO_FP16, CHECK_RESULTS_NO_FP16): New.
(vdup_n_f16): New.
* gcc.target/aarch64/advsimd-intrinsics/compute-ref-data.h (buffer, buffer_pad, buffer_dup, buffer_dup_pad): Add float16x4_t and float16x8_t cases if supported.
* gcc.target/aarch64/advsimd-intrinsics/vbsl.c (exec_vbsl): Use CHECK_RESULTS_NO_FP16 in place of CHECK_RESULTS.
* gcc.target/aarch64/advsimd-intrinsics/vdup-vmov.c (exec_vdup_vmov): Likewise.
* gcc.target/aarch64/advsimd-intrinsics/vdup_lane.c (exec_vdup_lane): Likewise.
* gcc.target/aarch64/advsimd-intrinsics/vext.c (exec_vext): Likewise.
* gcc.target/aarch64/advsimd-intrinsics/vcombine.c (expected): Add float16x8_t case.
(main, exec_vcombine): Test float16x4_t -> float16x8_t, if supported.
* gcc.target/aarch64/advsimd-intrinsics/vcreate.c (expected, main, exec_vcreate): Likewise.
* gcc.target/aarch64/advsimd-intrinsics/vget_high.c (expected, exec_vget_high): Likewise.
* gcc.target/aarch64/advsimd-intrinsics/vget_low.c (expected, exec_vget_low): Likewise.
* gcc.target/aarch64/advsimd-intrinsics/vld1.c (expected, exec_vld1): Likewise.
* gcc.target/aarch64/advsimd-intrinsics/vld1_dup.c (expected, exec_vld1_dup): Likewise.
* gcc.target/aarch64/advsimd-intrinsics/vld1_lane.c (expected, exec_vld1_lane): Likewise.
* gcc.target/aarch64/advsimd-intrinsics/vldX.c (expected, exec_vldX): Likewise.
* gcc.target/aarch64/advsimd-intrinsics/vldX_dup.c (expected, exec_vldX_dup): Likewise.
* gcc.target/aarch64/advsimd-intrinsics/vldX_lane.c (expected, exec_vldX_lane): Likewise.
* gcc.target/aarch64/advsimd-intrinsics/vset_lane.c (expected, exec_vset_lane): Likewise.
* gcc.target/aarch64/advsimd-intrinsics/vst1_lane.c (expected, exec_vst1_lane): Likewise.

gcc/testsuite/
Backport from trunk r227555.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* gcc.target/aarch64/advsimd-intrinsics/vcvt_f16.c: New.
* lib/target-supports.exp (check_effective_target_arm_neon_fp16_hw): New.

gcc/
Backport from trunk r227556.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

* doc/sourcebuild.texi (arm_neon_fp16): Correct cross-reference.
(arm_neon_fp16_ok): Document adding of -mfp16-format=ieee flag.
(arm_neon_fp16_hw): New.

gcc/testsuite/
Backport from trunk r227557.
2015-09-08  Alan Lawrence  <alan.lawrence@arm.com>

PR target/63870
* gcc.target/aarch64/advsimd-intrinsics/vld2_lane_f16_indices_1.c: New.
* gcc.target/aarch64/advsimd-intrinsics/vld2q_lane_f16_indices_1.c: New.
* gcc.target/aarch64/advsimd-intrinsics/vld3_lane_f16_indices_1.c: New.
* gcc.target/aarch64/advsimd-intrinsics/vld3q_lane_f16_indices_1.c: New.
* gcc.target/aarch64/advsimd-intrinsics/vld4_lane_f16_indices_1.c: New.
* gcc.target/aarch64/advsimd-intrinsics/vld4q_lane_f16_indices_1.c: New.
* gcc.target/aarch64/advsimd-intrinsics/vst2_lane_f16_indices_1.c: New.
* gcc.target/aarch64/advsimd-intrinsics/vst2q_lane_f16_indices_1.c: New.
* gcc.target/aarch64/advsimd-intrinsics/vst3_lane_f16_indices_1.c: New.
* gcc.target/aarch64/advsimd-intrinsics/vst3q_lane_f16_indices_1.c: New.
* gcc.target/aarch64/advsimd-intrinsics/vst4_lane_f16_indices_1.c: New.
* gcc.target/aarch64/advsimd-intrinsics/vst4q_lane_f16_indices_1.c: New.

Change-Id: I20b8ee88a73373649cac10a0ffecc58e7e11acff
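For context, the test changes below exercise the half-precision lane-wise structure loads this series adds to arm_neon.h. A minimal usage sketch, not part of the patch (the function name is invented; the FP16 guard mirrors the one the tests use):

#include <arm_neon.h>

#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
/* Reload lane 2 of each of the two float16x4_t vectors in CHUNK from
   two consecutive float16_t values at BUF; all other lanes are kept.  */
float16x4x2_t
reload_f16_lane2 (const float16_t *buf, float16x4x2_t chunk)
{
  return vld2_lane_f16 (buf, chunk, 2);
}
#endif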
Diffstat (limited to 'gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vldX_lane.c')
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vldX_lane.c  93
1 file changed, 86 insertions(+), 7 deletions(-)
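Every hunk below follows the same shape: each existing catch-all macro is renamed to a *_NO_FP16 variant, then recomposed with the float16x4_t/float16x8_t cases added only when the compiler provides an __fp16 format. A schematic sketch of the pattern (hypothetical macro names, not the test's actual ones):

/* Keep the existing cases in a _NO_FP16 core...  */
#define DECL_ALL_NO_FP16(X)  DECL_ONE(int, 8, 8, X); DECL_ONE(float, 32, 2, X)
/* ...and layer the float16 cases on top only when FP16 is available.  */
#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
#define DECL_ALL(X)  DECL_ALL_NO_FP16(X); DECL_ONE(float, 16, 4, X)
#else
#define DECL_ALL(X)  DECL_ALL_NO_FP16(X)
#endif

Callers that must not check float16 results can keep the _NO_FP16 spelling (as vbsl.c and friends do, per the r227554 entry above); everything else picks up the new cases automatically.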
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vldX_lane.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vldX_lane.c
index 2f2e62f0e3e..33b0eafbadb 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vldX_lane.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vldX_lane.c
@@ -18,6 +18,7 @@ VECT_VAR_DECL(expected_vld2_0,poly,8,8) [] = { 0xaa, 0xaa, 0xaa, 0xaa,
0xaa, 0xaa, 0xaa, 0xaa };
VECT_VAR_DECL(expected_vld2_0,poly,16,4) [] = { 0xaaaa, 0xaaaa,
0xaaaa, 0xaaaa };
+VECT_VAR_DECL(expected_vld2_0,hfloat,16,4) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld2_0,hfloat,32,2) [] = { 0xc1800000, 0xc1700000 };
VECT_VAR_DECL(expected_vld2_0,int,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
@@ -29,6 +30,8 @@ VECT_VAR_DECL(expected_vld2_0,uint,32,4) [] = { 0xfffffff0, 0xfffffff1,
0xaaaaaaaa, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld2_0,poly,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
+VECT_VAR_DECL(expected_vld2_0,hfloat,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
+ 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld2_0,hfloat,32,4) [] = { 0xaaaaaaaa, 0xaaaaaaaa,
0xaaaaaaaa, 0xaaaaaaaa };
@@ -44,6 +47,7 @@ VECT_VAR_DECL(expected_vld2_1,uint,32,2) [] = { 0xfffffff0, 0xfffffff1 };
VECT_VAR_DECL(expected_vld2_1,poly,8,8) [] = { 0xf0, 0xf1, 0xaa, 0xaa,
0xaa, 0xaa, 0xaa, 0xaa };
VECT_VAR_DECL(expected_vld2_1,poly,16,4) [] = { 0xaaaa, 0xaaaa, 0xfff0, 0xfff1 };
+VECT_VAR_DECL(expected_vld2_1,hfloat,16,4) [] = { 0xcc00, 0xcb80, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld2_1,hfloat,32,2) [] = { 0xaaaaaaaa, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld2_1,int,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
0xfff0, 0xfff1, 0xaaaa, 0xaaaa };
@@ -55,6 +59,8 @@ VECT_VAR_DECL(expected_vld2_1,uint,32,4) [] = { 0xaaaaaaaa, 0xaaaaaaaa,
0xaaaaaaaa, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld2_1,poly,16,8) [] = { 0xaaaa, 0xaaaa, 0xfff0, 0xfff1,
0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
+VECT_VAR_DECL(expected_vld2_1,hfloat,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
+ 0xcc00, 0xcb80, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld2_1,hfloat,32,4) [] = { 0xc1800000, 0xc1700000,
0xaaaaaaaa, 0xaaaaaaaa };
@@ -70,6 +76,7 @@ VECT_VAR_DECL(expected_vld3_0,uint,32,2) [] = { 0xaaaaaaaa, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld3_0,poly,8,8) [] = { 0xaa, 0xaa, 0xaa, 0xaa,
0xaa, 0xaa, 0xaa, 0xaa };
VECT_VAR_DECL(expected_vld3_0,poly,16,4) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
+VECT_VAR_DECL(expected_vld3_0,hfloat,16,4) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld3_0,hfloat,32,2) [] = { 0xc1800000, 0xc1700000 };
VECT_VAR_DECL(expected_vld3_0,int,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
@@ -81,6 +88,8 @@ VECT_VAR_DECL(expected_vld3_0,uint,32,4) [] = { 0xfffffff0, 0xfffffff1,
0xfffffff2, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld3_0,poly,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
+VECT_VAR_DECL(expected_vld3_0,hfloat,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
+ 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld3_0,hfloat,32,4) [] = { 0xaaaaaaaa, 0xaaaaaaaa,
0xaaaaaaaa, 0xaaaaaaaa };
@@ -96,6 +105,7 @@ VECT_VAR_DECL(expected_vld3_1,uint,32,2) [] = { 0xaaaaaaaa, 0xfffffff0 };
VECT_VAR_DECL(expected_vld3_1,poly,8,8) [] = { 0xaa, 0xaa, 0xaa, 0xaa,
0xf0, 0xf1, 0xf2, 0xaa };
VECT_VAR_DECL(expected_vld3_1,poly,16,4) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
+VECT_VAR_DECL(expected_vld3_1,hfloat,16,4) [] = { 0xaaaa, 0xaaaa, 0xcc00, 0xcb80 };
VECT_VAR_DECL(expected_vld3_1,hfloat,32,2) [] = { 0xc1600000, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld3_1,int,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
@@ -107,6 +117,8 @@ VECT_VAR_DECL(expected_vld3_1,uint,32,4) [] = { 0xaaaaaaaa, 0xaaaaaaaa,
0xaaaaaaaa, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld3_1,poly,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
0xaaaa, 0xaaaa, 0xaaaa, 0xfff0 };
+VECT_VAR_DECL(expected_vld3_1,hfloat,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
+ 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld3_1,hfloat,32,4) [] = { 0xaaaaaaaa, 0xaaaaaaaa,
0xc1800000, 0xc1700000 };
@@ -122,6 +134,7 @@ VECT_VAR_DECL(expected_vld3_2,uint,32,2) [] = { 0xfffffff1, 0xfffffff2 };
VECT_VAR_DECL(expected_vld3_2,poly,8,8) [] = { 0xaa, 0xaa, 0xaa, 0xaa,
0xaa, 0xaa, 0xaa, 0xaa };
VECT_VAR_DECL(expected_vld3_2,poly,16,4) [] = { 0xaaaa, 0xfff0, 0xfff1, 0xfff2 };
+VECT_VAR_DECL(expected_vld3_2,hfloat,16,4) [] = { 0xcb00, 0xaaaa, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld3_2,hfloat,32,2) [] = { 0xaaaaaaaa, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld3_2,int,16,8) [] = { 0xaaaa, 0xaaaa, 0xfff0, 0xfff1,
0xfff2, 0xaaaa, 0xaaaa, 0xaaaa };
@@ -133,6 +146,8 @@ VECT_VAR_DECL(expected_vld3_2,uint,32,4) [] = { 0xaaaaaaaa, 0xaaaaaaaa,
0xaaaaaaaa, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld3_2,poly,16,8) [] = { 0xfff1, 0xfff2, 0xaaaa, 0xaaaa,
0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
+VECT_VAR_DECL(expected_vld3_2,hfloat,16,8) [] = { 0xaaaa, 0xaaaa, 0xcc00, 0xcb80,
+ 0xcb00, 0xaaaa, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld3_2,hfloat,32,4) [] = { 0xc1600000, 0xaaaaaaaa,
0xaaaaaaaa, 0xaaaaaaaa };
@@ -148,6 +163,7 @@ VECT_VAR_DECL(expected_vld4_0,uint,32,2) [] = { 0xaaaaaaaa, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld4_0,poly,8,8) [] = { 0xaa, 0xaa, 0xaa, 0xaa,
0xaa, 0xaa, 0xaa, 0xaa };
VECT_VAR_DECL(expected_vld4_0,poly,16,4) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
+VECT_VAR_DECL(expected_vld4_0,hfloat,16,4) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld4_0,hfloat,32,2) [] = { 0xc1800000, 0xc1700000 };
VECT_VAR_DECL(expected_vld4_0,int,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
@@ -159,6 +175,8 @@ VECT_VAR_DECL(expected_vld4_0,uint,32,4) [] = { 0xfffffff0, 0xfffffff1,
0xfffffff2, 0xfffffff3 };
VECT_VAR_DECL(expected_vld4_0,poly,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
+VECT_VAR_DECL(expected_vld4_0,hfloat,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
+ 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld4_0,hfloat,32,4) [] = { 0xaaaaaaaa, 0xaaaaaaaa,
0xaaaaaaaa, 0xaaaaaaaa };
@@ -174,6 +192,7 @@ VECT_VAR_DECL(expected_vld4_1,uint,32,2) [] = { 0xaaaaaaaa, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld4_1,poly,8,8) [] = { 0xaa, 0xaa, 0xaa, 0xaa,
0xaa, 0xaa, 0xaa, 0xaa };
VECT_VAR_DECL(expected_vld4_1,poly,16,4) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
+VECT_VAR_DECL(expected_vld4_1,hfloat,16,4) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld4_1,hfloat,32,2) [] = { 0xc1600000, 0xc1500000 };
VECT_VAR_DECL(expected_vld4_1,int,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
@@ -185,6 +204,8 @@ VECT_VAR_DECL(expected_vld4_1,uint,32,4) [] = { 0xaaaaaaaa, 0xaaaaaaaa,
0xaaaaaaaa, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld4_1,poly,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
+VECT_VAR_DECL(expected_vld4_1,hfloat,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
+ 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld4_1,hfloat,32,4) [] = { 0xaaaaaaaa, 0xaaaaaaaa,
0xaaaaaaaa, 0xaaaaaaaa };
@@ -200,6 +221,7 @@ VECT_VAR_DECL(expected_vld4_2,uint,32,2) [] = { 0xfffffff0, 0xfffffff1 };
VECT_VAR_DECL(expected_vld4_2,poly,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
0xaa, 0xaa, 0xaa, 0xaa };
VECT_VAR_DECL(expected_vld4_2,poly,16,4) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
+VECT_VAR_DECL(expected_vld4_2,hfloat,16,4) [] = { 0xcc00, 0xcb80, 0xcb00, 0xca80 };
VECT_VAR_DECL(expected_vld4_2,hfloat,32,2) [] = { 0xaaaaaaaa, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld4_2,int,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
@@ -211,6 +233,8 @@ VECT_VAR_DECL(expected_vld4_2,uint,32,4) [] = { 0xaaaaaaaa, 0xaaaaaaaa,
0xaaaaaaaa, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld4_2,poly,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+VECT_VAR_DECL(expected_vld4_2,hfloat,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
+ 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld4_2,hfloat,32,4) [] = { 0xc1800000, 0xc1700000,
0xc1600000, 0xc1500000 };
@@ -226,6 +250,7 @@ VECT_VAR_DECL(expected_vld4_3,uint,32,2) [] = { 0xfffffff2, 0xfffffff3 };
VECT_VAR_DECL(expected_vld4_3,poly,8,8) [] = { 0xaa, 0xaa, 0xaa, 0xaa,
0xaa, 0xaa, 0xaa, 0xaa };
VECT_VAR_DECL(expected_vld4_3,poly,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+VECT_VAR_DECL(expected_vld4_3,hfloat,16,4) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld4_3,hfloat,32,2) [] = { 0xaaaaaaaa, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld4_3,int,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
@@ -237,6 +262,8 @@ VECT_VAR_DECL(expected_vld4_3,uint,32,4) [] = { 0xaaaaaaaa, 0xaaaaaaaa,
0xaaaaaaaa, 0xaaaaaaaa };
VECT_VAR_DECL(expected_vld4_3,poly,16,8) [] = { 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
+VECT_VAR_DECL(expected_vld4_3,hfloat,16,8) [] = { 0xcc00, 0xcb80, 0xcb00, 0xca80,
+ 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
VECT_VAR_DECL(expected_vld4_3,hfloat,32,4) [] = { 0xaaaaaaaa, 0xaaaaaaaa,
0xaaaaaaaa, 0xaaaaaaaa };
@@ -252,6 +279,9 @@ VECT_VAR_DECL_INIT(buffer_vld2_lane, uint, 32, 2);
VECT_VAR_DECL_INIT(buffer_vld2_lane, uint, 64, 2);
VECT_VAR_DECL_INIT(buffer_vld2_lane, poly, 8, 2);
VECT_VAR_DECL_INIT(buffer_vld2_lane, poly, 16, 2);
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+VECT_VAR_DECL_INIT(buffer_vld2_lane, float, 16, 2);
+#endif
VECT_VAR_DECL_INIT(buffer_vld2_lane, float, 32, 2);
/* Input buffers for vld3_lane */
@@ -265,6 +295,9 @@ VECT_VAR_DECL_INIT(buffer_vld3_lane, uint, 32, 3);
VECT_VAR_DECL_INIT(buffer_vld3_lane, uint, 64, 3);
VECT_VAR_DECL_INIT(buffer_vld3_lane, poly, 8, 3);
VECT_VAR_DECL_INIT(buffer_vld3_lane, poly, 16, 3);
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+VECT_VAR_DECL_INIT(buffer_vld3_lane, float, 16, 3);
+#endif
VECT_VAR_DECL_INIT(buffer_vld3_lane, float, 32, 3);
/* Input buffers for vld4_lane */
@@ -278,6 +311,9 @@ VECT_VAR_DECL_INIT(buffer_vld4_lane, uint, 32, 4);
VECT_VAR_DECL_INIT(buffer_vld4_lane, uint, 64, 4);
VECT_VAR_DECL_INIT(buffer_vld4_lane, poly, 8, 4);
VECT_VAR_DECL_INIT(buffer_vld4_lane, poly, 16, 4);
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+VECT_VAR_DECL_INIT(buffer_vld4_lane, float, 16, 4);
+#endif
VECT_VAR_DECL_INIT(buffer_vld4_lane, float, 32, 4);
void exec_vldX_lane (void)
@@ -321,7 +357,7 @@ void exec_vldX_lane (void)
sizeof(VECT_VAR(result, T1, W, N)));
/* We need all variants in 64 bits, but there is no 64x2 variant. */
-#define DECL_ALL_VLDX_LANE(X) \
+#define DECL_ALL_VLDX_LANE_NO_FP16(X) \
DECL_VLDX_LANE(int, 8, 8, X); \
DECL_VLDX_LANE(int, 16, 4, X); \
DECL_VLDX_LANE(int, 32, 2, X); \
@@ -338,6 +374,15 @@ void exec_vldX_lane (void)
DECL_VLDX_LANE(float, 32, 2, X); \
DECL_VLDX_LANE(float, 32, 4, X)
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#define DECL_ALL_VLDX_LANE(X) \
+ DECL_ALL_VLDX_LANE_NO_FP16(X); \
+ DECL_VLDX_LANE(float, 16, 4, X); \
+ DECL_VLDX_LANE(float, 16, 8, X)
+#else
+#define DECL_ALL_VLDX_LANE(X) DECL_ALL_VLDX_LANE_NO_FP16(X)
+#endif
+
/* Add some padding to try to catch out of bound accesses. */
#define ARRAY1(V, T, W, N) VECT_VAR_DECL(V,T,W,N)[1]={42}
#define DUMMY_ARRAY(V, T, W, N, L) \
@@ -346,7 +391,7 @@ void exec_vldX_lane (void)
/* Use the same lanes regardless of the size of the array (X), for
simplicity. */
-#define TEST_ALL_VLDX_LANE(X) \
+#define TEST_ALL_VLDX_LANE_NO_FP16(X) \
TEST_VLDX_LANE(, int, s, 8, 8, X, 7); \
TEST_VLDX_LANE(, int, s, 16, 4, X, 2); \
TEST_VLDX_LANE(, int, s, 32, 2, X, 0); \
@@ -363,7 +408,16 @@ void exec_vldX_lane (void)
TEST_VLDX_LANE(, float, f, 32, 2, X, 0); \
TEST_VLDX_LANE(q, float, f, 32, 4, X, 2)
-#define TEST_ALL_EXTRA_CHUNKS(X, Y) \
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#define TEST_ALL_VLDX_LANE(X) \
+ TEST_ALL_VLDX_LANE_NO_FP16(X); \
+ TEST_VLDX_LANE(, float, f, 16, 4, X, 2); \
+ TEST_VLDX_LANE(q, float, f, 16, 8, X, 6)
+#else
+#define TEST_ALL_VLDX_LANE(X) TEST_ALL_VLDX_LANE_NO_FP16(X)
+#endif
+
+#define TEST_ALL_EXTRA_CHUNKS_NO_FP16(X,Y) \
TEST_EXTRA_CHUNK(int, 8, 8, X, Y); \
TEST_EXTRA_CHUNK(int, 16, 4, X, Y); \
TEST_EXTRA_CHUNK(int, 32, 2, X, Y); \
@@ -380,9 +434,17 @@ void exec_vldX_lane (void)
TEST_EXTRA_CHUNK(float, 32, 2, X, Y); \
TEST_EXTRA_CHUNK(float, 32, 4, X, Y)
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#define TEST_ALL_EXTRA_CHUNKS(X,Y) \
+ TEST_ALL_EXTRA_CHUNKS_NO_FP16(X, Y); \
+ TEST_EXTRA_CHUNK(float, 16, 4, X, Y); \
+ TEST_EXTRA_CHUNK(float, 16, 8, X, Y)
+#else
+#define TEST_ALL_EXTRA_CHUNKS(X,Y) TEST_ALL_EXTRA_CHUNKS_NO_FP16(X, Y)
+#endif
+
/* vldX_lane supports only a subset of all variants. */
-#define CHECK_RESULTS_VLDX_LANE(test_name,EXPECTED,comment) \
- { \
+#define CHECK_RESULTS_VLDX_LANE_NO_FP16(test_name,EXPECTED,comment) \
CHECK(test_name, int, 8, 8, PRIx8, EXPECTED, comment); \
CHECK(test_name, int, 16, 4, PRIx16, EXPECTED, comment); \
CHECK(test_name, int, 32, 2, PRIx32, EXPECTED, comment); \
@@ -397,8 +459,21 @@ void exec_vldX_lane (void)
CHECK(test_name, uint, 16, 8, PRIx16, EXPECTED, comment); \
CHECK(test_name, uint, 32, 4, PRIx32, EXPECTED, comment); \
CHECK(test_name, poly, 16, 8, PRIx16, EXPECTED, comment); \
- CHECK_FP(test_name, float, 32, 4, PRIx32, EXPECTED, comment); \
- } \
+ CHECK_FP(test_name, float, 32, 4, PRIx32, EXPECTED, comment)
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#define CHECK_RESULTS_VLDX_LANE(test_name,EXPECTED,comment) \
+ { \
+ CHECK_RESULTS_VLDX_LANE_NO_FP16(test_name,EXPECTED,comment); \
+ CHECK_FP(test_name, float, 16, 4, PRIx16, EXPECTED, comment); \
+ CHECK_FP(test_name, float, 16, 8, PRIx16, EXPECTED, comment); \
+ }
+#else
+#define CHECK_RESULTS_VLDX_LANE(test_name,EXPECTED,comment) \
+ { \
+ CHECK_RESULTS_VLDX_LANE_NO_FP16(test_name,EXPECTED,comment); \
+ }
+#endif
/* Declare the temporary buffers / variables. */
DECL_ALL_VLDX_LANE(2);
@@ -419,6 +494,10 @@ void exec_vldX_lane (void)
DUMMY_ARRAY(buffer_src, uint, 16, 8, 4);
DUMMY_ARRAY(buffer_src, uint, 32, 4, 4);
DUMMY_ARRAY(buffer_src, poly, 16, 8, 4);
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+ DUMMY_ARRAY(buffer_src, float, 16, 4, 4);
+ DUMMY_ARRAY(buffer_src, float, 16, 8, 4);
+#endif
DUMMY_ARRAY(buffer_src, float, 32, 2, 4);
DUMMY_ARRAY(buffer_src, float, 32, 4, 4);
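A note on reading the half-precision expectations above: lanes the load does not touch are expected to keep the test's 0xaaaa fill pattern, while loaded lanes hold IEEE 754 binary16 encodings of the same buffer values whose binary32 encodings appear in the hfloat,32 arrays (0xc1800000 is -16.0f, 0xc1700000 is -15.0f, and so on). One decode as a worked example:

/* 0xcc00 = 1 10011 0000000000 in binary16: sign 1, biased exponent 19,
   mantissa 0, so the value is -1.0 * 2^(19-15) = -16.0.
   Likewise 0xcb80 = -15.0, 0xcb00 = -14.0 and 0xca80 = -13.0.  */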