# Not implemented in stdarch yet
vbfdot_f32
vbfdot_lane_f32
vbfdot_laneq_f32
vbfdotq_f32
vbfdotq_lane_f32
vbfdotq_laneq_f32
vbfmlalbq_f32
vbfmlalbq_lane_f32
vbfmlalbq_laneq_f32
vbfmlaltq_f32
vbfmlaltq_lane_f32
vbfmlaltq_laneq_f32
vbfmmlaq_f32
vsudot_laneq_s32
vsudot_lane_s32
vsudotq_laneq_s32
vsudotq_lane_s32
vusdot_laneq_s32
vusdot_lane_s32
vusdotq_laneq_s32
vusdotq_lane_s32
vusdotq_s32
vusdot_s32

# Implemented in Clang and stdarch but missing from the CSV
vtst_p16
vtstq_p16

# QEMU 6.0 doesn't support these instructions
vmmlaq_s32
vmmlaq_u32
vusmmlaq_s32

# Implemented in Clang and stdarch for A64 only, even though the CSV claims A32 support
__crc32d
__crc32cd
vaddq_p64
vbsl_p64
vbslq_p64
vceq_p64
vceqq_p64
vceqz_p64
vceqzq_p64
vcombine_p64
vcopy_lane_p64
vcopy_laneq_p64
vcopyq_lane_p64
vcopyq_laneq_p64
vcreate_p64
vdup_lane_p64
vdup_n_p64
vdupq_lane_p64
vdupq_n_p64
vext_p64
vextq_p64
vget_high_p64
vget_lane_p64
vget_low_p64
vgetq_lane_p64
vmovn_high_s16
vmovn_high_s32
vmovn_high_s64
vmovn_high_u16
vmovn_high_u32
vmovn_high_u64
vmull_high_p64
vmull_p64
vreinterpret_p16_p64
vreinterpret_p64_f32
vreinterpret_p64_p16
vreinterpret_p64_p8
vreinterpret_p64_s16
vreinterpret_p64_s32
vreinterpret_p64_s8
vreinterpret_p64_u16
vreinterpret_p64_u32
vreinterpret_p64_u64
vreinterpret_p64_u8
vreinterpret_p8_p64
vreinterpretq_f64_u64
vreinterpretq_p128_f32
vreinterpretq_p128_p16
vreinterpretq_p128_p8
vreinterpretq_p128_s16
vreinterpretq_p128_s32
vreinterpretq_p128_s64
vreinterpretq_p128_s8
vreinterpretq_p128_u16
vreinterpretq_p128_u32
vreinterpretq_p128_u64
vreinterpretq_p128_u8
vreinterpretq_p16_p64
vreinterpretq_p64_f32
vreinterpretq_p64_p16
vreinterpretq_p64_p8
vreinterpretq_p64_s16
vreinterpretq_p64_s32
vreinterpretq_p64_s64
vreinterpretq_p64_s8
vreinterpretq_p64_u16
vreinterpretq_p64_u32
vreinterpretq_p64_u64
vreinterpretq_p64_u8
vreinterpretq_p8_p64
vreinterpretq_s16_p64
vreinterpretq_s32_p64
vreinterpretq_s64_p64
vreinterpretq_s8_p64
vreinterpretq_u16_p64
vreinterpretq_u32_p64
vreinterpretq_u64_p64
vreinterpretq_u8_p64
vreinterpret_s16_p64
vreinterpret_s32_p64
vreinterpret_s64_p64
vreinterpret_s8_p64
vreinterpret_u16_p64
vreinterpret_u32_p64
vreinterpret_u64_p64
vreinterpret_u8_p64
vrndn_f64
vrndnq_f64
vset_lane_p64
vsetq_lane_p64
vsli_n_p64
vsliq_n_p64
vsri_n_p64
vsriq_n_p64
vtst_p64
vtstq_p64

# Present in the Clang header but triggers an ICE due to lack of backend support.
vcmla_f32
vcmla_lane_f32
vcmla_laneq_f32
vcmla_rot180_f32
vcmla_rot180_lane_f32
vcmla_rot180_laneq_f32
vcmla_rot270_f32
vcmla_rot270_lane_f32
vcmla_rot270_laneq_f32
vcmla_rot90_f32
vcmla_rot90_lane_f32
vcmla_rot90_laneq_f32
vcmlaq_f32
vcmlaq_lane_f32
vcmlaq_laneq_f32
vcmlaq_rot180_f32
vcmlaq_rot180_lane_f32
vcmlaq_rot180_laneq_f32
vcmlaq_rot270_f32
vcmlaq_rot270_lane_f32
vcmlaq_rot270_laneq_f32
vcmlaq_rot90_f32
vcmlaq_rot90_lane_f32
vcmlaq_rot90_laneq_f32

# Implemented in stdarch for A64 only; Clang supports both A32 and A64
vadd_s64
vadd_u64
vcaddq_rot270_f32
vcaddq_rot90_f32
vcadd_rot270_f32
vcadd_rot90_f32
vcvtaq_s32_f32
vcvtaq_u32_f32
vcvta_s32_f32
vcvta_u32_f32
vcvtmq_s32_f32
vcvtmq_u32_f32
vcvtm_s32_f32
vcvtm_u32_f32
vcvtnq_s32_f32
vcvtnq_u32_f32
vcvtn_s32_f32
vcvtn_u32_f32
vcvtpq_s32_f32
vcvtpq_u32_f32
vcvtp_s32_f32
vcvtp_u32_f32
vdot_lane_s32
vdot_lane_u32
vdotq_lane_s32
vdotq_lane_u32
vdotq_s32
vdotq_u32
vdot_s32
vdot_u32
vqdmulh_lane_s16
vqdmulh_lane_s32
vqdmulhq_lane_s16
vqdmulhq_lane_s32
vrnda_f32
vrndaq_f32
vrnd_f32
vrndi_f32
vrndiq_f32
vrndm_f32
vrndmq_f32
vrndns_f32
vrndp_f32
vrndpq_f32
vrndq_f32
vrndx_f32
vrndxq_f32

# LLVM select error in debug builds
vqrshrn_n_s16
vqrshrn_n_s32
vqrshrn_n_s64
vqrshrn_n_u16
vqrshrn_n_u32
vqrshrn_n_u64
vqrshrun_n_s16
vqrshrun_n_s32
vqrshrun_n_s64
vqshrn_n_s16
vqshrn_n_s32
vqshrn_n_s64
vqshrn_n_u16
vqshrn_n_u32
vqshrn_n_u64
vqshrun_n_s16
vqshrun_n_s32
vqshrun_n_s64
vrshrn_n_s16
vrshrn_n_s32
vrshrn_n_s64
vrshrn_n_u16
vrshrn_n_u32
vrshrn_n_u64
vshrq_n_u64
vshr_n_u64

# Failing tests: stdarch has incorrect results compared to Clang
vqshlu_n_s16
vqshlu_n_s32
vqshlu_n_s64
vqshlu_n_s8
vqshluq_n_s16
vqshluq_n_s32
vqshluq_n_s64
vqshluq_n_s8
vsli_n_p16
vsli_n_p8
vsli_n_s16
vsli_n_s32
vsli_n_s64
vsli_n_s8
vsli_n_u16
vsli_n_u32
vsli_n_u64
vsli_n_u8
vsliq_n_p16
vsliq_n_p8
vsliq_n_s16
vsliq_n_s32
vsliq_n_s64
vsliq_n_s8
vsliq_n_u16
vsliq_n_u32
vsliq_n_u64
vsliq_n_u8
vsri_n_p16
vsri_n_p8
vsri_n_s16
vsri_n_s32
vsri_n_s64
vsri_n_s8
vsri_n_u16
vsri_n_u32
vsri_n_u64
vsri_n_u8
vsriq_n_p16
vsriq_n_p8
vsriq_n_s16
vsriq_n_s32
vsriq_n_s64
vsriq_n_s8
vsriq_n_u16
vsriq_n_u32
vsriq_n_u64
vsriq_n_u8

# These produce a different result on Clang depending on the optimization level.
# This is definitely a bug in LLVM (see the note after this list).
vadd_f32
vaddq_f32
vcvt_s32_f32
vcvt_u32_f32
vcvtq_s32_f32
vcvtq_u32_f32
vfma_f32
vfma_n_f32
vfmaq_f32
vfmaq_n_f32
vfms_f32
vfmsq_f32
vmla_f32
vmla_lane_f32
vmla_n_f32
vmlaq_f32
vmlaq_lane_f32
vmlaq_n_f32
vmls_f32
vmls_lane_f32
vmls_n_f32
vmlsq_f32
vmlsq_lane_f32
vmlsq_n_f32
vmul_lane_f32
vmul_n_f32
vmulq_lane_f32
vmulq_n_f32
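
# Note on the group above (results differ on Clang with the optimization level):
# one plausible mechanism, not confirmed here, is floating-point contraction.
# A minimal C sketch, kept as comments so this file stays a plain skip list;
# it assumes an ARMv7 NEON target with <arm_neon.h>, and the function name
# mla_demo is made up for illustration:
#
#   #include <arm_neon.h>
#
#   float32x2_t mla_demo(float32x2_t acc, float32x2_t x, float32x2_t y) {
#       // vmla_f32 computes acc + x * y element-wise. At -O0 Clang may emit a
#       // separate multiply and add (two roundings), while at higher optimization
#       // levels the pair may be contracted into a fused multiply-add (a single
#       // rounding), so the same inputs can yield bitwise-different results.
#       return vmla_f32(acc, x, y);
#   }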