Notations are as follows:
- T: implemented as a trick, usually using other intrinsics
- E: implemented by scalar emulation
- NOOP: no operation is needed
- NA: the operator does not exist for the given type
- an intrinsic name (e.g. svadd_s8_x): the actual wrapped intrinsic
abs on i8: svabs_s8_x
abs on u8: NOOP
abs on i16: svabs_s16_x
abs on u16: NOOP
abs on i32: svabs_s32_x
abs on u32: NOOP
abs on i64: svabs_s64_x
abs on u64: NOOP
abs on f16: svabs_f16_x
abs on f32: svabs_f32_x
abs on f64: svabs_f64_x
add on i8: svadd_s8_x
add on u8: svadd_u8_x
add on i16: svadd_s16_x
add on u16: svadd_u16_x
add on i32: svadd_s32_x
add on u32: svadd_u32_x
add on i64: svadd_s64_x
add on u64: svadd_u64_x
add on f16: svadd_f16_x
add on f32: svadd_f32_x
add on f64: svadd_f64_x
adds on i8: svqadd_s8
adds on u8: svqadd_u8
adds on i16: svqadd_s16
adds on u16: svqadd_u16
adds on i32: svqadd_s32
adds on u32: svqadd_u32
adds on i64: svqadd_s64
adds on u64: svqadd_u64
adds on f16: T
adds on f32: T
adds on f64: T
addv on i8: NA
addv on u8: NA
addv on i16: NA
addv on u16: NA
addv on i32: NA
addv on u32: NA
addv on i64: NA
addv on u64: NA
addv on f16: svaddv_f16
addv on f32: svaddv_f32
addv on f64: svaddv_f64
all on i8: T
all on u8: T
all on i16: T
all on u16: T
all on i32: T
all on u32: T
all on i64: T
all on u64: T
all on f16: T
all on f32: T
all on f64: T
andb on i8: svand_s8_x
andb on u8: svand_u8_x
andb on i16: svand_s16_x
andb on u16: svand_u16_x
andb on i32: svand_s32_x
andb on u32: svand_u32_x
andb on i64: svand_s64_x
andb on u64: svand_u64_x
andb on f16: svand_u16_x
andb on f32: svand_u32_x
andb on f64: svand_u64_x
andl on i8: svand_z
andl on u8: svand_z
andl on i16: svand_z
andl on u16: svand_z
andl on i32: svand_z
andl on u32: svand_z
andl on i64: svand_z
andl on u64: svand_z
andl on f16: svand_z
andl on f32: svand_z
andl on f64: svand_z
andnotb on i8: svbic_s8_x
andnotb on u8: svbic_u8_x
andnotb on i16: svbic_s16_x
andnotb on u16: svbic_u16_x
andnotb on i32: svbic_s32_x
andnotb on u32: svbic_u32_x
andnotb on i64: svbic_s64_x
andnotb on u64: svbic_u64_x
andnotb on f16: svbic_u16_x
andnotb on f32: svbic_u32_x
andnotb on f64: svbic_u64_x
andnotl on i8: svbic_z
andnotl on u8: svbic_z
andnotl on i16: svbic_z
andnotl on u16: svbic_z
andnotl on i32: svbic_z
andnotl on u32: svbic_z
andnotl on i64: svbic_z
andnotl on u64: svbic_z
andnotl on f16: svbic_z
andnotl on f32: svbic_z
andnotl on f64: svbic_z
any on i8: svptest_any
any on u8: svptest_any
any on i16: svptest_any
any on u16: svptest_any
any on i32: svptest_any
any on u32: svptest_any
any on i64: svptest_any
any on u64: svptest_any
any on f16: svptest_any
any on f32: svptest_any
any on f64: svptest_any
ceil on i8: NOOP
ceil on u8: NOOP
ceil on i16: NOOP
ceil on u16: NOOP
ceil on i32: NOOP
ceil on u32: NOOP
ceil on i64: NOOP
ceil on u64: NOOP
ceil on f16: svrintp_f16_x
ceil on f32: svrintp_f32_x
ceil on f64: svrintp_f64_x
div on i8: E
div on u8: E
div on i16: E
div on u16: E
div on i32: svdiv_s32_x
div on u32: svdiv_u32_x
div on i64: svdiv_s64_x
div on u64: svdiv_u64_x
div on f16: svdiv_f16_x
div on f32: svdiv_f32_x
div on f64: svdiv_f64_x
eq on i8: svcmpeq_s8
eq on u8: svcmpeq_u8
eq on i16: svcmpeq_s16
eq on u16: svcmpeq_u16
eq on i32: svcmpeq_s32
eq on u32: svcmpeq_u32
eq on i64: svcmpeq_s64
eq on u64: svcmpeq_u64
eq on f16: svcmpeq_f16
eq on f32: svcmpeq_f32
eq on f64: svcmpeq_f64
floor on i8: NOOP
floor on u8: NOOP
floor on i16: NOOP
floor on u16: NOOP
floor on i32: NOOP
floor on u32: NOOP
floor on i64: NOOP
floor on u64: NOOP
floor on f16: svrintm_f16_x
floor on f32: svrintm_f32_x
floor on f64: svrintm_f64_x
fma on i8: svmla_s8_x
fma on u8: svmla_u8_x
fma on i16: svmla_s16_x
fma on u16: svmla_u16_x
fma on i32: svmla_s32_x
fma on u32: svmla_u32_x
fma on i64: svmla_s64_x
fma on u64: svmla_u64_x
fma on f16: svmla_f16_x
fma on f32: svmla_f32_x
fma on f64: svmla_f64_x
fms on i8: T
fms on u8: T
fms on i16: T
fms on u16: T
fms on i32: T
fms on u32: T
fms on i64: T
fms on u64: T
fms on f16: svnmls_f16_x
fms on f32: svnmls_f32_x
fms on f64: svnmls_f64_x
fnma on i8: svmls_s8_x
fnma on u8: svmls_u8_x
fnma on i16: svmls_s16_x
fnma on u16: svmls_u16_x
fnma on i32: svmls_s32_x
fnma on u32: svmls_u32_x
fnma on i64: svmls_s64_x
fnma on u64: svmls_u64_x
fnma on f16: svmls_f16_x
fnma on f32: svmls_f32_x
fnma on f64: svmls_f64_x
fnms on i8: T
fnms on u8: T
fnms on i16: T
fnms on u16: T
fnms on i32: T
fnms on u32: T
fnms on i64: T
fnms on u64: T
fnms on f16: svnmla_f16_x
fnms on f32: svnmla_f32_x
fnms on f64: svnmla_f64_x
gather on i8: NA
gather on u8: NA
gather on i16: E
gather on u16: E
gather on i32: svld1_gather_s32index_s32
gather on u32: svld1_gather_s32index_u32
gather on i64: svld1_gather_s64index_s64
gather on u64: svld1_gather_s64index_u64
gather on f16: E
gather on f32: svld1_gather_s32index_f32
gather on f64: svld1_gather_s64index_f64
gather_linear on i8: E
gather_linear on u8: E
gather_linear on i16: E
gather_linear on u16: E
gather_linear on i32: T
gather_linear on u32: T
gather_linear on i64: T
gather_linear on u64: T
gather_linear on f16: E
gather_linear on f32: T
gather_linear on f64: T
ge on i8: svcmpge_s8
ge on u8: svcmpge_u8
ge on i16: svcmpge_s16
ge on u16: svcmpge_u16
ge on i32: svcmpge_s32
ge on u32: svcmpge_u32
ge on i64: svcmpge_s64
ge on u64: svcmpge_u64
ge on f16: svcmpge_f16
ge on f32: svcmpge_f32
ge on f64: svcmpge_f64
gt on i8: svcmpgt_s8
gt on u8: svcmpgt_u8
gt on i16: svcmpgt_s16
gt on u16: svcmpgt_u16
gt on i32: svcmpgt_s32
gt on u32: svcmpgt_u32
gt on i64: svcmpgt_s64
gt on u64: svcmpgt_u64
gt on f16: svcmpgt_f16
gt on f32: svcmpgt_f32
gt on f64: svcmpgt_f64
if_else1 on i8: svsel_s8
if_else1 on u8: svsel_u8
if_else1 on i16: svsel_s16
if_else1 on u16: svsel_u16
if_else1 on i32: svsel_s32
if_else1 on u32: svsel_u32
if_else1 on i64: svsel_s64
if_else1 on u64: svsel_u64
if_else1 on f16: svsel_f16
if_else1 on f32: svsel_f32
if_else1 on f64: svsel_f64
iota on i8: svindex_s8
iota on u8: svindex_u8
iota on i16: svindex_s16
iota on u16: svindex_u16
iota on i32: svindex_s32
iota on u32: svindex_u32
iota on i64: svindex_s64
iota on u64: svindex_u64
iota on f16: T
iota on f32: T
iota on f64: T
le on i8: svcmple_s8
le on u8: svcmple_u8
le on i16: svcmple_s16
le on u16: svcmple_u16
le on i32: svcmple_s32
le on u32: svcmple_u32
le on i64: svcmple_s64
le on u64: svcmple_u64
le on f16: svcmple_f16
le on f32: svcmple_f32
le on f64: svcmple_f64
len on i8: NOOP
len on u8: NOOP
len on i16: NOOP
len on u16: NOOP
len on i32: NOOP
len on u32: NOOP
len on i64: NOOP
len on u64: NOOP
len on f16: NOOP
len on f32: NOOP
len on f64: NOOP
load2a on i8: T
load2a on u8: T
load2a on i16: T
load2a on u16: T
load2a on i32: T
load2a on u32: T
load2a on i64: T
load2a on u64: T
load2a on f16: T
load2a on f32: T
load2a on f64: T
load2u on i8: T
load2u on u8: T
load2u on i16: T
load2u on u16: T
load2u on i32: T
load2u on u32: T
load2u on i64: T
load2u on u64: T
load2u on f16: T
load2u on f32: T
load2u on f64: T
load3a on i8: T
load3a on u8: T
load3a on i16: T
load3a on u16: T
load3a on i32: T
load3a on u32: T
load3a on i64: T
load3a on u64: T
load3a on f16: T
load3a on f32: T
load3a on f64: T
load3u on i8: T
load3u on u8: T
load3u on i16: T
load3u on u16: T
load3u on i32: T
load3u on u32: T
load3u on i64: T
load3u on u64: T
load3u on f16: T
load3u on f32: T
load3u on f64: T
load4a on i8: T
load4a on u8: T
load4a on i16: T
load4a on u16: T
load4a on i32: T
load4a on u32: T
load4a on i64: T
load4a on u64: T
load4a on f16: T
load4a on f32: T
load4a on f64: T
load4u on i8: T
load4u on u8: T
load4u on i16: T
load4u on u16: T
load4u on i32: T
load4u on u32: T
load4u on i64: T
load4u on u64: T
load4u on f16: T
load4u on f32: T
load4u on f64: T
loada on i8: svld1_s8
loada on u8: svld1_u8
loada on i16: svld1_s16
loada on u16: svld1_u16
loada on i32: svld1_s32
loada on u32: svld1_u32
loada on i64: svld1_s64
loada on u64: svld1_u64
loada on f16: svld1_f16
loada on f32: svld1_f32
loada on f64: svld1_f64
loadla on i8: T
loadla on u8: T
loadla on i16: T
loadla on u16: T
loadla on i32: T
loadla on u32: T
loadla on i64: T
loadla on u64: T
loadla on f16: T
loadla on f32: T
loadla on f64: T
loadlu on i8: T
loadlu on u8: T
loadlu on i16: T
loadlu on u16: T
loadlu on i32: T
loadlu on u32: T
loadlu on i64: T
loadlu on u64: T
loadlu on f16: T
loadlu on f32: T
loadlu on f64: T
loadu on i8: svld1_s8
loadu on u8: svld1_u8
loadu on i16: svld1_s16
loadu on u16: svld1_u16
loadu on i32: svld1_s32
loadu on u32: svld1_u32
loadu on i64: svld1_s64
loadu on u64: svld1_u64
loadu on f16: svld1_f16
loadu on f32: svld1_f32
loadu on f64: svld1_f64
lt on i8: svcmplt_s8
lt on u8: svcmplt_u8
lt on i16: svcmplt_s16
lt on u16: svcmplt_u16
lt on i32: svcmplt_s32
lt on u32: svcmplt_u32
lt on i64: svcmplt_s64
lt on u64: svcmplt_u64
lt on f16: svcmplt_f16
lt on f32: svcmplt_f32
lt on f64: svcmplt_f64
mask_for_loop_tail on i8: T
mask_for_loop_tail on u8: T
mask_for_loop_tail on i16: T
mask_for_loop_tail on u16: T
mask_for_loop_tail on i32: T
mask_for_loop_tail on u32: T
mask_for_loop_tail on i64: T
mask_for_loop_tail on u64: T
mask_for_loop_tail on f16: T
mask_for_loop_tail on f32: T
mask_for_loop_tail on f64: T
mask_storea1 on i8: svst1_s8
mask_storea1 on u8: svst1_u8
mask_storea1 on i16: svst1_s16
mask_storea1 on u16: svst1_u16
mask_storea1 on i32: svst1_s32
mask_storea1 on u32: svst1_u32
mask_storea1 on i64: svst1_s64
mask_storea1 on u64: svst1_u64
mask_storea1 on f16: svst1_f16
mask_storea1 on f32: svst1_f32
mask_storea1 on f64: svst1_f64
mask_storeu1 on i8: svst1_s8
mask_storeu1 on u8: svst1_u8
mask_storeu1 on i16: svst1_s16
mask_storeu1 on u16: svst1_u16
mask_storeu1 on i32: svst1_s32
mask_storeu1 on u32: svst1_u32
mask_storeu1 on i64: svst1_s64
mask_storeu1 on u64: svst1_u64
mask_storeu1 on f16: svst1_f16
mask_storeu1 on f32: svst1_f32
mask_storeu1 on f64: svst1_f64
masko_loada1 on i8: T
masko_loada1 on u8: T
masko_loada1 on i16: T
masko_loada1 on u16: T
masko_loada1 on i32: T
masko_loada1 on u32: T
masko_loada1 on i64: T
masko_loada1 on u64: T
masko_loada1 on f16: T
masko_loada1 on f32: T
masko_loada1 on f64: T
masko_loadu1 on i8: T
masko_loadu1 on u8: T
masko_loadu1 on i16: T
masko_loadu1 on u16: T
masko_loadu1 on i32: T
masko_loadu1 on u32: T
masko_loadu1 on i64: T
masko_loadu1 on u64: T
masko_loadu1 on f16: T
masko_loadu1 on f32: T
masko_loadu1 on f64: T
maskz_loada1 on i8: T
maskz_loada1 on u8: T
maskz_loada1 on i16: T
maskz_loada1 on u16: T
maskz_loada1 on i32: T
maskz_loada1 on u32: T
maskz_loada1 on i64: T
maskz_loada1 on u64: T
maskz_loada1 on f16: T
maskz_loada1 on f32: T
maskz_loada1 on f64: T
maskz_loadu1 on i8: T
maskz_loadu1 on u8: T
maskz_loadu1 on i16: T
maskz_loadu1 on u16: T
maskz_loadu1 on i32: T
maskz_loadu1 on u32: T
maskz_loadu1 on i64: T
maskz_loadu1 on u64: T
maskz_loadu1 on f16: T
maskz_loadu1 on f32: T
maskz_loadu1 on f64: T
max on i8: svmax_s8_x
max on u8: svmax_u8_x
max on i16: svmax_s16_x
max on u16: svmax_u16_x
max on i32: svmax_s32_x
max on u32: svmax_u32_x
max on i64: svmax_s64_x
max on u64: svmax_u64_x
max on f16: svmax_f16_x
max on f32: svmax_f32_x
max on f64: svmax_f64_x
min on i8: svmin_s8_x
min on u8: svmin_u8_x
min on i16: svmin_s16_x
min on u16: svmin_u16_x
min on i32: svmin_s32_x
min on u32: svmin_u32_x
min on i64: svmin_s64_x
min on u64: svmin_u64_x
min on f16: svmin_f16_x
min on f32: svmin_f32_x
min on f64: svmin_f64_x
mul on i8: svmul_s8_x
mul on u8: svmul_u8_x
mul on i16: svmul_s16_x
mul on u16: svmul_u16_x
mul on i32: svmul_s32_x
mul on u32: svmul_u32_x
mul on i64: svmul_s64_x
mul on u64: svmul_u64_x
mul on f16: svmul_f16_x
mul on f32: svmul_f32_x
mul on f64: svmul_f64_x
nbtrue on i8: svcntp_b8
nbtrue on u8: svcntp_b8
nbtrue on i16: svcntp_b16
nbtrue on u16: svcntp_b16
nbtrue on i32: svcntp_b32
nbtrue on u32: svcntp_b32
nbtrue on i64: svcntp_b64
nbtrue on u64: svcntp_b64
nbtrue on f16: svcntp_b16
nbtrue on f32: svcntp_b32
nbtrue on f64: svcntp_b64
ne on i8: svcmpne_s8
ne on u8: svcmpne_u8
ne on i16: svcmpne_s16
ne on u16: svcmpne_u16
ne on i32: svcmpne_s32
ne on u32: svcmpne_u32
ne on i64: svcmpne_s64
ne on u64: svcmpne_u64
ne on f16: svcmpne_f16
ne on f32: svcmpne_f32
ne on f64: svcmpne_f64
neg on i8: svneg_s8_x
neg on u8: svneg_s8_x
neg on i16: svneg_s16_x
neg on u16: svneg_s16_x
neg on i32: svneg_s32_x
neg on u32: svneg_s32_x
neg on i64: svneg_s64_x
neg on u64: svneg_s64_x
neg on f16: svneg_f16_x
neg on f32: svneg_f32_x
neg on f64: svneg_f64_x
notb on i8: svnot_s8_x
notb on u8: svnot_u8_x
notb on i16: svnot_s16_x
notb on u16: svnot_u16_x
notb on i32: svnot_s32_x
notb on u32: svnot_u32_x
notb on i64: svnot_s64_x
notb on u64: svnot_u64_x
notb on f16: svnot_u16_x
notb on f32: svnot_u32_x
notb on f64: svnot_u64_x
notl on i8: svnot_z
notl on u8: svnot_z
notl on i16: svnot_z
notl on u16: svnot_z
notl on i32: svnot_z
notl on u32: svnot_z
notl on i64: svnot_z
notl on u64: svnot_z
notl on f16: svnot_z
notl on f32: svnot_z
notl on f64: svnot_z
orb on i8: svorr_s8_x
orb on u8: svorr_u8_x
orb on i16: svorr_s16_x
orb on u16: svorr_u16_x
orb on i32: svorr_s32_x
orb on u32: svorr_u32_x
orb on i64: svorr_s64_x
orb on u64: svorr_u64_x
orb on f16: svorr_u16_x
orb on f32: svorr_u32_x
orb on f64: svorr_u64_x
orl on i8: svorr_z
orl on u8: svorr_z
orl on i16: svorr_z
orl on u16: svorr_z
orl on i32: svorr_z
orl on u32: svorr_z
orl on i64: svorr_z
orl on u64: svorr_z
orl on f16: svorr_z
orl on f32: svorr_z
orl on f64: svorr_z
rec on i8: NA
rec on u8: NA
rec on i16: NA
rec on u16: NA
rec on i32: NA
rec on u32: NA
rec on i64: NA
rec on u64: NA
rec on f16: T
rec on f32: T
rec on f64: T
rec11 on i8: NA
rec11 on u8: NA
rec11 on i16: NA
rec11 on u16: NA
rec11 on i32: NA
rec11 on u32: NA
rec11 on i64: NA
rec11 on u64: NA
rec11 on f16: T
rec11 on f32: T
rec11 on f64: T
rec8 on i8: NA
rec8 on u8: NA
rec8 on i16: NA
rec8 on u16: NA
rec8 on i32: NA
rec8 on u32: NA
rec8 on i64: NA
rec8 on u64: NA
rec8 on f16: svrecpe_f16
rec8 on f32: svrecpe_f32
rec8 on f64: svrecpe_f64
round_to_even on i8: NOOP
round_to_even on u8: NOOP
round_to_even on i16: NOOP
round_to_even on u16: NOOP
round_to_even on i32: NOOP
round_to_even on u32: NOOP
round_to_even on i64: NOOP
round_to_even on u64: NOOP
round_to_even on f16: svrintn_f16_x
round_to_even on f32: svrintn_f32_x
round_to_even on f64: svrintn_f64_x
rsqrt11 on i8: NA
rsqrt11 on u8: NA
rsqrt11 on i16: NA
rsqrt11 on u16: NA
rsqrt11 on i32: NA
rsqrt11 on u32: NA
rsqrt11 on i64: NA
rsqrt11 on u64: NA
rsqrt11 on f16: T
rsqrt11 on f32: T
rsqrt11 on f64: T
rsqrt8 on i8: NA
rsqrt8 on u8: NA
rsqrt8 on i16: NA
rsqrt8 on u16: NA
rsqrt8 on i32: NA
rsqrt8 on u32: NA
rsqrt8 on i64: NA
rsqrt8 on u64: NA
rsqrt8 on f16: svrsqrte_f16
rsqrt8 on f32: svrsqrte_f32
rsqrt8 on f64: svrsqrte_f64
scatter on i8: NA
scatter on u8: NA
scatter on i16: E
scatter on u16: E
scatter on i32: svst1_scatter_s32index_s32
scatter on u32: svst1_scatter_s32index_u32
scatter on i64: svst1_scatter_s64index_s64
scatter on u64: svst1_scatter_s64index_u64
scatter on f16: E
scatter on f32: svst1_scatter_s32index_f32
scatter on f64: svst1_scatter_s64index_f64
scatter_linear on i8: E
scatter_linear on u8: E
scatter_linear on i16: E
scatter_linear on u16: E
scatter_linear on i32: T
scatter_linear on u32: T
scatter_linear on i64: T
scatter_linear on u64: T
scatter_linear on f16: E
scatter_linear on f32: T
scatter_linear on f64: T
set1 on i8: svdup_n_s8
set1 on u8: svdup_n_u8
set1 on i16: svdup_n_s16
set1 on u16: svdup_n_u16
set1 on i32: svdup_n_s32
set1 on u32: svdup_n_u32
set1 on i64: svdup_n_s64
set1 on u64: svdup_n_u64
set1 on f16: svdup_n_f16
set1 on f32: svdup_n_f32
set1 on f64: svdup_n_f64
set1l on i8: T
set1l on u8: T
set1l on i16: T
set1l on u16: T
set1l on i32: T
set1l on u32: T
set1l on i64: T
set1l on u64: T
set1l on f16: T
set1l on f32: T
set1l on f64: T
shl on i8: T
shl on u8: T
shl on i16: T
shl on u16: T
shl on i32: T
shl on u32: T
shl on i64: T
shl on u64: T
shl on f16: NA
shl on f32: NA
shl on f64: NA
shr on i8: T
shr on u8: T
shr on i16: T
shr on u16: T
shr on i32: T
shr on u32: T
shr on i64: T
shr on u64: T
shr on f16: NA
shr on f32: NA
shr on f64: NA
shra on i8: svasr_n_s8_x
shra on u8: T
shra on i16: svasr_n_s16_x
shra on u16: T
shra on i32: svasr_n_s32_x
shra on u32: T
shra on i64: svasr_n_s64_x
shra on u64: T
shra on f16: NA
shra on f32: NA
shra on f64: NA
sqrt on i8: NA
sqrt on u8: NA
sqrt on i16: NA
sqrt on u16: NA
sqrt on i32: NA
sqrt on u32: NA
sqrt on i64: NA
sqrt on u64: NA
sqrt on f16: svsqrt_f16_x
sqrt on f32: svsqrt_f32_x
sqrt on f64: svsqrt_f64_x
store2a on i8: T
store2a on u8: T
store2a on i16: T
store2a on u16: T
store2a on i32: T
store2a on u32: T
store2a on i64: T
store2a on u64: T
store2a on f16: T
store2a on f32: T
store2a on f64: T
store2u on i8: T
store2u on u8: T
store2u on i16: T
store2u on u16: T
store2u on i32: T
store2u on u32: T
store2u on i64: T
store2u on u64: T
store2u on f16: T
store2u on f32: T
store2u on f64: T
store3a on i8: T
store3a on u8: T
store3a on i16: T
store3a on u16: T
store3a on i32: T
store3a on u32: T
store3a on i64: T
store3a on u64: T
store3a on f16: T
store3a on f32: T
store3a on f64: T
store3u on i8: T
store3u on u8: T
store3u on i16: T
store3u on u16: T
store3u on i32: T
store3u on u32: T
store3u on i64: T
store3u on u64: T
store3u on f16: T
store3u on f32: T
store3u on f64: T
store4a on i8: T
store4a on u8: T
store4a on i16: T
store4a on u16: T
store4a on i32: T
store4a on u32: T
store4a on i64: T
store4a on u64: T
store4a on f16: T
store4a on f32: T
store4a on f64: T
store4u on i8: T
store4u on u8: T
store4u on i16: T
store4u on u16: T
store4u on i32: T
store4u on u32: T
store4u on i64: T
store4u on u64: T
store4u on f16: T
store4u on f32: T
store4u on f64: T
storea on i8: svst1_s8
storea on u8: svst1_u8
storea on i16: svst1_s16
storea on u16: svst1_u16
storea on i32: svst1_s32
storea on u32: svst1_u32
storea on i64: svst1_s64
storea on u64: svst1_u64
storea on f16: svst1_f16
storea on f32: svst1_f32
storea on f64: svst1_f64
storela on i8: T
storela on u8: T
storela on i16: T
storela on u16: T
storela on i32: T
storela on u32: T
storela on i64: T
storela on u64: T
storela on f16: T
storela on f32: T
storela on f64: T
storelu on i8: T
storelu on u8: T
storelu on i16: T
storelu on u16: T
storelu on i32: T
storelu on u32: T
storelu on i64: T
storelu on u64: T
storelu on f16: T
storelu on f32: T
storelu on f64: T
storeu on i8: svst1_s8
storeu on u8: svst1_u8
storeu on i16: svst1_s16
storeu on u16: svst1_u16
storeu on i32: svst1_s32
storeu on u32: svst1_u32
storeu on i64: svst1_s64
storeu on u64: svst1_u64
storeu on f16: svst1_f16
storeu on f32: svst1_f32
storeu on f64: svst1_f64
sub on i8: svsub_s8_x
sub on u8: svsub_u8_x
sub on i16: svsub_s16_x
sub on u16: svsub_u16_x
sub on i32: svsub_s32_x
sub on u32: svsub_u32_x
sub on i64: svsub_s64_x
sub on u64: svsub_u64_x
sub on f16: svsub_f16_x
sub on f32: svsub_f32_x
sub on f64: svsub_f64_x
subs on i8: svqsub_s8
subs on u8: svqsub_u8
subs on i16: svqsub_s16
subs on u16: svqsub_u16
subs on i32: svqsub_s32
subs on u32: svqsub_u32
subs on i64: svqsub_s64
subs on u64: svqsub_u64
subs on f16: T
subs on f32: T
subs on f64: T
to_logical on i8: T
to_logical on u8: T
to_logical on i16: T
to_logical on u16: T
to_logical on i32: T
to_logical on u32: T
to_logical on i64: T
to_logical on u64: T
to_logical on f16: T
to_logical on f32: T
to_logical on f64: T
to_mask on i8: T
to_mask on u8: T
to_mask on i16: T
to_mask on u16: T
to_mask on i32: T
to_mask on u32: T
to_mask on i64: T
to_mask on u64: T
to_mask on f16: T
to_mask on f32: T
to_mask on f64: T
trunc on i8: NOOP
trunc on u8: NOOP
trunc on i16: NOOP
trunc on u16: NOOP
trunc on i32: NOOP
trunc on u32: NOOP
trunc on i64: NOOP
trunc on u64: NOOP
trunc on f16: svrintz_f16_x
trunc on f32: svrintz_f32_x
trunc on f64: svrintz_f64_x
unzip on i8: T
unzip on u8: T
unzip on i16: T
unzip on u16: T
unzip on i32: T
unzip on u32: T
unzip on i64: T
unzip on u64: T
unzip on f16: T
unzip on f32: T
unzip on f64: T
unziphi on i8: svuzp2_s8
unziphi on u8: svuzp2_u8
unziphi on i16: svuzp2_s16
unziphi on u16: svuzp2_u16
unziphi on i32: svuzp2_s32
unziphi on u32: svuzp2_u32
unziphi on i64: svuzp2_s64
unziphi on u64: svuzp2_u64
unziphi on f16: svuzp2_f16
unziphi on f32: svuzp2_f32
unziphi on f64: svuzp2_f64
unziplo on i8: svuzp1_s8
unziplo on u8: svuzp1_u8
unziplo on i16: svuzp1_s16
unziplo on u16: svuzp1_u16
unziplo on i32: svuzp1_s32
unziplo on u32: svuzp1_u32
unziplo on i64: svuzp1_s64
unziplo on u64: svuzp1_u64
unziplo on f16: svuzp1_f16
unziplo on f32: svuzp1_f32
unziplo on f64: svuzp1_f64
xorb on i8: sveor_s8_x
xorb on u8: sveor_u8_x
xorb on i16: sveor_s16_x
xorb on u16: sveor_u16_x
xorb on i32: sveor_s32_x
xorb on u32: sveor_u32_x
xorb on i64: sveor_s64_x
xorb on u64: sveor_u64_x
xorb on f16: sveor_u16_x
xorb on f32: sveor_u32_x
xorb on f64: sveor_u64_x
xorl on i8: sveor_z
xorl on u8: sveor_z
xorl on i16: sveor_z
xorl on u16: sveor_z
xorl on i32: sveor_z
xorl on u32: sveor_z
xorl on i64: sveor_z
xorl on u64: sveor_z
xorl on f16: sveor_z
xorl on f32: sveor_z
xorl on f64: sveor_z
zip on i8: T
zip on u8: T
zip on i16: T
zip on u16: T
zip on i32: T
zip on u32: T
zip on i64: T
zip on u64: T
zip on f16: T
zip on f32: T
zip on f64: T
ziphi on i8: svzip2_s8
ziphi on u8: svzip2_u8
ziphi on i16: svzip2_s16
ziphi on u16: svzip2_u16
ziphi on i32: svzip2_s32
ziphi on u32: svzip2_u32
ziphi on i64: svzip2_s64
ziphi on u64: svzip2_u64
ziphi on f16: svzip2_f16
ziphi on f32: svzip2_f32
ziphi on f64: svzip2_f64
ziplo on i8: svzip1_s8
ziplo on u8: svzip1_u8
ziplo on i16: svzip1_s16
ziplo on u16: svzip1_u16
ziplo on i32: svzip1_s32
ziplo on u32: svzip1_u32
ziplo on i64: svzip1_s64
ziplo on u64: svzip1_u64
ziplo on f16: svzip1_f16
ziplo on f32: svzip1_f32
ziplo on f64: svzip1_f64
cvt from i8 to i8: NOOP
cvt from i8 to u8: NOOP
cvt from u8 to i8: NOOP
cvt from u8 to u8: NOOP
cvt from i16 to i16: NOOP
cvt from i16 to u16: NOOP
cvt from i16 to f16: svcvt_f16_s16_x
cvt from u16 to i16: NOOP
cvt from u16 to u16: NOOP
cvt from u16 to f16: svcvt_f16_u16_x
cvt from i32 to i32: NOOP
cvt from i32 to u32: NOOP
cvt from i32 to f32: svcvt_f32_s32_x
cvt from u32 to i32: NOOP
cvt from u32 to u32: NOOP
cvt from u32 to f32: svcvt_f32_u32_x
cvt from i64 to i64: NOOP
cvt from i64 to u64: NOOP
cvt from i64 to f64: svcvt_f64_s64_x
cvt from u64 to i64: NOOP
cvt from u64 to u64: NOOP
cvt from u64 to f64: svcvt_f64_u64_x
cvt from f16 to i16: svcvt_s16_f16_x
cvt from f16 to u16: svcvt_u16_f16_x
cvt from f16 to f16: NOOP
cvt from f32 to i32: svcvt_s32_f32_x
cvt from f32 to u32: svcvt_u32_f32_x
cvt from f32 to f32: NOOP
cvt from f64 to i64: svcvt_s64_f64_x
cvt from f64 to u64: svcvt_u64_f64_x
cvt from f64 to f64: NOOP
reinterpret from i8 to i8: NOOP
reinterpret from i8 to u8: NOOP
reinterpret from u8 to i8: NOOP
reinterpret from u8 to u8: NOOP
reinterpret from i16 to i16: NOOP
reinterpret from i16 to u16: NOOP
reinterpret from i16 to f16: NOOP
reinterpret from u16 to i16: NOOP
reinterpret from u16 to u16: NOOP
reinterpret from u16 to f16: NOOP
reinterpret from i32 to i32: NOOP
reinterpret from i32 to u32: NOOP
reinterpret from i32 to f32: NOOP
reinterpret from u32 to i32: NOOP
reinterpret from u32 to u32: NOOP
reinterpret from u32 to f32: NOOP
reinterpret from i64 to i64: NOOP
reinterpret from i64 to u64: NOOP
reinterpret from i64 to f64: NOOP
reinterpret from u64 to i64: NOOP
reinterpret from u64 to u64: NOOP
reinterpret from u64 to f64: NOOP
reinterpret from f16 to i16: NOOP
reinterpret from f16 to u16: NOOP
reinterpret from f16 to f16: NOOP
reinterpret from f32 to i32: NOOP
reinterpret from f32 to u32: NOOP
reinterpret from f32 to f32: NOOP
reinterpret from f64 to i64: NOOP
reinterpret from f64 to u64: NOOP
reinterpret from f64 to f64: NOOP
reinterpretl from i8 to i8: NOOP
reinterpretl from i8 to u8: NOOP
reinterpretl from u8 to i8: NOOP
reinterpretl from u8 to u8: NOOP
reinterpretl from i16 to i16: NOOP
reinterpretl from i16 to u16: NOOP
reinterpretl from i16 to f16: NOOP
reinterpretl from u16 to i16: NOOP
reinterpretl from u16 to u16: NOOP
reinterpretl from u16 to f16: NOOP
reinterpretl from i32 to i32: NOOP
reinterpretl from i32 to u32: NOOP
reinterpretl from i32 to f32: NOOP
reinterpretl from u32 to i32: NOOP
reinterpretl from u32 to u32: NOOP
reinterpretl from u32 to f32: NOOP
reinterpretl from i64 to i64: NOOP
reinterpretl from i64 to u64: NOOP
reinterpretl from i64 to f64: NOOP
reinterpretl from u64 to i64: NOOP
reinterpretl from u64 to u64: NOOP
reinterpretl from u64 to f64: NOOP
reinterpretl from f16 to i16: NOOP
reinterpretl from f16 to u16: NOOP
reinterpretl from f16 to f16: NOOP
reinterpretl from f32 to i32: NOOP
reinterpretl from f32 to u32: NOOP
reinterpretl from f32 to f32: NOOP
reinterpretl from f64 to i64: NOOP
reinterpretl from f64 to u64: NOOP
reinterpretl from f64 to f64: NOOP
upcvt from i8 to i16: T
upcvt from i8 to u16: T
upcvt from i8 to f16: T
upcvt from u8 to i16: T
upcvt from u8 to u16: T
upcvt from u8 to f16: T
upcvt from i16 to i32: T
upcvt from i16 to u32: T
upcvt from i16 to f32: T
upcvt from u16 to i32: T
upcvt from u16 to u32: T
upcvt from u16 to f32: T
upcvt from i32 to i64: T
upcvt from i32 to u64: T
upcvt from i32 to f64: T
upcvt from u32 to i64: T
upcvt from u32 to u64: T
upcvt from u32 to f64: T
upcvt from f16 to i32: T
upcvt from f16 to u32: T
upcvt from f16 to f32: T
upcvt from f32 to i64: T
upcvt from f32 to u64: T
upcvt from f32 to f64: T
downcvt from i16 to i8: svuzp1_s8
downcvt from i16 to u8: svuzp1_u8
downcvt from u16 to i8: svuzp1_s8
downcvt from u16 to u8: svuzp1_u8
downcvt from i32 to i16: svuzp1_s16
downcvt from i32 to u16: svuzp1_u16
downcvt from i32 to f16: T
downcvt from u32 to i16: svuzp1_s16
downcvt from u32 to u16: svuzp1_u16
downcvt from u32 to f16: T
downcvt from i64 to i32: svuzp1_s32
downcvt from i64 to u32: svuzp1_u32
downcvt from i64 to f32: T
downcvt from u64 to i32: svuzp1_s32
downcvt from u64 to u32: svuzp1_u32
downcvt from u64 to f32: T
downcvt from f16 to i8: T
downcvt from f16 to u8: T
downcvt from f32 to i16: T
downcvt from f32 to u16: T
downcvt from f32 to f16: T
downcvt from f64 to i32: T
downcvt from f64 to u32: T
downcvt from f64 to f32: T