Skip to main content

core/stdarch/crates/core_arch/src/aarch64/neon/
generated.rs

1// This code is automatically generated. DO NOT MODIFY.
2//
3// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file:
4//
5// ```
6// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec
7// ```
8#![allow(improper_ctypes)]
9
10#[cfg(test)]
11use stdarch_test::assert_instr;
12
13use super::*;
14
15#[doc = "CRC32-C single round checksum for quad words (64 bits)."]
16#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"]
17#[inline(always)]
18#[target_feature(enable = "crc")]
19#[cfg_attr(test, assert_instr(crc32cx))]
20#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
21pub fn __crc32cd(crc: u32, data: u64) -> u32 {
    // FFI declaration bound to the LLVM CRC32-C intrinsic via `link_name`.
22    unsafe extern "unadjusted" {
23        #[cfg_attr(
24            any(target_arch = "aarch64", target_arch = "arm64ec"),
25            link_name = "llvm.aarch64.crc32cx"
26        )]
27        fn ___crc32cd(crc: u32, data: u64) -> u32;
28    }
    // SAFETY: the `crc` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
29    unsafe { ___crc32cd(crc, data) }
30}
31#[doc = "CRC32 single round checksum for quad words (64 bits)."]
32#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"]
33#[inline(always)]
34#[target_feature(enable = "crc")]
35#[cfg_attr(test, assert_instr(crc32x))]
36#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
37pub fn __crc32d(crc: u32, data: u64) -> u32 {
    // FFI declaration bound to the LLVM CRC32 intrinsic via `link_name`.
38    unsafe extern "unadjusted" {
39        #[cfg_attr(
40            any(target_arch = "aarch64", target_arch = "arm64ec"),
41            link_name = "llvm.aarch64.crc32x"
42        )]
43        fn ___crc32d(crc: u32, data: u64) -> u32;
44    }
    // SAFETY: the `crc` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
45    unsafe { ___crc32d(crc, data) }
46}
47#[doc = "Floating-point JavaScript convert to signed fixed-point, rounding toward zero"]
48#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__jcvt)"]
49#[inline(always)]
50#[target_feature(enable = "jsconv")]
51#[cfg_attr(test, assert_instr(fjcvtzs))]
52#[stable(feature = "stdarch_aarch64_jscvt", since = "1.95.0")]
53pub fn __jcvt(a: f64) -> i32 {
    // FFI declaration bound to the LLVM FJCVTZS intrinsic via `link_name`.
54    unsafe extern "unadjusted" {
55        #[cfg_attr(
56            any(target_arch = "aarch64", target_arch = "arm64ec"),
57            link_name = "llvm.aarch64.fjcvtzs"
58        )]
59        fn ___jcvt(a: f64) -> i32;
60    }
    // SAFETY: the `jsconv` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
61    unsafe { ___jcvt(a) }
62}
63#[doc = "Signed Absolute difference and Accumulate Long"]
64#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"]
65#[inline(always)]
66#[target_feature(enable = "neon")]
67#[stable(feature = "neon_intrinsics", since = "1.59.0")]
68#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
69pub fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
70    unsafe {
        // Extract the upper 8 lanes of each 16-lane operand.
71        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
72        let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
73        let f: int8x8_t = vabd_s8(d, e);
        // Reinterpret the (non-negative) differences as unsigned so the
        // widening cast below zero-extends before accumulating into `a`.
74        let f: uint8x8_t = simd_cast(f);
75        simd_add(a, simd_cast(f))
76    }
77}
78#[doc = "Signed Absolute difference and Accumulate Long"]
79#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"]
80#[inline(always)]
81#[target_feature(enable = "neon")]
82#[stable(feature = "neon_intrinsics", since = "1.59.0")]
83#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
84pub fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
85    unsafe {
        // Extract the upper 4 lanes of each 8-lane operand.
86        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
87        let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
88        let f: int16x4_t = vabd_s16(d, e);
        // Reinterpret the (non-negative) differences as unsigned so the
        // widening cast below zero-extends before accumulating into `a`.
89        let f: uint16x4_t = simd_cast(f);
90        simd_add(a, simd_cast(f))
91    }
92}
93#[doc = "Signed Absolute difference and Accumulate Long"]
94#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"]
95#[inline(always)]
96#[target_feature(enable = "neon")]
97#[stable(feature = "neon_intrinsics", since = "1.59.0")]
98#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
99pub fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
100    unsafe {
        // Extract the upper 2 lanes of each 4-lane operand.
101        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
102        let e: int32x2_t = simd_shuffle!(c, c, [2, 3]);
103        let f: int32x2_t = vabd_s32(d, e);
        // Reinterpret the (non-negative) differences as unsigned so the
        // widening cast below zero-extends before accumulating into `a`.
104        let f: uint32x2_t = simd_cast(f);
105        simd_add(a, simd_cast(f))
106    }
107}
108#[doc = "Unsigned Absolute difference and Accumulate Long"]
109#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"]
110#[inline(always)]
111#[target_feature(enable = "neon")]
112#[stable(feature = "neon_intrinsics", since = "1.59.0")]
113#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
114pub fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
115    unsafe {
        // Extract the upper 8 lanes, take the absolute difference, then
        // widen (zero-extend) and accumulate into `a`.
116        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
117        let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
118        let f: uint8x8_t = vabd_u8(d, e);
119        simd_add(a, simd_cast(f))
120    }
121}
122#[doc = "Unsigned Absolute difference and Accumulate Long"]
123#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"]
124#[inline(always)]
125#[target_feature(enable = "neon")]
126#[stable(feature = "neon_intrinsics", since = "1.59.0")]
127#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
128pub fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
129    unsafe {
        // Extract the upper 4 lanes, take the absolute difference, then
        // widen (zero-extend) and accumulate into `a`.
130        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
131        let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
132        let f: uint16x4_t = vabd_u16(d, e);
133        simd_add(a, simd_cast(f))
134    }
135}
136#[doc = "Unsigned Absolute difference and Accumulate Long"]
137#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"]
138#[inline(always)]
139#[target_feature(enable = "neon")]
140#[stable(feature = "neon_intrinsics", since = "1.59.0")]
141#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
142pub fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
143    unsafe {
        // Extract the upper 2 lanes, take the absolute difference, then
        // widen (zero-extend) and accumulate into `a`.
144        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
145        let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
146        let f: uint32x2_t = vabd_u32(d, e);
147        simd_add(a, simd_cast(f))
148    }
149}
150#[doc = "Absolute difference between the arguments of Floating"]
151#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"]
152#[inline(always)]
153#[target_feature(enable = "neon")]
154#[stable(feature = "neon_intrinsics", since = "1.59.0")]
155#[cfg_attr(test, assert_instr(fabd))]
156pub fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // FFI declaration bound to the LLVM FABD intrinsic (1 x f64 lane).
157    unsafe extern "unadjusted" {
158        #[cfg_attr(
159            any(target_arch = "aarch64", target_arch = "arm64ec"),
160            link_name = "llvm.aarch64.neon.fabd.v1f64"
161        )]
162        fn _vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
163    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
164    unsafe { _vabd_f64(a, b) }
165}
166#[doc = "Absolute difference between the arguments of Floating"]
167#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"]
168#[inline(always)]
169#[target_feature(enable = "neon")]
170#[stable(feature = "neon_intrinsics", since = "1.59.0")]
171#[cfg_attr(test, assert_instr(fabd))]
172pub fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // FFI declaration bound to the LLVM FABD intrinsic (2 x f64 lanes).
173    unsafe extern "unadjusted" {
174        #[cfg_attr(
175            any(target_arch = "aarch64", target_arch = "arm64ec"),
176            link_name = "llvm.aarch64.neon.fabd.v2f64"
177        )]
178        fn _vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
179    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
180    unsafe { _vabdq_f64(a, b) }
181}
182#[doc = "Floating-point absolute difference"]
183#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"]
184#[inline(always)]
185#[target_feature(enable = "neon")]
186#[stable(feature = "neon_intrinsics", since = "1.59.0")]
187#[cfg_attr(test, assert_instr(fabd))]
188pub fn vabdd_f64(a: f64, b: f64) -> f64 {
    // Scalar form: splat both scalars into one-lane vectors, run the
    // vector absolute difference, then extract lane 0.
189    unsafe { simd_extract!(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
190}
191#[doc = "Floating-point absolute difference"]
192#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)"]
193#[inline(always)]
194#[target_feature(enable = "neon")]
195#[stable(feature = "neon_intrinsics", since = "1.59.0")]
196#[cfg_attr(test, assert_instr(fabd))]
197pub fn vabds_f32(a: f32, b: f32) -> f32 {
    // Scalar form: splat both scalars into vectors, run the vector
    // absolute difference, then extract lane 0.
198    unsafe { simd_extract!(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
199}
200#[doc = "Floating-point absolute difference"]
201#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdh_f16)"]
202#[inline(always)]
203#[target_feature(enable = "neon,fp16")]
204#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
205#[cfg(not(target_arch = "arm64ec"))]
206#[cfg_attr(test, assert_instr(fabd))]
207pub fn vabdh_f16(a: f16, b: f16) -> f16 {
    // Scalar form: splat both scalars into vectors, run the vector
    // absolute difference, then extract lane 0.
208    unsafe { simd_extract!(vabd_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
209}
210#[doc = "Signed Absolute difference Long"]
211#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"]
212#[inline(always)]
213#[target_feature(enable = "neon")]
214#[stable(feature = "neon_intrinsics", since = "1.59.0")]
215#[cfg_attr(test, assert_instr(sabdl2))]
216pub fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
217    unsafe {
        // Take the upper 4 lanes of each operand, compute the absolute
        // difference, reinterpret it as unsigned so the final widening
        // cast zero-extends.
218        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
219        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
220        let e: uint16x4_t = simd_cast(vabd_s16(c, d));
221        simd_cast(e)
222    }
223}
224#[doc = "Signed Absolute difference Long"]
225#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"]
226#[inline(always)]
227#[target_feature(enable = "neon")]
228#[stable(feature = "neon_intrinsics", since = "1.59.0")]
229#[cfg_attr(test, assert_instr(sabdl2))]
230pub fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
231    unsafe {
        // Take the upper 2 lanes of each operand, compute the absolute
        // difference, reinterpret it as unsigned so the final widening
        // cast zero-extends.
232        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
233        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
234        let e: uint32x2_t = simd_cast(vabd_s32(c, d));
235        simd_cast(e)
236    }
237}
238#[doc = "Signed Absolute difference Long"]
239#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"]
240#[inline(always)]
241#[target_feature(enable = "neon")]
242#[stable(feature = "neon_intrinsics", since = "1.59.0")]
243#[cfg_attr(test, assert_instr(sabdl2))]
244pub fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
245    unsafe {
        // Take the upper 8 lanes of each operand, compute the absolute
        // difference, reinterpret it as unsigned so the final widening
        // cast zero-extends.
246        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
247        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
248        let e: uint8x8_t = simd_cast(vabd_s8(c, d));
249        simd_cast(e)
250    }
251}
252#[doc = "Unsigned Absolute difference Long"]
253#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"]
254#[inline(always)]
255#[target_feature(enable = "neon")]
256#[cfg_attr(test, assert_instr(uabdl2))]
257#[stable(feature = "neon_intrinsics", since = "1.59.0")]
258pub fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
259    unsafe {
        // Take the upper 8 lanes of each operand, then widen the
        // absolute difference (zero-extend) to 16-bit lanes.
260        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
261        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
262        simd_cast(vabd_u8(c, d))
263    }
264}
265#[doc = "Unsigned Absolute difference Long"]
266#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"]
267#[inline(always)]
268#[target_feature(enable = "neon")]
269#[cfg_attr(test, assert_instr(uabdl2))]
270#[stable(feature = "neon_intrinsics", since = "1.59.0")]
271pub fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
272    unsafe {
        // Take the upper 4 lanes of each operand, then widen the
        // absolute difference (zero-extend) to 32-bit lanes.
273        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
274        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
275        simd_cast(vabd_u16(c, d))
276    }
277}
278#[doc = "Unsigned Absolute difference Long"]
279#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"]
280#[inline(always)]
281#[target_feature(enable = "neon")]
282#[cfg_attr(test, assert_instr(uabdl2))]
283#[stable(feature = "neon_intrinsics", since = "1.59.0")]
284pub fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
285    unsafe {
        // Take the upper 2 lanes of each operand, then widen the
        // absolute difference (zero-extend) to 64-bit lanes.
286        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
287        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
288        simd_cast(vabd_u32(c, d))
289    }
290}
291#[doc = "Floating-point absolute value"]
292#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"]
293#[inline(always)]
294#[target_feature(enable = "neon")]
295#[cfg_attr(test, assert_instr(fabs))]
296#[stable(feature = "neon_intrinsics", since = "1.59.0")]
297pub fn vabs_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise |x| via the generic simd_fabs helper.
298    unsafe { simd_fabs(a) }
299}
300#[doc = "Floating-point absolute value"]
301#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"]
302#[inline(always)]
303#[target_feature(enable = "neon")]
304#[cfg_attr(test, assert_instr(fabs))]
305#[stable(feature = "neon_intrinsics", since = "1.59.0")]
306pub fn vabsq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise |x| via the generic simd_fabs helper.
307    unsafe { simd_fabs(a) }
308}
309#[doc = "Absolute Value (wrapping)."]
310#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"]
311#[inline(always)]
312#[target_feature(enable = "neon")]
313#[stable(feature = "neon_intrinsics", since = "1.59.0")]
314#[cfg_attr(test, assert_instr(abs))]
315pub fn vabs_s64(a: int64x1_t) -> int64x1_t {
316    unsafe {
        // abs(x) = max(x, -x), built from negate + compare + select.
        // For i64::MIN, simd_neg wraps back to i64::MIN, so `a >= neg`
        // holds and i64::MIN is returned unchanged — hence "wrapping".
317        let neg: int64x1_t = simd_neg(a);
318        let mask: int64x1_t = simd_ge(a, neg);
319        simd_select(mask, a, neg)
320    }
321}
322#[doc = "Absolute Value (wrapping)."]
323#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"]
324#[inline(always)]
325#[target_feature(enable = "neon")]
326#[stable(feature = "neon_intrinsics", since = "1.59.0")]
327#[cfg_attr(test, assert_instr(abs))]
328pub fn vabsq_s64(a: int64x2_t) -> int64x2_t {
329    unsafe {
        // abs(x) = max(x, -x), built from negate + compare + select.
        // For i64::MIN, simd_neg wraps back to i64::MIN, so `a >= neg`
        // holds and i64::MIN is returned unchanged — hence "wrapping".
330        let neg: int64x2_t = simd_neg(a);
331        let mask: int64x2_t = simd_ge(a, neg);
332        simd_select(mask, a, neg)
333    }
334}
335#[doc = "Absolute Value (wrapping)."]
336#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"]
337#[inline(always)]
338#[target_feature(enable = "neon")]
339#[stable(feature = "neon_intrinsics", since = "1.59.0")]
340#[cfg_attr(test, assert_instr(abs))]
341pub fn vabsd_s64(a: i64) -> i64 {
    // FFI declaration bound to the scalar LLVM ABS intrinsic on i64.
342    unsafe extern "unadjusted" {
343        #[cfg_attr(
344            any(target_arch = "aarch64", target_arch = "arm64ec"),
345            link_name = "llvm.aarch64.neon.abs.i64"
346        )]
347        fn _vabsd_s64(a: i64) -> i64;
348    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
349    unsafe { _vabsd_s64(a) }
350}
351#[doc = "Add"]
352#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"]
353#[inline(always)]
354#[target_feature(enable = "neon")]
355#[stable(feature = "neon_intrinsics", since = "1.59.0")]
356#[cfg_attr(test, assert_instr(nop))]
357pub fn vaddd_s64(a: i64, b: i64) -> i64 {
    // Plain two's-complement addition; wrapping_add gives the NEON
    // wrap-on-overflow semantics. assert_instr(nop): no specific
    // instruction is mandated for this scalar form.
358    a.wrapping_add(b)
359}
360#[doc = "Add"]
361#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"]
362#[inline(always)]
363#[target_feature(enable = "neon")]
364#[stable(feature = "neon_intrinsics", since = "1.59.0")]
365#[cfg_attr(test, assert_instr(nop))]
366pub fn vaddd_u64(a: u64, b: u64) -> u64 {
    // Plain modular addition; wrapping_add gives the NEON
    // wrap-on-overflow semantics. assert_instr(nop): no specific
    // instruction is mandated for this scalar form.
367    a.wrapping_add(b)
368}
369#[doc = "Signed Add Long across Vector"]
370#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"]
371#[inline(always)]
372#[target_feature(enable = "neon")]
373#[stable(feature = "neon_intrinsics", since = "1.59.0")]
374#[cfg_attr(test, assert_instr(saddlv))]
375pub fn vaddlv_s16(a: int16x4_t) -> i32 {
    // FFI declaration bound to the LLVM SADDLV intrinsic (4 x i16 -> i32).
376    unsafe extern "unadjusted" {
377        #[cfg_attr(
378            any(target_arch = "aarch64", target_arch = "arm64ec"),
379            link_name = "llvm.aarch64.neon.saddlv.i32.v4i16"
380        )]
381        fn _vaddlv_s16(a: int16x4_t) -> i32;
382    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
383    unsafe { _vaddlv_s16(a) }
384}
385#[doc = "Signed Add Long across Vector"]
386#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"]
387#[inline(always)]
388#[target_feature(enable = "neon")]
389#[stable(feature = "neon_intrinsics", since = "1.59.0")]
390#[cfg_attr(test, assert_instr(saddlv))]
391pub fn vaddlvq_s16(a: int16x8_t) -> i32 {
    // FFI declaration bound to the LLVM SADDLV intrinsic (8 x i16 -> i32).
392    unsafe extern "unadjusted" {
393        #[cfg_attr(
394            any(target_arch = "aarch64", target_arch = "arm64ec"),
395            link_name = "llvm.aarch64.neon.saddlv.i32.v8i16"
396        )]
397        fn _vaddlvq_s16(a: int16x8_t) -> i32;
398    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
399    unsafe { _vaddlvq_s16(a) }
400}
401#[doc = "Signed Add Long across Vector"]
402#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"]
403#[inline(always)]
404#[target_feature(enable = "neon")]
405#[stable(feature = "neon_intrinsics", since = "1.59.0")]
406#[cfg_attr(test, assert_instr(saddlv))]
407pub fn vaddlvq_s32(a: int32x4_t) -> i64 {
    // FFI declaration bound to the LLVM SADDLV intrinsic (4 x i32 -> i64).
408    unsafe extern "unadjusted" {
409        #[cfg_attr(
410            any(target_arch = "aarch64", target_arch = "arm64ec"),
411            link_name = "llvm.aarch64.neon.saddlv.i64.v4i32"
412        )]
413        fn _vaddlvq_s32(a: int32x4_t) -> i64;
414    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
415    unsafe { _vaddlvq_s32(a) }
416}
417#[doc = "Signed Add Long across Vector"]
418#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"]
419#[inline(always)]
420#[target_feature(enable = "neon")]
421#[stable(feature = "neon_intrinsics", since = "1.59.0")]
422#[cfg_attr(test, assert_instr(saddlp))]
423pub fn vaddlv_s32(a: int32x2_t) -> i64 {
    // FFI declaration bound to the LLVM SADDLV intrinsic (2 x i32 -> i64).
    // Note the assert_instr above expects SADDLP: the two-lane reduction
    // lowers to a single signed pairwise add-long.
424    unsafe extern "unadjusted" {
425        #[cfg_attr(
426            any(target_arch = "aarch64", target_arch = "arm64ec"),
427            link_name = "llvm.aarch64.neon.saddlv.i64.v2i32"
428        )]
429        fn _vaddlv_s32(a: int32x2_t) -> i64;
430    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
431    unsafe { _vaddlv_s32(a) }
432}
433#[doc = "Signed Add Long across Vector"]
434#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"]
435#[inline(always)]
436#[target_feature(enable = "neon")]
437#[stable(feature = "neon_intrinsics", since = "1.59.0")]
438#[cfg_attr(test, assert_instr(saddlv))]
439pub fn vaddlv_s8(a: int8x8_t) -> i16 {
    // The LLVM intrinsic only exists with an i32 result; the sum of
    // eight i8 lanes always fits in i16, so the result is truncated
    // back to the intrinsic's documented i16 return type.
440    unsafe extern "unadjusted" {
441        #[cfg_attr(
442            any(target_arch = "aarch64", target_arch = "arm64ec"),
443            link_name = "llvm.aarch64.neon.saddlv.i32.v8i8"
444        )]
445        fn _vaddlv_s8(a: int8x8_t) -> i32;
446    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
447    unsafe { _vaddlv_s8(a) as i16 }
448}
449#[doc = "Signed Add Long across Vector"]
450#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"]
451#[inline(always)]
452#[target_feature(enable = "neon")]
453#[stable(feature = "neon_intrinsics", since = "1.59.0")]
454#[cfg_attr(test, assert_instr(saddlv))]
455pub fn vaddlvq_s8(a: int8x16_t) -> i16 {
    // The LLVM intrinsic only exists with an i32 result; the sum of
    // sixteen i8 lanes always fits in i16, so the result is truncated
    // back to the intrinsic's documented i16 return type.
456    unsafe extern "unadjusted" {
457        #[cfg_attr(
458            any(target_arch = "aarch64", target_arch = "arm64ec"),
459            link_name = "llvm.aarch64.neon.saddlv.i32.v16i8"
460        )]
461        fn _vaddlvq_s8(a: int8x16_t) -> i32;
462    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
463    unsafe { _vaddlvq_s8(a) as i16 }
464}
465#[doc = "Unsigned Add Long across Vector"]
466#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"]
467#[inline(always)]
468#[target_feature(enable = "neon")]
469#[stable(feature = "neon_intrinsics", since = "1.59.0")]
470#[cfg_attr(test, assert_instr(uaddlv))]
471pub fn vaddlv_u16(a: uint16x4_t) -> u32 {
    // FFI declaration bound to the LLVM UADDLV intrinsic (4 x u16 -> u32).
472    unsafe extern "unadjusted" {
473        #[cfg_attr(
474            any(target_arch = "aarch64", target_arch = "arm64ec"),
475            link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16"
476        )]
477        fn _vaddlv_u16(a: uint16x4_t) -> u32;
478    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
479    unsafe { _vaddlv_u16(a) }
480}
481#[doc = "Unsigned Add Long across Vector"]
482#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"]
483#[inline(always)]
484#[target_feature(enable = "neon")]
485#[stable(feature = "neon_intrinsics", since = "1.59.0")]
486#[cfg_attr(test, assert_instr(uaddlv))]
487pub fn vaddlvq_u16(a: uint16x8_t) -> u32 {
    // FFI declaration bound to the LLVM UADDLV intrinsic (8 x u16 -> u32).
488    unsafe extern "unadjusted" {
489        #[cfg_attr(
490            any(target_arch = "aarch64", target_arch = "arm64ec"),
491            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16"
492        )]
493        fn _vaddlvq_u16(a: uint16x8_t) -> u32;
494    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
495    unsafe { _vaddlvq_u16(a) }
496}
497#[doc = "Unsigned Add Long across Vector"]
498#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"]
499#[inline(always)]
500#[target_feature(enable = "neon")]
501#[stable(feature = "neon_intrinsics", since = "1.59.0")]
502#[cfg_attr(test, assert_instr(uaddlv))]
503pub fn vaddlvq_u32(a: uint32x4_t) -> u64 {
    // FFI declaration bound to the LLVM UADDLV intrinsic (4 x u32 -> u64).
504    unsafe extern "unadjusted" {
505        #[cfg_attr(
506            any(target_arch = "aarch64", target_arch = "arm64ec"),
507            link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32"
508        )]
509        fn _vaddlvq_u32(a: uint32x4_t) -> u64;
510    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
511    unsafe { _vaddlvq_u32(a) }
512}
513#[doc = "Unsigned Add Long across Vector"]
514#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"]
515#[inline(always)]
516#[target_feature(enable = "neon")]
517#[stable(feature = "neon_intrinsics", since = "1.59.0")]
518#[cfg_attr(test, assert_instr(uaddlp))]
519pub fn vaddlv_u32(a: uint32x2_t) -> u64 {
    // FFI declaration bound to the LLVM UADDLV intrinsic (2 x u32 -> u64).
    // Note the assert_instr above expects UADDLP: the two-lane reduction
    // lowers to a single unsigned pairwise add-long.
520    unsafe extern "unadjusted" {
521        #[cfg_attr(
522            any(target_arch = "aarch64", target_arch = "arm64ec"),
523            link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32"
524        )]
525        fn _vaddlv_u32(a: uint32x2_t) -> u64;
526    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
527    unsafe { _vaddlv_u32(a) }
528}
529#[doc = "Unsigned Add Long across Vector"]
530#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"]
531#[inline(always)]
532#[target_feature(enable = "neon")]
533#[stable(feature = "neon_intrinsics", since = "1.59.0")]
534#[cfg_attr(test, assert_instr(uaddlv))]
535pub fn vaddlv_u8(a: uint8x8_t) -> u16 {
    // The LLVM intrinsic only exists with a 32-bit result; the sum of
    // eight u8 lanes always fits in u16, so the result is truncated
    // back to the intrinsic's documented u16 return type.
536    unsafe extern "unadjusted" {
537        #[cfg_attr(
538            any(target_arch = "aarch64", target_arch = "arm64ec"),
539            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8"
540        )]
541        fn _vaddlv_u8(a: uint8x8_t) -> i32;
542    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
543    unsafe { _vaddlv_u8(a) as u16 }
544}
545#[doc = "Unsigned Add Long across Vector"]
546#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"]
547#[inline(always)]
548#[target_feature(enable = "neon")]
549#[stable(feature = "neon_intrinsics", since = "1.59.0")]
550#[cfg_attr(test, assert_instr(uaddlv))]
551pub fn vaddlvq_u8(a: uint8x16_t) -> u16 {
    // The LLVM intrinsic only exists with a 32-bit result; the sum of
    // sixteen u8 lanes always fits in u16, so the result is truncated
    // back to the intrinsic's documented u16 return type.
552    unsafe extern "unadjusted" {
553        #[cfg_attr(
554            any(target_arch = "aarch64", target_arch = "arm64ec"),
555            link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8"
556        )]
557        fn _vaddlvq_u8(a: uint8x16_t) -> i32;
558    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
559    unsafe { _vaddlvq_u8(a) as u16 }
560}
561#[doc = "Floating-point add across vector"]
562#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"]
563#[inline(always)]
564#[target_feature(enable = "neon")]
565#[stable(feature = "neon_intrinsics", since = "1.59.0")]
566#[cfg_attr(test, assert_instr(faddp))]
567pub fn vaddv_f32(a: float32x2_t) -> f32 {
    // FFI declaration bound to the LLVM FADDV intrinsic (2 x f32 -> f32).
568    unsafe extern "unadjusted" {
569        #[cfg_attr(
570            any(target_arch = "aarch64", target_arch = "arm64ec"),
571            link_name = "llvm.aarch64.neon.faddv.f32.v2f32"
572        )]
573        fn _vaddv_f32(a: float32x2_t) -> f32;
574    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
575    unsafe { _vaddv_f32(a) }
576}
577#[doc = "Floating-point add across vector"]
578#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"]
579#[inline(always)]
580#[target_feature(enable = "neon")]
581#[stable(feature = "neon_intrinsics", since = "1.59.0")]
582#[cfg_attr(test, assert_instr(faddp))]
583pub fn vaddvq_f32(a: float32x4_t) -> f32 {
    // FFI declaration bound to the LLVM FADDV intrinsic (4 x f32 -> f32).
584    unsafe extern "unadjusted" {
585        #[cfg_attr(
586            any(target_arch = "aarch64", target_arch = "arm64ec"),
587            link_name = "llvm.aarch64.neon.faddv.f32.v4f32"
588        )]
589        fn _vaddvq_f32(a: float32x4_t) -> f32;
590    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
591    unsafe { _vaddvq_f32(a) }
592}
593#[doc = "Floating-point add across vector"]
594#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"]
595#[inline(always)]
596#[target_feature(enable = "neon")]
597#[stable(feature = "neon_intrinsics", since = "1.59.0")]
598#[cfg_attr(test, assert_instr(faddp))]
599pub fn vaddvq_f64(a: float64x2_t) -> f64 {
    // FFI declaration bound to the LLVM FADDV intrinsic (2 x f64 -> f64).
600    unsafe extern "unadjusted" {
601        #[cfg_attr(
602            any(target_arch = "aarch64", target_arch = "arm64ec"),
603            link_name = "llvm.aarch64.neon.faddv.f64.v2f64"
604        )]
605        fn _vaddvq_f64(a: float64x2_t) -> f64;
606    }
    // SAFETY: the `neon` target feature required by the intrinsic is
    // enabled by the #[target_feature] attribute on this function.
607    unsafe { _vaddvq_f64(a) }
608}
609#[doc = "Add across vector"]
610#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"]
611#[inline(always)]
612#[target_feature(enable = "neon")]
613#[stable(feature = "neon_intrinsics", since = "1.59.0")]
614#[cfg_attr(test, assert_instr(addp))]
615pub fn vaddv_s32(a: int32x2_t) -> i32 {
    // Ordered lane-sum starting from 0 via the generic reduce helper.
616    unsafe { simd_reduce_add_ordered(a, 0) }
617}
618#[doc = "Add across vector"]
619#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"]
620#[inline(always)]
621#[target_feature(enable = "neon")]
622#[stable(feature = "neon_intrinsics", since = "1.59.0")]
623#[cfg_attr(test, assert_instr(addv))]
624pub fn vaddv_s8(a: int8x8_t) -> i8 {
    // Ordered lane-sum starting from 0 via the generic reduce helper.
625    unsafe { simd_reduce_add_ordered(a, 0) }
626}
627#[doc = "Add across vector"]
628#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"]
629#[inline(always)]
630#[target_feature(enable = "neon")]
631#[stable(feature = "neon_intrinsics", since = "1.59.0")]
632#[cfg_attr(test, assert_instr(addv))]
633pub fn vaddvq_s8(a: int8x16_t) -> i8 {
    // Ordered lane-sum starting from 0 via the generic reduce helper.
634    unsafe { simd_reduce_add_ordered(a, 0) }
635}
636#[doc = "Add across vector"]
637#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"]
638#[inline(always)]
639#[target_feature(enable = "neon")]
640#[stable(feature = "neon_intrinsics", since = "1.59.0")]
641#[cfg_attr(test, assert_instr(addv))]
642pub fn vaddv_s16(a: int16x4_t) -> i16 {
    // Ordered lane-sum starting from 0 via the generic reduce helper.
643    unsafe { simd_reduce_add_ordered(a, 0) }
644}
645#[doc = "Add across vector"]
646#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"]
647#[inline(always)]
648#[target_feature(enable = "neon")]
649#[stable(feature = "neon_intrinsics", since = "1.59.0")]
650#[cfg_attr(test, assert_instr(addv))]
651pub fn vaddvq_s16(a: int16x8_t) -> i16 {
    // Ordered lane-sum starting from 0 via the generic reduce helper.
652    unsafe { simd_reduce_add_ordered(a, 0) }
653}
654#[doc = "Add across vector"]
655#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"]
656#[inline(always)]
657#[target_feature(enable = "neon")]
658#[stable(feature = "neon_intrinsics", since = "1.59.0")]
659#[cfg_attr(test, assert_instr(addv))]
660pub fn vaddvq_s32(a: int32x4_t) -> i32 {
    // Ordered lane-sum starting from 0 via the generic reduce helper.
661    unsafe { simd_reduce_add_ordered(a, 0) }
662}
663#[doc = "Add across vector"]
664#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"]
665#[inline(always)]
666#[target_feature(enable = "neon")]
667#[stable(feature = "neon_intrinsics", since = "1.59.0")]
668#[cfg_attr(test, assert_instr(addp))]
669pub fn vaddv_u32(a: uint32x2_t) -> u32 {
    // Ordered lane-sum starting from 0 via the generic reduce helper.
670    unsafe { simd_reduce_add_ordered(a, 0) }
671}
672#[doc = "Add across vector"]
673#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"]
674#[inline(always)]
675#[target_feature(enable = "neon")]
676#[stable(feature = "neon_intrinsics", since = "1.59.0")]
677#[cfg_attr(test, assert_instr(addv))]
678pub fn vaddv_u8(a: uint8x8_t) -> u8 {
    // Ordered lane-sum starting from 0 via the generic reduce helper.
679    unsafe { simd_reduce_add_ordered(a, 0) }
680}
681#[doc = "Add across vector"]
682#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"]
683#[inline(always)]
684#[target_feature(enable = "neon")]
685#[stable(feature = "neon_intrinsics", since = "1.59.0")]
686#[cfg_attr(test, assert_instr(addv))]
687pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
    // Ordered lane-sum starting from 0 via the generic reduce helper.
688    unsafe { simd_reduce_add_ordered(a, 0) }
689}
690#[doc = "Add across vector"]
691#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"]
692#[inline(always)]
693#[target_feature(enable = "neon")]
694#[stable(feature = "neon_intrinsics", since = "1.59.0")]
695#[cfg_attr(test, assert_instr(addv))]
696pub fn vaddv_u16(a: uint16x4_t) -> u16 {
    // Ordered lane-sum starting from 0 via the generic reduce helper.
697    unsafe { simd_reduce_add_ordered(a, 0) }
698}
699#[doc = "Add across vector"]
700#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"]
701#[inline(always)]
702#[target_feature(enable = "neon")]
703#[stable(feature = "neon_intrinsics", since = "1.59.0")]
704#[cfg_attr(test, assert_instr(addv))]
705pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
    // Ordered lane-sum starting from 0 via the generic reduce helper.
706    unsafe { simd_reduce_add_ordered(a, 0) }
707}
708#[doc = "Add across vector"]
709#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"]
710#[inline(always)]
711#[target_feature(enable = "neon")]
712#[stable(feature = "neon_intrinsics", since = "1.59.0")]
713#[cfg_attr(test, assert_instr(addv))]
714pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
    // Ordered lane-sum starting from 0 via the generic reduce helper.
715    unsafe { simd_reduce_add_ordered(a, 0) }
716}
717#[doc = "Add across vector"]
718#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"]
719#[inline(always)]
720#[target_feature(enable = "neon")]
721#[stable(feature = "neon_intrinsics", since = "1.59.0")]
722#[cfg_attr(test, assert_instr(addp))]
723pub fn vaddvq_s64(a: int64x2_t) -> i64 {
    // Ordered lane-sum starting from 0; the two-lane case is expected
    // to lower to a single pairwise add (assert_instr(addp)).
724    unsafe { simd_reduce_add_ordered(a, 0) }
725}
726#[doc = "Add across vector"]
727#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"]
728#[inline(always)]
729#[target_feature(enable = "neon")]
730#[stable(feature = "neon_intrinsics", since = "1.59.0")]
731#[cfg_attr(test, assert_instr(addp))]
732pub fn vaddvq_u64(a: uint64x2_t) -> u64 {
733    unsafe { simd_reduce_add_ordered(a, 0) }
734}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v4f16"
        )]
        fn _vamax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the `neon,faminmax` features required by FAMAX are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vamax_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v8f16"
        )]
        fn _vamaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the `neon,faminmax` features required by FAMAX are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vamaxq_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f32"
        )]
        fn _vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `neon,faminmax` features required by FAMAX are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vamax_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v4f32"
        )]
        fn _vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `neon,faminmax` features required by FAMAX are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vamaxq_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f64"
        )]
        fn _vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `neon,faminmax` features required by FAMAX are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vamaxq_f64(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v4f16"
        )]
        fn _vamin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the `neon,faminmax` features required by FAMIN are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vamin_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v8f16"
        )]
        fn _vaminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the `neon,faminmax` features required by FAMIN are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vaminq_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f32"
        )]
        fn _vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `neon,faminmax` features required by FAMIN are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vamin_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v4f32"
        )]
        fn _vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `neon,faminmax` features required by FAMIN are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vaminq_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f64"
        )]
        fn _vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `neon,faminmax` features required by FAMIN are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vaminq_f64(a, b) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    // Signed lanes bind to the `bcaxs` LLVM intrinsic (unsigned use `bcaxu`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v16i8"
        )]
        fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    // SAFETY: the `neon,sha3` features required by BCAX are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vbcaxq_s8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Signed lanes bind to the `bcaxs` LLVM intrinsic (unsigned use `bcaxu`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v8i16"
        )]
        fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: the `neon,sha3` features required by BCAX are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vbcaxq_s16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Signed lanes bind to the `bcaxs` LLVM intrinsic (unsigned use `bcaxu`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v4i32"
        )]
        fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: the `neon,sha3` features required by BCAX are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vbcaxq_s32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    // Signed lanes bind to the `bcaxs` LLVM intrinsic (unsigned use `bcaxu`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v2i64"
        )]
        fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    // SAFETY: the `neon,sha3` features required by BCAX are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vbcaxq_s64(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // Unsigned lanes bind to the `bcaxu` LLVM intrinsic (signed use `bcaxs`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v16i8"
        )]
        fn _vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: the `neon,sha3` features required by BCAX are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vbcaxq_u8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    // Unsigned lanes bind to the `bcaxu` LLVM intrinsic (signed use `bcaxs`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v8i16"
        )]
        fn _vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: the `neon,sha3` features required by BCAX are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vbcaxq_u16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Unsigned lanes bind to the `bcaxu` LLVM intrinsic (signed use `bcaxs`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v4i32"
        )]
        fn _vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the `neon,sha3` features required by BCAX are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vbcaxq_u32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Unsigned lanes bind to the `bcaxu` LLVM intrinsic (signed use `bcaxs`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v2i64"
        )]
        fn _vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the `neon,sha3` features required by BCAX are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vbcaxq_u64(a, b, c) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f16"
        )]
        fn _vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the features required by FCADD (`fcma`, plus `fp16` for
    // half-precision lanes) are enabled via the attributes above; the
    // intrinsic only reads its by-value args.
    unsafe { _vcadd_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v8f16"
        )]
        fn _vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the features required by FCADD (`fcma`, plus `fp16` for
    // half-precision lanes) are enabled via the attributes above; the
    // intrinsic only reads its by-value args.
    unsafe { _vcaddq_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32"
        )]
        fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `neon,fcma` features required by FCADD are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vcadd_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32"
        )]
        fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `neon,fcma` features required by FCADD are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vcaddq_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64"
        )]
        fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `neon,fcma` features required by FCADD are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vcaddq_rot270_f64(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f16"
        )]
        fn _vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the features required by FCADD (`fcma`, plus `fp16` for
    // half-precision lanes) are enabled via the attributes above; the
    // intrinsic only reads its by-value args.
    unsafe { _vcadd_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v8f16"
        )]
        fn _vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the features required by FCADD (`fcma`, plus `fp16` for
    // half-precision lanes) are enabled via the attributes above; the
    // intrinsic only reads its by-value args.
    unsafe { _vcaddq_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32"
        )]
        fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `neon,fcma` features required by FCADD are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vcadd_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32"
        )]
        fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `neon,fcma` features required by FCADD are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vcaddq_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64"
        )]
        fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `neon,fcma` features required by FCADD are enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vcaddq_rot90_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v1i64.v1f64"
        )]
        fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the `neon` feature required by FACGE is enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vcage_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v2i64.v2f64"
        )]
        fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the `neon` feature required by FACGE is enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vcageq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaged_f64(a: f64, b: f64) -> u64 {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i64.f64"
        )]
        fn _vcaged_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: the `neon` feature required by FACGE is enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vcaged_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcages_f32(a: f32, b: f32) -> u32 {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f32"
        )]
        fn _vcages_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: the `neon` feature required by FACGE is enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vcages_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcageh_f16(a: f16, b: f16) -> u16 {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f16"
        )]
        fn _vcageh_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: the `neon,fp16` features required by FACGE on half-precision
    // operands are enabled via `#[target_feature]` above; value-only call.
    // The LLVM intrinsic yields an i32 mask; truncating to u16 keeps the
    // lane-width all-ones/all-zeros result.
    unsafe { _vcageh_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64"
        )]
        fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the `neon` feature required by FACGT is enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vcagt_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64"
        )]
        fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the `neon` feature required by FACGT is enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vcagtq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtd_f64(a: f64, b: f64) -> u64 {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i64.f64"
        )]
        fn _vcagtd_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: the `neon` feature required by FACGT is enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vcagtd_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagts_f32(a: f32, b: f32) -> u32 {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f32"
        )]
        fn _vcagts_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: the `neon` feature required by FACGT is enabled via
    // `#[target_feature]` above; the intrinsic only reads its by-value args.
    unsafe { _vcagts_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcagth_f16(a: f16, b: f16) -> u16 {
    // Per-target binding to the backing LLVM intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f16"
        )]
        fn _vcagth_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: the `neon,fp16` features required by FACGT on half-precision
    // operands are enabled via `#[target_feature]` above; value-only call.
    // The LLVM intrinsic yields an i32 mask; truncating to u16 keeps the
    // lane-width all-ones/all-zeros result.
    unsafe { _vcagth_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| <= |b| is |b| >= |a|: reuse the absolute-GE compare with the
    // operands swapped (still a single FACGE, per assert_instr above).
    vcage_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // |a| <= |b| is |b| >= |a|: reuse the absolute-GE compare with the
    // operands swapped (still a single FACGE, per assert_instr above).
    vcageq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaled_f64(a: f64, b: f64) -> u64 {
    // |a| <= |b| is |b| >= |a|: reuse the absolute-GE compare with the
    // operands swapped (still a single FACGE, per assert_instr above).
    vcaged_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcales_f32(a: f32, b: f32) -> u32 {
    // |a| <= |b| is |b| >= |a|: reuse the absolute-GE compare with the
    // operands swapped (still a single FACGE, per assert_instr above).
    vcages_f32(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcaleh_f16(a: f16, b: f16) -> u16 {
    // |a| <= |b| is |b| >= |a|: reuse the absolute-GE compare with the
    // operands swapped (still a single FACGE, per assert_instr above).
    vcageh_f16(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| < |b| is equivalent to |b| > |a|, so delegate to the
    // absolute-compare-greater-than intrinsic with swapped operands
    // (hence the FACGT instruction asserted above, not a dedicated FACLT).
    vcagt_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Quad-register form: |a| < |b| computed as |b| > |a| with swapped
    // operands on the FACGT-based helper.
    vcagtq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltd_f64(a: f64, b: f64) -> u64 {
    // Scalar form: |a| < |b| computed as |b| > |a| via the swapped-operand
    // FACGT-based helper. Returns an all-ones/all-zeros u64 mask.
    vcagtd_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalts_f32(a: f32, b: f32) -> u32 {
    // Scalar form: |a| < |b| computed as |b| > |a| via the swapped-operand
    // FACGT-based helper. Returns an all-ones/all-zeros u32 mask.
    vcagts_f32(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcalth_f16(a: f16, b: f16) -> u16 {
    // Half-precision scalar form: |a| < |b| computed as |b| > |a| via the
    // swapped-operand FACGT-based helper. Requires the fp16 target feature.
    vcagth_f16(b, a)
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Lane-wise equality via the generic SIMD comparison: each result lane
    // is all ones where a == b, all zeros otherwise.
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Lane-wise equality on the quad register: all-ones mask per equal lane.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Lane-wise integer equality: all-ones mask per equal lane.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // Lane-wise integer equality on the quad register.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Lane-wise unsigned equality: all-ones mask per equal lane.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Lane-wise unsigned equality on the quad register.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    // Polynomial lanes compare by raw bit pattern, so the generic integer
    // equality applies directly.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    // Quad-register polynomial equality by raw bit pattern.
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_f64(a: f64, b: f64) -> u64 {
    // Scalar form: splat both scalars into vectors, run the vector compare,
    // then extract lane 0 of the resulting mask.
    unsafe { simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqs_f32(a: f32, b: f32) -> u32 {
    // Scalar form: splat, vector-compare, extract lane 0 of the mask.
    unsafe { simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_s64(a: i64, b: i64) -> u64 {
    // Scalar form: reinterpret each i64 as a one-lane vector, compare, and
    // reinterpret the one-lane mask back to u64 (same size, so transmute is sound).
    unsafe { transmute(vceq_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_u64(a: u64, b: u64) -> u64 {
    // Scalar form: reinterpret each u64 as a one-lane vector, compare, and
    // reinterpret the one-lane mask back to u64.
    unsafe { transmute(vceq_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqh_f16(a: f16, b: f16) -> u16 {
    // Half-precision scalar form: splat, vector-compare, extract lane 0.
    unsafe { simd_extract!(vceq_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t {
    // Build an all-zero vector and compare each lane of `a` against it;
    // the transmute reinterprets the portable f16x4 as the NEON vector type.
    let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqzq_f16(a: float16x8_t) -> uint16x8_t {
    // Quad-register zero compare: each lane yields all ones when == 0.0.
    let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
    // Compare each lane against an all-zero vector of the same shape.
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
    // Quad-register zero compare against an all-zero vector.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
    // One-lane vector: a plain f64 zero is transmuted directly into
    // float64x1_t (identical size/layout), then compared lane-wise.
    let b: f64 = 0.0;
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
    // Quad-register zero compare against an all-zero vector.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
    // Compare each lane against an all-zero vector of the same shape.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
    // Quad-register zero compare against an all-zero vector.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
    // Compare each lane against an all-zero vector of the same shape.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
    // Quad-register zero compare against an all-zero vector.
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
    // Compare each lane against an all-zero vector of the same shape.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
    // Quad-register zero compare against an all-zero vector.
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
    // One-lane zero compare against an all-zero vector.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
    // Quad-register zero compare against an all-zero vector.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
    // NOTE(review): doc says "Signed" but the operand is a polynomial vector;
    // the comparison is bitwise so the result is the same. Generator quirk.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
    // NOTE(review): doc says "Signed" but the operand is a polynomial vector;
    // bitwise equality makes signedness irrelevant here. Generator quirk.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
    // NOTE(review): doc says "Signed" for a polynomial operand; equality is
    // bitwise so behavior is unaffected. Generator quirk.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
    // NOTE(review): doc says "Signed" for a polynomial operand; equality is
    // bitwise so behavior is unaffected. Generator quirk.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
    // Compare each lane against an all-zero vector of the same shape.
    let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
    // Quad-register zero compare against an all-zero vector.
    let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
    // Compare each lane against an all-zero vector of the same shape.
    let b: u16x4 = u16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
    // Quad-register zero compare against an all-zero vector.
    let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
    // Compare each lane against an all-zero vector of the same shape.
    let b: u32x2 = u32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
    // Quad-register zero compare against an all-zero vector.
    let b: u32x4 = u32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
    // One-lane zero compare against an all-zero vector.
    let b: u64x1 = u64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
    // Quad-register zero compare against an all-zero vector.
    let b: u64x2 = u64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_s64(a: i64) -> u64 {
    // Scalar form: reinterpret the i64 as a one-lane vector, zero-compare,
    // and reinterpret the one-lane mask back to u64.
    unsafe { transmute(vceqz_s64(transmute(a))) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_u64(a: u64) -> u64 {
    // Scalar form: reinterpret the u64 as a one-lane vector, zero-compare,
    // and reinterpret the one-lane mask back to u64.
    unsafe { transmute(vceqz_u64(transmute(a))) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqzh_f16(a: f16) -> u16 {
    // Half-precision scalar form: splat, vector zero-compare, extract lane 0.
    unsafe { simd_extract!(vceqz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzs_f32(a: f32) -> u32 {
    // Scalar form: splat, vector zero-compare, extract lane 0 of the mask.
    unsafe { simd_extract!(vceqz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_f64(a: f64) -> u64 {
    // Scalar form: splat, vector zero-compare, extract lane 0 of the mask.
    unsafe { simd_extract!(vceqz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Lane-wise a >= b via the generic SIMD comparison: each result lane is
    // all ones where true, all zeros otherwise.
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Quad-register lane-wise a >= b comparison.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Signed lane-wise a >= b; signedness comes from the int64x1_t lane type.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // Quad-register signed lane-wise a >= b comparison.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Unsigned lane-wise a >= b; the unsigned lane type selects CMHS
    // (compare higher-or-same) rather than the signed CMGE.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Quad-register unsigned lane-wise a >= b (CMHS) comparison.
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_f64(a: f64, b: f64) -> u64 {
    // Scalar form: splat both scalars, run the vector compare, extract lane 0.
    unsafe { simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcges_f32(a: f32, b: f32) -> u32 {
    // Scalar form: splat, vector-compare, extract lane 0 of the mask.
    unsafe { simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_s64(a: i64, b: i64) -> u64 {
    // Scalar form: reinterpret each i64 as a one-lane vector, compare, and
    // reinterpret the one-lane mask back to u64.
    unsafe { transmute(vcge_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_u64(a: u64, b: u64) -> u64 {
    // Scalar form: reinterpret each u64 as a one-lane vector, compare, and
    // reinterpret the one-lane mask back to u64.
    unsafe { transmute(vcge_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgeh_f16(a: f16, b: f16) -> u16 {
    // Half-precision scalar form: splat, vector-compare, extract lane 0.
    unsafe { simd_extract!(vcge_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
    // Compare each lane against an all-zero vector: lane >= 0.0 yields all ones.
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
    // Quad-register lane-wise a >= 0.0 against an all-zero vector.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
    // One-lane vector: a plain f64 zero is transmuted directly into
    // float64x1_t (identical size/layout), then compared lane-wise.
    let b: f64 = 0.0;
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
    // Quad-register lane-wise a >= 0.0 against an all-zero vector.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
    // Signed lane-wise a >= 0 against an all-zero vector.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
    // Quad-register signed lane-wise a >= 0 against an all-zero vector.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
    // Signed lane-wise a >= 0 against an all-zero vector.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
    // Quad-register signed lane-wise a >= 0 against an all-zero vector.
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
    // Signed lane-wise a >= 0 against an all-zero vector.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
2061#[doc = "Compare signed greater than or equal to zero"]
2062#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"]
2063#[inline(always)]
2064#[target_feature(enable = "neon")]
2065#[cfg_attr(test, assert_instr(cmge))]
2066#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2067pub fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
2068    let b: i32x4 = i32x4::new(0, 0, 0, 0);
2069    unsafe { simd_ge(a, transmute(b)) }
2070}
2071#[doc = "Compare signed greater than or equal to zero"]
2072#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"]
2073#[inline(always)]
2074#[target_feature(enable = "neon")]
2075#[cfg_attr(test, assert_instr(cmge))]
2076#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2077pub fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
2078    let b: i64x1 = i64x1::new(0);
2079    unsafe { simd_ge(a, transmute(b)) }
2080}
2081#[doc = "Compare signed greater than or equal to zero"]
2082#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"]
2083#[inline(always)]
2084#[target_feature(enable = "neon")]
2085#[cfg_attr(test, assert_instr(cmge))]
2086#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2087pub fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
2088    let b: i64x2 = i64x2::new(0, 0);
2089    unsafe { simd_ge(a, transmute(b)) }
2090}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_f64(a: f64) -> u64 {
    // SAFETY: `vdup_n_f64` produces a one-lane vector, so lane index 0 is in bounds.
    unsafe { simd_extract!(vcgez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezs_f32(a: f32) -> u32 {
    // SAFETY: `vdup_n_f32` fills every lane with `a`, so extracting lane 0 is in bounds.
    unsafe { simd_extract!(vcgez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_s64(a: i64) -> u64 {
    // SAFETY: `i64`/`int64x1_t` and `uint64x1_t`/`u64` are all 64 bits with identical
    // layout, so both transmutes are valid.
    unsafe { transmute(vcgez_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgezh_f16(a: f16) -> u16 {
    // SAFETY: `vdup_n_f16` fills every lane with `a`, so extracting lane 0 is in bounds.
    unsafe { simd_extract!(vcgez_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // SAFETY: `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // SAFETY: `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // SAFETY: `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // SAFETY: `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // SAFETY: `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_f64(a: f64, b: f64) -> u64 {
    // SAFETY: `vdup_n_f64` produces a one-lane vector, so lane index 0 is in bounds.
    unsafe { simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgts_f32(a: f32, b: f32) -> u32 {
    // SAFETY: `vdup_n_f32` fills every lane, so extracting lane 0 is in bounds.
    unsafe { simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_s64(a: i64, b: i64) -> u64 {
    // SAFETY: the scalar and one-lane 64-bit NEON types have identical size and layout,
    // so every transmute is valid.
    unsafe { transmute(vcgt_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_u64(a: u64, b: u64) -> u64 {
    // SAFETY: the scalar and one-lane 64-bit NEON types have identical size and layout,
    // so every transmute is valid.
    unsafe { transmute(vcgt_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgth_f16(a: f16, b: f16) -> u16 {
    // SAFETY: `vdup_n_f16` fills every lane, so extracting lane 0 is in bounds.
    unsafe { simd_extract!(vcgt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    // SAFETY: `f64` and the one-lane `float64x1_t` are both 64 bits, so the transmute
    // is valid; `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_gt` is a lane-wise `>` of two identically-shaped vectors.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_f64(a: f64) -> u64 {
    // SAFETY: `vdup_n_f64` produces a one-lane vector, so lane index 0 is in bounds.
    unsafe { simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzs_f32(a: f32) -> u32 {
    // SAFETY: `vdup_n_f32` fills every lane, so extracting lane 0 is in bounds.
    unsafe { simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_s64(a: i64) -> u64 {
    // SAFETY: the scalar and one-lane 64-bit NEON types have identical size and layout,
    // so both transmutes are valid.
    unsafe { transmute(vcgtz_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgtzh_f16(a: f16) -> u16 {
    // SAFETY: `vdup_n_f16` fills every lane, so extracting lane 0 is in bounds.
    unsafe { simd_extract!(vcgtz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // SAFETY: `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // SAFETY: `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // SAFETY: `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // SAFETY: `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // SAFETY: `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_f64(a: f64, b: f64) -> u64 {
    // SAFETY: `vdup_n_f64` produces a one-lane vector, so lane index 0 is in bounds.
    unsafe { simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcles_f32(a: f32, b: f32) -> u32 {
    // SAFETY: `vdup_n_f32` fills every lane, so extracting lane 0 is in bounds.
    unsafe { simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_u64(a: u64, b: u64) -> u64 {
    // SAFETY: the scalar and one-lane 64-bit NEON types have identical size and layout,
    // so every transmute is valid.
    unsafe { transmute(vcle_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_s64(a: i64, b: i64) -> u64 {
    // SAFETY: the scalar and one-lane 64-bit NEON types have identical size and layout,
    // so every transmute is valid.
    unsafe { transmute(vcle_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcleh_f16(a: f16, b: f16) -> u16 {
    // SAFETY: `vdup_n_f16` fills every lane, so extracting lane 0 is in bounds.
    unsafe { simd_extract!(vcle_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    // SAFETY: `f64` and the one-lane `float64x1_t` are both 64 bits, so the transmute
    // is valid; `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    // SAFETY: the zero vector has the same size and layout as the NEON argument type;
    // `simd_le` is a lane-wise `<=` of two identically-shaped vectors.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_f64(a: f64) -> u64 {
    // SAFETY: `vdup_n_f64` produces a one-lane vector, so lane index 0 is in bounds.
    unsafe { simd_extract!(vclez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezs_f32(a: f32) -> u32 {
    // SAFETY: `vdup_n_f32` fills every lane, so extracting lane 0 is in bounds.
    unsafe { simd_extract!(vclez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_s64(a: i64) -> u64 {
    // SAFETY: the scalar and one-lane 64-bit NEON types have identical size and layout,
    // so both transmutes are valid.
    unsafe { transmute(vclez_s64(transmute(a))) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vclezh_f16(a: f16) -> u16 {
    // SAFETY: `vdup_n_f16` fills every lane, so extracting lane 0 is in bounds.
    unsafe { simd_extract!(vclez_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // SAFETY: `simd_lt` is a lane-wise `<` of two identically-shaped vectors.
    unsafe { simd_lt(a, b) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // SAFETY: `simd_lt` is a lane-wise `<` of two identically-shaped vectors.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // SAFETY: `simd_lt` is a lane-wise `<` of two identically-shaped vectors.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // SAFETY: `simd_lt` is a lane-wise `<` of two identically-shaped vectors.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // SAFETY: `simd_lt` is a lane-wise `<` of two identically-shaped vectors.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: `simd_lt` is a lane-wise `<` of two identically-shaped vectors.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_u64(a: u64, b: u64) -> u64 {
    // SAFETY: the scalar and one-lane 64-bit NEON types have identical size and layout,
    // so every transmute is valid.
    unsafe { transmute(vclt_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_s64(a: i64, b: i64) -> u64 {
    // SAFETY: the scalar and one-lane 64-bit NEON types have identical size and layout,
    // so every transmute is valid.
    unsafe { transmute(vclt_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vclth_f16(a: f16, b: f16) -> u16 {
    // SAFETY: `vdup_n_f16` fills every lane, so extracting lane 0 is in bounds.
    unsafe { simd_extract!(vclt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclts_f32(a: f32, b: f32) -> u32 {
    // SAFETY: `vdup_n_f32` fills every lane, so extracting lane 0 is in bounds.
    unsafe { simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_f64(a: f64, b: f64) -> u64 {
    // SAFETY: `vdup_n_f64` produces a one-lane vector, so lane index 0 is in bounds.
    unsafe { simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
2742#[doc = "Floating-point compare less than zero"]
2743#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"]
2744#[inline(always)]
2745#[target_feature(enable = "neon")]
2746#[cfg_attr(test, assert_instr(fcmlt))]
2747#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2748pub fn vcltz_f32(a: float32x2_t) -> uint32x2_t {
2749    let b: f32x2 = f32x2::new(0.0, 0.0);
2750    unsafe { simd_lt(a, transmute(b)) }
2751}
2752#[doc = "Floating-point compare less than zero"]
2753#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"]
2754#[inline(always)]
2755#[target_feature(enable = "neon")]
2756#[cfg_attr(test, assert_instr(fcmlt))]
2757#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2758pub fn vcltzq_f32(a: float32x4_t) -> uint32x4_t {
2759    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
2760    unsafe { simd_lt(a, transmute(b)) }
2761}
2762#[doc = "Floating-point compare less than zero"]
2763#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"]
2764#[inline(always)]
2765#[target_feature(enable = "neon")]
2766#[cfg_attr(test, assert_instr(fcmlt))]
2767#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2768pub fn vcltz_f64(a: float64x1_t) -> uint64x1_t {
2769    let b: f64 = 0.0;
2770    unsafe { simd_lt(a, transmute(b)) }
2771}
2772#[doc = "Floating-point compare less than zero"]
2773#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"]
2774#[inline(always)]
2775#[target_feature(enable = "neon")]
2776#[cfg_attr(test, assert_instr(fcmlt))]
2777#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2778pub fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
2779    let b: f64x2 = f64x2::new(0.0, 0.0);
2780    unsafe { simd_lt(a, transmute(b)) }
2781}
2782#[doc = "Compare signed less than zero"]
2783#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"]
2784#[inline(always)]
2785#[target_feature(enable = "neon")]
2786#[cfg_attr(test, assert_instr(cmlt))]
2787#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2788pub fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
2789    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2790    unsafe { simd_lt(a, transmute(b)) }
2791}
2792#[doc = "Compare signed less than zero"]
2793#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"]
2794#[inline(always)]
2795#[target_feature(enable = "neon")]
2796#[cfg_attr(test, assert_instr(cmlt))]
2797#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2798pub fn vcltzq_s8(a: int8x16_t) -> uint8x16_t {
2799    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
2800    unsafe { simd_lt(a, transmute(b)) }
2801}
2802#[doc = "Compare signed less than zero"]
2803#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"]
2804#[inline(always)]
2805#[target_feature(enable = "neon")]
2806#[cfg_attr(test, assert_instr(cmlt))]
2807#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2808pub fn vcltz_s16(a: int16x4_t) -> uint16x4_t {
2809    let b: i16x4 = i16x4::new(0, 0, 0, 0);
2810    unsafe { simd_lt(a, transmute(b)) }
2811}
2812#[doc = "Compare signed less than zero"]
2813#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"]
2814#[inline(always)]
2815#[target_feature(enable = "neon")]
2816#[cfg_attr(test, assert_instr(cmlt))]
2817#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2818pub fn vcltzq_s16(a: int16x8_t) -> uint16x8_t {
2819    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2820    unsafe { simd_lt(a, transmute(b)) }
2821}
2822#[doc = "Compare signed less than zero"]
2823#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"]
2824#[inline(always)]
2825#[target_feature(enable = "neon")]
2826#[cfg_attr(test, assert_instr(cmlt))]
2827#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2828pub fn vcltz_s32(a: int32x2_t) -> uint32x2_t {
2829    let b: i32x2 = i32x2::new(0, 0);
2830    unsafe { simd_lt(a, transmute(b)) }
2831}
2832#[doc = "Compare signed less than zero"]
2833#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"]
2834#[inline(always)]
2835#[target_feature(enable = "neon")]
2836#[cfg_attr(test, assert_instr(cmlt))]
2837#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2838pub fn vcltzq_s32(a: int32x4_t) -> uint32x4_t {
2839    let b: i32x4 = i32x4::new(0, 0, 0, 0);
2840    unsafe { simd_lt(a, transmute(b)) }
2841}
2842#[doc = "Compare signed less than zero"]
2843#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"]
2844#[inline(always)]
2845#[target_feature(enable = "neon")]
2846#[cfg_attr(test, assert_instr(cmlt))]
2847#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2848pub fn vcltz_s64(a: int64x1_t) -> uint64x1_t {
2849    let b: i64x1 = i64x1::new(0);
2850    unsafe { simd_lt(a, transmute(b)) }
2851}
2852#[doc = "Compare signed less than zero"]
2853#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"]
2854#[inline(always)]
2855#[target_feature(enable = "neon")]
2856#[cfg_attr(test, assert_instr(cmlt))]
2857#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2858pub fn vcltzq_s64(a: int64x2_t) -> uint64x2_t {
2859    let b: i64x2 = i64x2::new(0, 0);
2860    unsafe { simd_lt(a, transmute(b)) }
2861}
2862#[doc = "Floating-point compare less than zero"]
2863#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"]
2864#[inline(always)]
2865#[target_feature(enable = "neon")]
2866#[cfg_attr(test, assert_instr(fcmp))]
2867#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2868pub fn vcltzd_f64(a: f64) -> u64 {
2869    unsafe { simd_extract!(vcltz_f64(vdup_n_f64(a)), 0) }
2870}
2871#[doc = "Floating-point compare less than zero"]
2872#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"]
2873#[inline(always)]
2874#[target_feature(enable = "neon")]
2875#[cfg_attr(test, assert_instr(fcmp))]
2876#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2877pub fn vcltzs_f32(a: f32) -> u32 {
2878    unsafe { simd_extract!(vcltz_f32(vdup_n_f32(a)), 0) }
2879}
2880#[doc = "Compare less than zero"]
2881#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"]
2882#[inline(always)]
2883#[target_feature(enable = "neon")]
2884#[cfg_attr(test, assert_instr(asr))]
2885#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2886pub fn vcltzd_s64(a: i64) -> u64 {
2887    unsafe { transmute(vcltz_s64(transmute(a))) }
2888}
2889#[doc = "Floating-point compare less than zero"]
2890#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzh_f16)"]
2891#[inline(always)]
2892#[cfg_attr(test, assert_instr(fcmp))]
2893#[target_feature(enable = "neon,fp16")]
2894#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2895#[cfg(not(target_arch = "arm64ec"))]
2896pub fn vcltzh_f16(a: f16) -> u16 {
2897    unsafe { simd_extract!(vcltz_f16(vdup_n_f16(a)), 0) }
2898}
2899#[doc = "Floating-point complex multiply accumulate"]
2900#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f16)"]
2901#[inline(always)]
2902#[target_feature(enable = "neon,fcma")]
2903#[target_feature(enable = "neon,fp16")]
2904#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
2905#[cfg(not(target_arch = "arm64ec"))]
2906#[cfg_attr(test, assert_instr(fcmla))]
2907pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
2908    unsafe extern "unadjusted" {
2909        #[cfg_attr(
2910            any(target_arch = "aarch64", target_arch = "arm64ec"),
2911            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f16"
2912        )]
2913        fn _vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
2914    }
2915    unsafe { _vcmla_f16(a, b, c) }
2916}
2917#[doc = "Floating-point complex multiply accumulate"]
2918#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"]
2919#[inline(always)]
2920#[target_feature(enable = "neon,fcma")]
2921#[target_feature(enable = "neon,fp16")]
2922#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
2923#[cfg(not(target_arch = "arm64ec"))]
2924#[cfg_attr(test, assert_instr(fcmla))]
2925pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
2926    unsafe extern "unadjusted" {
2927        #[cfg_attr(
2928            any(target_arch = "aarch64", target_arch = "arm64ec"),
2929            link_name = "llvm.aarch64.neon.vcmla.rot0.v8f16"
2930        )]
2931        fn _vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
2932    }
2933    unsafe { _vcmlaq_f16(a, b, c) }
2934}
2935#[doc = "Floating-point complex multiply accumulate"]
2936#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"]
2937#[inline(always)]
2938#[target_feature(enable = "neon,fcma")]
2939#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
2940#[cfg_attr(test, assert_instr(fcmla))]
2941pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
2942    unsafe extern "unadjusted" {
2943        #[cfg_attr(
2944            any(target_arch = "aarch64", target_arch = "arm64ec"),
2945            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32"
2946        )]
2947        fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
2948    }
2949    unsafe { _vcmla_f32(a, b, c) }
2950}
2951#[doc = "Floating-point complex multiply accumulate"]
2952#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"]
2953#[inline(always)]
2954#[target_feature(enable = "neon,fcma")]
2955#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
2956#[cfg_attr(test, assert_instr(fcmla))]
2957pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
2958    unsafe extern "unadjusted" {
2959        #[cfg_attr(
2960            any(target_arch = "aarch64", target_arch = "arm64ec"),
2961            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32"
2962        )]
2963        fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
2964    }
2965    unsafe { _vcmlaq_f32(a, b, c) }
2966}
2967#[doc = "Floating-point complex multiply accumulate"]
2968#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"]
2969#[inline(always)]
2970#[target_feature(enable = "neon,fcma")]
2971#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
2972#[cfg_attr(test, assert_instr(fcmla))]
2973pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
2974    unsafe extern "unadjusted" {
2975        #[cfg_attr(
2976            any(target_arch = "aarch64", target_arch = "arm64ec"),
2977            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64"
2978        )]
2979        fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
2980    }
2981    unsafe { _vcmlaq_f64(a, b, c) }
2982}
2983#[doc = "Floating-point complex multiply accumulate"]
2984#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f16)"]
2985#[inline(always)]
2986#[target_feature(enable = "neon,fcma")]
2987#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
2988#[rustc_legacy_const_generics(3)]
2989#[target_feature(enable = "neon,fp16")]
2990#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
2991#[cfg(not(target_arch = "arm64ec"))]
2992pub fn vcmla_lane_f16<const LANE: i32>(
2993    a: float16x4_t,
2994    b: float16x4_t,
2995    c: float16x4_t,
2996) -> float16x4_t {
2997    static_assert_uimm_bits!(LANE, 1);
2998    unsafe {
2999        let c: float16x4_t = simd_shuffle!(
3000            c,
3001            c,
3002            [
3003                2 * LANE as u32,
3004                2 * LANE as u32 + 1,
3005                2 * LANE as u32,
3006                2 * LANE as u32 + 1
3007            ]
3008        );
3009        vcmla_f16(a, b, c)
3010    }
3011}
3012#[doc = "Floating-point complex multiply accumulate"]
3013#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f16)"]
3014#[inline(always)]
3015#[target_feature(enable = "neon,fcma")]
3016#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3017#[rustc_legacy_const_generics(3)]
3018#[target_feature(enable = "neon,fp16")]
3019#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3020#[cfg(not(target_arch = "arm64ec"))]
3021pub fn vcmlaq_lane_f16<const LANE: i32>(
3022    a: float16x8_t,
3023    b: float16x8_t,
3024    c: float16x4_t,
3025) -> float16x8_t {
3026    static_assert_uimm_bits!(LANE, 1);
3027    unsafe {
3028        let c: float16x8_t = simd_shuffle!(
3029            c,
3030            c,
3031            [
3032                2 * LANE as u32,
3033                2 * LANE as u32 + 1,
3034                2 * LANE as u32,
3035                2 * LANE as u32 + 1,
3036                2 * LANE as u32,
3037                2 * LANE as u32 + 1,
3038                2 * LANE as u32,
3039                2 * LANE as u32 + 1
3040            ]
3041        );
3042        vcmlaq_f16(a, b, c)
3043    }
3044}
3045#[doc = "Floating-point complex multiply accumulate"]
3046#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"]
3047#[inline(always)]
3048#[target_feature(enable = "neon,fcma")]
3049#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3050#[rustc_legacy_const_generics(3)]
3051#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3052pub fn vcmla_lane_f32<const LANE: i32>(
3053    a: float32x2_t,
3054    b: float32x2_t,
3055    c: float32x2_t,
3056) -> float32x2_t {
3057    static_assert!(LANE == 0);
3058    unsafe {
3059        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3060        vcmla_f32(a, b, c)
3061    }
3062}
3063#[doc = "Floating-point complex multiply accumulate"]
3064#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"]
3065#[inline(always)]
3066#[target_feature(enable = "neon,fcma")]
3067#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3068#[rustc_legacy_const_generics(3)]
3069#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3070pub fn vcmlaq_lane_f32<const LANE: i32>(
3071    a: float32x4_t,
3072    b: float32x4_t,
3073    c: float32x2_t,
3074) -> float32x4_t {
3075    static_assert!(LANE == 0);
3076    unsafe {
3077        let c: float32x4_t = simd_shuffle!(
3078            c,
3079            c,
3080            [
3081                2 * LANE as u32,
3082                2 * LANE as u32 + 1,
3083                2 * LANE as u32,
3084                2 * LANE as u32 + 1
3085            ]
3086        );
3087        vcmlaq_f32(a, b, c)
3088    }
3089}
3090#[doc = "Floating-point complex multiply accumulate"]
3091#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f16)"]
3092#[inline(always)]
3093#[target_feature(enable = "neon,fcma")]
3094#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3095#[rustc_legacy_const_generics(3)]
3096#[target_feature(enable = "neon,fp16")]
3097#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3098#[cfg(not(target_arch = "arm64ec"))]
3099pub fn vcmla_laneq_f16<const LANE: i32>(
3100    a: float16x4_t,
3101    b: float16x4_t,
3102    c: float16x8_t,
3103) -> float16x4_t {
3104    static_assert_uimm_bits!(LANE, 2);
3105    unsafe {
3106        let c: float16x4_t = simd_shuffle!(
3107            c,
3108            c,
3109            [
3110                2 * LANE as u32,
3111                2 * LANE as u32 + 1,
3112                2 * LANE as u32,
3113                2 * LANE as u32 + 1
3114            ]
3115        );
3116        vcmla_f16(a, b, c)
3117    }
3118}
3119#[doc = "Floating-point complex multiply accumulate"]
3120#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f16)"]
3121#[inline(always)]
3122#[target_feature(enable = "neon,fcma")]
3123#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3124#[rustc_legacy_const_generics(3)]
3125#[target_feature(enable = "neon,fp16")]
3126#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3127#[cfg(not(target_arch = "arm64ec"))]
3128pub fn vcmlaq_laneq_f16<const LANE: i32>(
3129    a: float16x8_t,
3130    b: float16x8_t,
3131    c: float16x8_t,
3132) -> float16x8_t {
3133    static_assert_uimm_bits!(LANE, 2);
3134    unsafe {
3135        let c: float16x8_t = simd_shuffle!(
3136            c,
3137            c,
3138            [
3139                2 * LANE as u32,
3140                2 * LANE as u32 + 1,
3141                2 * LANE as u32,
3142                2 * LANE as u32 + 1,
3143                2 * LANE as u32,
3144                2 * LANE as u32 + 1,
3145                2 * LANE as u32,
3146                2 * LANE as u32 + 1
3147            ]
3148        );
3149        vcmlaq_f16(a, b, c)
3150    }
3151}
3152#[doc = "Floating-point complex multiply accumulate"]
3153#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"]
3154#[inline(always)]
3155#[target_feature(enable = "neon,fcma")]
3156#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3157#[rustc_legacy_const_generics(3)]
3158#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3159pub fn vcmla_laneq_f32<const LANE: i32>(
3160    a: float32x2_t,
3161    b: float32x2_t,
3162    c: float32x4_t,
3163) -> float32x2_t {
3164    static_assert_uimm_bits!(LANE, 1);
3165    unsafe {
3166        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3167        vcmla_f32(a, b, c)
3168    }
3169}
3170#[doc = "Floating-point complex multiply accumulate"]
3171#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"]
3172#[inline(always)]
3173#[target_feature(enable = "neon,fcma")]
3174#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3175#[rustc_legacy_const_generics(3)]
3176#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3177pub fn vcmlaq_laneq_f32<const LANE: i32>(
3178    a: float32x4_t,
3179    b: float32x4_t,
3180    c: float32x4_t,
3181) -> float32x4_t {
3182    static_assert_uimm_bits!(LANE, 1);
3183    unsafe {
3184        let c: float32x4_t = simd_shuffle!(
3185            c,
3186            c,
3187            [
3188                2 * LANE as u32,
3189                2 * LANE as u32 + 1,
3190                2 * LANE as u32,
3191                2 * LANE as u32 + 1
3192            ]
3193        );
3194        vcmlaq_f32(a, b, c)
3195    }
3196}
3197#[doc = "Floating-point complex multiply accumulate"]
3198#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f16)"]
3199#[inline(always)]
3200#[target_feature(enable = "neon,fcma")]
3201#[target_feature(enable = "neon,fp16")]
3202#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3203#[cfg(not(target_arch = "arm64ec"))]
3204#[cfg_attr(test, assert_instr(fcmla))]
3205pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
3206    unsafe extern "unadjusted" {
3207        #[cfg_attr(
3208            any(target_arch = "aarch64", target_arch = "arm64ec"),
3209            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f16"
3210        )]
3211        fn _vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
3212    }
3213    unsafe { _vcmla_rot180_f16(a, b, c) }
3214}
3215#[doc = "Floating-point complex multiply accumulate"]
3216#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f16)"]
3217#[inline(always)]
3218#[target_feature(enable = "neon,fcma")]
3219#[target_feature(enable = "neon,fp16")]
3220#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3221#[cfg(not(target_arch = "arm64ec"))]
3222#[cfg_attr(test, assert_instr(fcmla))]
3223pub fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
3224    unsafe extern "unadjusted" {
3225        #[cfg_attr(
3226            any(target_arch = "aarch64", target_arch = "arm64ec"),
3227            link_name = "llvm.aarch64.neon.vcmla.rot180.v8f16"
3228        )]
3229        fn _vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
3230    }
3231    unsafe { _vcmlaq_rot180_f16(a, b, c) }
3232}
3233#[doc = "Floating-point complex multiply accumulate"]
3234#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"]
3235#[inline(always)]
3236#[target_feature(enable = "neon,fcma")]
3237#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3238#[cfg_attr(test, assert_instr(fcmla))]
3239pub fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
3240    unsafe extern "unadjusted" {
3241        #[cfg_attr(
3242            any(target_arch = "aarch64", target_arch = "arm64ec"),
3243            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32"
3244        )]
3245        fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
3246    }
3247    unsafe { _vcmla_rot180_f32(a, b, c) }
3248}
3249#[doc = "Floating-point complex multiply accumulate"]
3250#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"]
3251#[inline(always)]
3252#[target_feature(enable = "neon,fcma")]
3253#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3254#[cfg_attr(test, assert_instr(fcmla))]
3255pub fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
3256    unsafe extern "unadjusted" {
3257        #[cfg_attr(
3258            any(target_arch = "aarch64", target_arch = "arm64ec"),
3259            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32"
3260        )]
3261        fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
3262    }
3263    unsafe { _vcmlaq_rot180_f32(a, b, c) }
3264}
3265#[doc = "Floating-point complex multiply accumulate"]
3266#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"]
3267#[inline(always)]
3268#[target_feature(enable = "neon,fcma")]
3269#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3270#[cfg_attr(test, assert_instr(fcmla))]
3271pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
3272    unsafe extern "unadjusted" {
3273        #[cfg_attr(
3274            any(target_arch = "aarch64", target_arch = "arm64ec"),
3275            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64"
3276        )]
3277        fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
3278    }
3279    unsafe { _vcmlaq_rot180_f64(a, b, c) }
3280}
3281#[doc = "Floating-point complex multiply accumulate"]
3282#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f16)"]
3283#[inline(always)]
3284#[target_feature(enable = "neon,fcma")]
3285#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3286#[rustc_legacy_const_generics(3)]
3287#[target_feature(enable = "neon,fp16")]
3288#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3289#[cfg(not(target_arch = "arm64ec"))]
3290pub fn vcmla_rot180_lane_f16<const LANE: i32>(
3291    a: float16x4_t,
3292    b: float16x4_t,
3293    c: float16x4_t,
3294) -> float16x4_t {
3295    static_assert_uimm_bits!(LANE, 1);
3296    unsafe {
3297        let c: float16x4_t = simd_shuffle!(
3298            c,
3299            c,
3300            [
3301                2 * LANE as u32,
3302                2 * LANE as u32 + 1,
3303                2 * LANE as u32,
3304                2 * LANE as u32 + 1
3305            ]
3306        );
3307        vcmla_rot180_f16(a, b, c)
3308    }
3309}
3310#[doc = "Floating-point complex multiply accumulate"]
3311#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f16)"]
3312#[inline(always)]
3313#[target_feature(enable = "neon,fcma")]
3314#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3315#[rustc_legacy_const_generics(3)]
3316#[target_feature(enable = "neon,fp16")]
3317#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3318#[cfg(not(target_arch = "arm64ec"))]
3319pub fn vcmlaq_rot180_lane_f16<const LANE: i32>(
3320    a: float16x8_t,
3321    b: float16x8_t,
3322    c: float16x4_t,
3323) -> float16x8_t {
3324    static_assert_uimm_bits!(LANE, 1);
3325    unsafe {
3326        let c: float16x8_t = simd_shuffle!(
3327            c,
3328            c,
3329            [
3330                2 * LANE as u32,
3331                2 * LANE as u32 + 1,
3332                2 * LANE as u32,
3333                2 * LANE as u32 + 1,
3334                2 * LANE as u32,
3335                2 * LANE as u32 + 1,
3336                2 * LANE as u32,
3337                2 * LANE as u32 + 1
3338            ]
3339        );
3340        vcmlaq_rot180_f16(a, b, c)
3341    }
3342}
3343#[doc = "Floating-point complex multiply accumulate"]
3344#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"]
3345#[inline(always)]
3346#[target_feature(enable = "neon,fcma")]
3347#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3348#[rustc_legacy_const_generics(3)]
3349#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3350pub fn vcmla_rot180_lane_f32<const LANE: i32>(
3351    a: float32x2_t,
3352    b: float32x2_t,
3353    c: float32x2_t,
3354) -> float32x2_t {
3355    static_assert!(LANE == 0);
3356    unsafe {
3357        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3358        vcmla_rot180_f32(a, b, c)
3359    }
3360}
3361#[doc = "Floating-point complex multiply accumulate"]
3362#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"]
3363#[inline(always)]
3364#[target_feature(enable = "neon,fcma")]
3365#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3366#[rustc_legacy_const_generics(3)]
3367#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3368pub fn vcmlaq_rot180_lane_f32<const LANE: i32>(
3369    a: float32x4_t,
3370    b: float32x4_t,
3371    c: float32x2_t,
3372) -> float32x4_t {
3373    static_assert!(LANE == 0);
3374    unsafe {
3375        let c: float32x4_t = simd_shuffle!(
3376            c,
3377            c,
3378            [
3379                2 * LANE as u32,
3380                2 * LANE as u32 + 1,
3381                2 * LANE as u32,
3382                2 * LANE as u32 + 1
3383            ]
3384        );
3385        vcmlaq_rot180_f32(a, b, c)
3386    }
3387}
3388#[doc = "Floating-point complex multiply accumulate"]
3389#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f16)"]
3390#[inline(always)]
3391#[target_feature(enable = "neon,fcma")]
3392#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3393#[rustc_legacy_const_generics(3)]
3394#[target_feature(enable = "neon,fp16")]
3395#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3396#[cfg(not(target_arch = "arm64ec"))]
3397pub fn vcmla_rot180_laneq_f16<const LANE: i32>(
3398    a: float16x4_t,
3399    b: float16x4_t,
3400    c: float16x8_t,
3401) -> float16x4_t {
3402    static_assert_uimm_bits!(LANE, 2);
3403    unsafe {
3404        let c: float16x4_t = simd_shuffle!(
3405            c,
3406            c,
3407            [
3408                2 * LANE as u32,
3409                2 * LANE as u32 + 1,
3410                2 * LANE as u32,
3411                2 * LANE as u32 + 1
3412            ]
3413        );
3414        vcmla_rot180_f16(a, b, c)
3415    }
3416}
3417#[doc = "Floating-point complex multiply accumulate"]
3418#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f16)"]
3419#[inline(always)]
3420#[target_feature(enable = "neon,fcma")]
3421#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3422#[rustc_legacy_const_generics(3)]
3423#[target_feature(enable = "neon,fp16")]
3424#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3425#[cfg(not(target_arch = "arm64ec"))]
3426pub fn vcmlaq_rot180_laneq_f16<const LANE: i32>(
3427    a: float16x8_t,
3428    b: float16x8_t,
3429    c: float16x8_t,
3430) -> float16x8_t {
3431    static_assert_uimm_bits!(LANE, 2);
3432    unsafe {
3433        let c: float16x8_t = simd_shuffle!(
3434            c,
3435            c,
3436            [
3437                2 * LANE as u32,
3438                2 * LANE as u32 + 1,
3439                2 * LANE as u32,
3440                2 * LANE as u32 + 1,
3441                2 * LANE as u32,
3442                2 * LANE as u32 + 1,
3443                2 * LANE as u32,
3444                2 * LANE as u32 + 1
3445            ]
3446        );
3447        vcmlaq_rot180_f16(a, b, c)
3448    }
3449}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // LANE selects one of the 2 complex (real, imag) element pairs in `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow `c` to the selected pair (elements 2*LANE and 2*LANE+1),
        // then delegate to the non-lane form.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // LANE selects one of the 2 complex (real, imag) element pairs in `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (elements 2*LANE and 2*LANE+1) across
        // both pairs of the vector, then delegate to the non-lane form.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Binding to the backing LLVM intrinsic; the link name encodes the
    // 270-degree rotation variant and the vector shape (v4f16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f16"
        )]
        fn _vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the intrinsic requires the `fcma`/`fp16` target features, which
    // the #[target_feature] attributes on this function guarantee.
    unsafe { _vcmla_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Binding to the backing LLVM intrinsic; the link name encodes the
    // 270-degree rotation variant and the vector shape (v8f16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v8f16"
        )]
        fn _vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the intrinsic requires the `fcma`/`fp16` target features, which
    // the #[target_feature] attributes on this function guarantee.
    unsafe { _vcmlaq_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Binding to the backing LLVM intrinsic; the link name encodes the
    // 270-degree rotation variant and the vector shape (v2f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32"
        )]
        fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the intrinsic requires the `fcma` target feature, which the
    // #[target_feature] attribute on this function guarantees.
    unsafe { _vcmla_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Binding to the backing LLVM intrinsic; the link name encodes the
    // 270-degree rotation variant and the vector shape (v4f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32"
        )]
        fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the intrinsic requires the `fcma` target feature, which the
    // #[target_feature] attribute on this function guarantees.
    unsafe { _vcmlaq_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Binding to the backing LLVM intrinsic; the link name encodes the
    // 270-degree rotation variant and the vector shape (v2f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64"
        )]
        fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the intrinsic requires the `fcma` target feature, which the
    // #[target_feature] attribute on this function guarantees.
    unsafe { _vcmlaq_rot270_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // LANE selects one of the 2 complex (real, imag) element pairs in `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (elements 2*LANE and 2*LANE+1) across
        // both pairs of the vector, then delegate to the non-lane form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // LANE selects one of the 2 complex (real, imag) element pairs in the
    // 64-bit vector `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen `c` by broadcasting the selected pair (elements 2*LANE and
        // 2*LANE+1) across all four pairs, then delegate to the non-lane form.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // A float32x2_t holds exactly one complex (real, imag) pair, so the only
    // valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe {
        // With LANE == 0 this shuffle is the identity; it keeps the code shape
        // uniform with the other lane variants.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // A float32x2_t holds exactly one complex (real, imag) pair, so the only
    // valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe {
        // Widen `c` by broadcasting its single pair across both pairs of a
        // 128-bit vector, then delegate to the non-lane form.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // LANE selects one of the 4 complex (real, imag) element pairs in the
    // 128-bit vector `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow `c` by broadcasting the selected pair (elements 2*LANE and
        // 2*LANE+1) across both pairs of a 64-bit vector, then delegate to the
        // non-lane form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // LANE selects one of the 4 complex (real, imag) element pairs in `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected pair (elements 2*LANE and 2*LANE+1) across
        // all four pairs of the vector, then delegate to the non-lane form.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // LANE selects one of the 2 complex (real, imag) element pairs in `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow `c` to the selected pair (elements 2*LANE and 2*LANE+1),
        // then delegate to the non-lane form.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // LANE selects one of the 2 complex (real, imag) element pairs in `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (elements 2*LANE and 2*LANE+1) across
        // both pairs of the vector, then delegate to the non-lane form.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Binding to the backing LLVM intrinsic; the link name encodes the
    // 90-degree rotation variant and the vector shape (v4f16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f16"
        )]
        fn _vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the intrinsic requires the `fcma`/`fp16` target features, which
    // the #[target_feature] attributes on this function guarantee.
    unsafe { _vcmla_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Binding to the backing LLVM intrinsic; the link name encodes the
    // 90-degree rotation variant and the vector shape (v8f16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v8f16"
        )]
        fn _vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the intrinsic requires the `fcma`/`fp16` target features, which
    // the #[target_feature] attributes on this function guarantee.
    unsafe { _vcmlaq_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Binding to the backing LLVM intrinsic; the link name encodes the
    // 90-degree rotation variant and the vector shape (v2f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32"
        )]
        fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the intrinsic requires the `fcma` target feature, which the
    // #[target_feature] attribute on this function guarantees.
    unsafe { _vcmla_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Binding to the backing LLVM intrinsic; the link name encodes the
    // 90-degree rotation variant and the vector shape (v4f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32"
        )]
        fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the intrinsic requires the `fcma` target feature, which the
    // #[target_feature] attribute on this function guarantees.
    unsafe { _vcmlaq_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Binding to the backing LLVM intrinsic; the link name encodes the
    // 90-degree rotation variant and the vector shape (v2f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64"
        )]
        fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the intrinsic requires the `fcma` target feature, which the
    // #[target_feature] attribute on this function guarantees.
    unsafe { _vcmlaq_rot90_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // LANE selects one of the 2 complex (real, imag) element pairs in `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (elements 2*LANE and 2*LANE+1) across
        // both pairs of the vector, then delegate to the non-lane form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // LANE selects one of the 2 complex (real, imag) element pairs in the
    // 64-bit vector `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen `c` by broadcasting the selected pair (elements 2*LANE and
        // 2*LANE+1) across all four pairs, then delegate to the non-lane form.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // A float32x2_t holds exactly one complex (real, imag) pair, so the only
    // valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe {
        // With LANE == 0 this shuffle is the identity; it keeps the code shape
        // uniform with the other lane variants.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // A float32x2_t holds exactly one complex (real, imag) pair, so the only
    // valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe {
        // Widen `c` by broadcasting its single pair across both pairs of a
        // 128-bit vector, then delegate to the non-lane form.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // LANE selects one of the 4 complex (real, imag) element pairs in the
    // 128-bit vector `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow `c` by broadcasting the selected pair (elements 2*LANE and
        // 2*LANE+1) across both pairs of a 64-bit vector, then delegate to the
        // non-lane form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // LANE selects one of the 4 complex (real, imag) element pairs in `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected pair (elements 2*LANE and 2*LANE+1) across
        // all four pairs of the vector, then delegate to the non-lane form.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // LANE selects one of the 2 complex (real, imag) element pairs in `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow `c` to the selected pair (elements 2*LANE and 2*LANE+1),
        // then delegate to the non-lane form.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // LANE selects one of the 2 complex (real, imag) element pairs in `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (elements 2*LANE and 2*LANE+1) across
        // both pairs of the vector, then delegate to the non-lane form.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x2_t,
) -> float32x2_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b` (both 0..=1).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // shuffle-mask indices >= 2 select from the second operand, so
        // `2 + LANE2` picks lane LANE2 of `b`; all other lanes keep `a`.
        // The match is needed because simd_shuffle! masks must be const.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // The static_assert above limits LANE1 to the arms handled here.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b` (both 0..=7).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // shuffle-mask indices >= 8 select from the second operand, so
        // `8 + LANE2` picks lane LANE2 of `b`; all other lanes keep `a`.
        // The match is needed because simd_shuffle! masks must be const.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // The static_assert above limits LANE1 to the arms handled here.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b` (both 0..=3).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // shuffle-mask indices >= 4 select from the second operand, so
        // `4 + LANE2` picks lane LANE2 of `b`; all other lanes keep `a`.
        // The match is needed because simd_shuffle! masks must be const.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // The static_assert above limits LANE1 to the arms handled here.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b` (both 0..=1).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // shuffle-mask indices >= 2 select from the second operand, so
        // `2 + LANE2` picks lane LANE2 of `b`; all other lanes keep `a`.
        // The match is needed because simd_shuffle! masks must be const.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // The static_assert above limits LANE1 to the arms handled here.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b` (both 0..=7).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // shuffle-mask indices >= 8 select from the second operand, so
        // `8 + LANE2` picks lane LANE2 of `b`; all other lanes keep `a`.
        // The match is needed because simd_shuffle! masks must be const.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // The static_assert above limits LANE1 to the arms handled here.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x4_t,
) -> uint16x4_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b` (both 0..=3).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // shuffle-mask indices >= 4 select from the second operand, so
        // `4 + LANE2` picks lane LANE2 of `b`; all other lanes keep `a`.
        // The match is needed because simd_shuffle! masks must be const.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // The static_assert above limits LANE1 to the arms handled here.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x2_t,
) -> uint32x2_t {
    // Both vectors have 2 lanes, so each lane index must fit in 1 bit.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Shuffle indices 0..2 are a's lanes, 2..4 are b's; `2 + LANE2`
        // inserts lane LANE2 of `b` at position LANE1 of `a`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // Both vectors have 8 lanes, so each lane index must fit in 3 bits.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Shuffle indices 0..8 are a's lanes, 8..16 are b's; `8 + LANE2`
        // inserts lane LANE2 of `b` at position LANE1 of `a`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x4_t,
) -> poly16x4_t {
    // Both vectors have 4 lanes, so each lane index must fit in 2 bits.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Shuffle indices 0..4 are a's lanes, 4..8 are b's; `4 + LANE2`
        // inserts lane LANE2 of `b` at position LANE1 of `a`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x4_t,
) -> float32x2_t {
    // Destination has 2 lanes (1-bit index); source has 4 lanes (2-bit index).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` to 4 lanes so both shuffle operands have the same type; the
    // upper half ([a0, a1, a0, a1]) is never selected below.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // Indices 0..4 are (widened) a's lanes, 4..8 are b's; `4 + LANE2`
        // inserts lane LANE2 of `b` at position LANE1 of the 2-lane result.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t {
    // Destination has 8 lanes (3-bit index); source has 16 lanes (4-bit index).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes (upper half repeats the lower, never selected)
    // so both shuffle operands have the same type.
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Indices 0..16 are (widened) a's lanes, 16..32 are b's; `16 + LANE2`
        // inserts lane LANE2 of `b` at position LANE1 of the 8-lane result.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x4_t,
    b: int16x8_t,
) -> int16x4_t {
    // Destination has 4 lanes (2-bit index); source has 8 lanes (3-bit index).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to 8 lanes (upper half repeats the lower, never selected)
    // so both shuffle operands have the same type.
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Indices 0..8 are (widened) a's lanes, 8..16 are b's; `8 + LANE2`
        // inserts lane LANE2 of `b` at position LANE1 of the 4-lane result.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x2_t,
    b: int32x4_t,
) -> int32x2_t {
    // Destination has 2 lanes (1-bit index); source has 4 lanes (2-bit index).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` to 4 lanes (upper half repeats the lower, never selected)
    // so both shuffle operands have the same type.
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // Indices 0..4 are (widened) a's lanes, 4..8 are b's; `4 + LANE2`
        // inserts lane LANE2 of `b` at position LANE1 of the 2-lane result.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x8_t,
    b: uint8x16_t,
) -> uint8x8_t {
    // Destination has 8 lanes (3-bit index); source has 16 lanes (4-bit index).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes (upper half repeats the lower, never selected)
    // so both shuffle operands have the same type.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Indices 0..16 are (widened) a's lanes, 16..32 are b's; `16 + LANE2`
        // inserts lane LANE2 of `b` at position LANE1 of the 8-lane result.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x8_t,
) -> uint16x4_t {
    // Destination has 4 lanes (2-bit index); source has 8 lanes (3-bit index).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to 8 lanes (upper half repeats the lower, never selected)
    // so both shuffle operands have the same type.
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Indices 0..8 are (widened) a's lanes, 8..16 are b's; `8 + LANE2`
        // inserts lane LANE2 of `b` at position LANE1 of the 4-lane result.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x4_t,
) -> uint32x2_t {
    // Destination has 2 lanes (1-bit index); source has 4 lanes (2-bit index).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` to 4 lanes (upper half repeats the lower, never selected)
    // so both shuffle operands have the same type.
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // Indices 0..4 are (widened) a's lanes, 4..8 are b's; `4 + LANE2`
        // inserts lane LANE2 of `b` at position LANE1 of the 2-lane result.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x8_t,
    b: poly8x16_t,
) -> poly8x8_t {
    // Destination has 8 lanes (3-bit index); source has 16 lanes (4-bit index).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes (upper half repeats the lower, never selected)
    // so both shuffle operands have the same type.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Indices 0..16 are (widened) a's lanes, 16..32 are b's; `16 + LANE2`
        // inserts lane LANE2 of `b` at position LANE1 of the 8-lane result.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x8_t,
) -> poly16x4_t {
    // Destination has 4 lanes (2-bit index); source has 8 lanes (3-bit index).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to 8 lanes (upper half repeats the lower, never selected)
    // so both shuffle operands have the same type.
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Indices 0..8 are (widened) a's lanes, 8..16 are b's; `8 + LANE2`
        // inserts lane LANE2 of `b` at position LANE1 of the 4-lane result.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x2_t,
) -> float32x4_t {
    // Destination has 4 lanes (2-bit index); source has 2 lanes (1-bit index).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen `b` to 4 lanes (upper half repeats the lower, never selected
    // since LANE2 < 2) so both shuffle operands have the same type.
    let b: float32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // Indices 0..4 are a's lanes, 4..8 are (widened) b's; `4 + LANE2`
        // inserts lane LANE2 of `b` at position LANE1 of `a`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x1_t,
) -> float64x2_t {
    // Destination has 2 lanes; source has a single lane, so LANE2 must be 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen `b` by duplicating its single lane so both shuffle operands
    // have the same 2-lane type.
    let b: float64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Indices 0..2 are a's lanes, 2..4 are (widened) b's; `2 + LANE2`
        // inserts b's lane at position LANE1 of `a`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x1_t,
) -> int64x2_t {
    // Destination has 2 lanes; source has a single lane, so LANE2 must be 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen `b` by duplicating its single lane so both shuffle operands
    // have the same 2-lane type.
    let b: int64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Indices 0..2 are a's lanes, 2..4 are (widened) b's; `2 + LANE2`
        // inserts b's lane at position LANE1 of `a`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x1_t,
) -> uint64x2_t {
    // Destination has 2 lanes; source has a single lane, so LANE2 must be 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen `b` by duplicating its single lane so both shuffle operands
    // have the same 2-lane type.
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Indices 0..2 are a's lanes, 2..4 are (widened) b's; `2 + LANE2`
        // inserts b's lane at position LANE1 of `a`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x1_t,
) -> poly64x2_t {
    // Destination has 2 lanes; source has a single lane, so LANE2 must be 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen `b` by duplicating its single lane so both shuffle operands
    // have the same 2-lane type.
    let b: poly64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Indices 0..2 are a's lanes, 2..4 are (widened) b's; `2 + LANE2`
        // inserts b's lane at position LANE1 of `a`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
4623#[doc = "Insert vector element from another vector element"]
4624#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"]
4625#[inline(always)]
4626#[target_feature(enable = "neon")]
4627#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4628#[rustc_legacy_const_generics(1, 3)]
4629#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4630pub fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t {
4631    static_assert_uimm_bits!(LANE1, 4);
4632    static_assert_uimm_bits!(LANE2, 3);
4633    let b: int8x16_t =
4634        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
4635    unsafe {
4636        match LANE1 & 0b1111 {
4637            0 => simd_shuffle!(
4638                a,
4639                b,
4640                [
4641                    16 + LANE2 as u32,
4642                    1,
4643                    2,
4644                    3,
4645                    4,
4646                    5,
4647                    6,
4648                    7,
4649                    8,
4650                    9,
4651                    10,
4652                    11,
4653                    12,
4654                    13,
4655                    14,
4656                    15
4657                ]
4658            ),
4659            1 => simd_shuffle!(
4660                a,
4661                b,
4662                [
4663                    0,
4664                    16 + LANE2 as u32,
4665                    2,
4666                    3,
4667                    4,
4668                    5,
4669                    6,
4670                    7,
4671                    8,
4672                    9,
4673                    10,
4674                    11,
4675                    12,
4676                    13,
4677                    14,
4678                    15
4679                ]
4680            ),
4681            2 => simd_shuffle!(
4682                a,
4683                b,
4684                [
4685                    0,
4686                    1,
4687                    16 + LANE2 as u32,
4688                    3,
4689                    4,
4690                    5,
4691                    6,
4692                    7,
4693                    8,
4694                    9,
4695                    10,
4696                    11,
4697                    12,
4698                    13,
4699                    14,
4700                    15
4701                ]
4702            ),
4703            3 => simd_shuffle!(
4704                a,
4705                b,
4706                [
4707                    0,
4708                    1,
4709                    2,
4710                    16 + LANE2 as u32,
4711                    4,
4712                    5,
4713                    6,
4714                    7,
4715                    8,
4716                    9,
4717                    10,
4718                    11,
4719                    12,
4720                    13,
4721                    14,
4722                    15
4723                ]
4724            ),
4725            4 => simd_shuffle!(
4726                a,
4727                b,
4728                [
4729                    0,
4730                    1,
4731                    2,
4732                    3,
4733                    16 + LANE2 as u32,
4734                    5,
4735                    6,
4736                    7,
4737                    8,
4738                    9,
4739                    10,
4740                    11,
4741                    12,
4742                    13,
4743                    14,
4744                    15
4745                ]
4746            ),
4747            5 => simd_shuffle!(
4748                a,
4749                b,
4750                [
4751                    0,
4752                    1,
4753                    2,
4754                    3,
4755                    4,
4756                    16 + LANE2 as u32,
4757                    6,
4758                    7,
4759                    8,
4760                    9,
4761                    10,
4762                    11,
4763                    12,
4764                    13,
4765                    14,
4766                    15
4767                ]
4768            ),
4769            6 => simd_shuffle!(
4770                a,
4771                b,
4772                [
4773                    0,
4774                    1,
4775                    2,
4776                    3,
4777                    4,
4778                    5,
4779                    16 + LANE2 as u32,
4780                    7,
4781                    8,
4782                    9,
4783                    10,
4784                    11,
4785                    12,
4786                    13,
4787                    14,
4788                    15
4789                ]
4790            ),
4791            7 => simd_shuffle!(
4792                a,
4793                b,
4794                [
4795                    0,
4796                    1,
4797                    2,
4798                    3,
4799                    4,
4800                    5,
4801                    6,
4802                    16 + LANE2 as u32,
4803                    8,
4804                    9,
4805                    10,
4806                    11,
4807                    12,
4808                    13,
4809                    14,
4810                    15
4811                ]
4812            ),
4813            8 => simd_shuffle!(
4814                a,
4815                b,
4816                [
4817                    0,
4818                    1,
4819                    2,
4820                    3,
4821                    4,
4822                    5,
4823                    6,
4824                    7,
4825                    16 + LANE2 as u32,
4826                    9,
4827                    10,
4828                    11,
4829                    12,
4830                    13,
4831                    14,
4832                    15
4833                ]
4834            ),
4835            9 => simd_shuffle!(
4836                a,
4837                b,
4838                [
4839                    0,
4840                    1,
4841                    2,
4842                    3,
4843                    4,
4844                    5,
4845                    6,
4846                    7,
4847                    8,
4848                    16 + LANE2 as u32,
4849                    10,
4850                    11,
4851                    12,
4852                    13,
4853                    14,
4854                    15
4855                ]
4856            ),
4857            10 => simd_shuffle!(
4858                a,
4859                b,
4860                [
4861                    0,
4862                    1,
4863                    2,
4864                    3,
4865                    4,
4866                    5,
4867                    6,
4868                    7,
4869                    8,
4870                    9,
4871                    16 + LANE2 as u32,
4872                    11,
4873                    12,
4874                    13,
4875                    14,
4876                    15
4877                ]
4878            ),
4879            11 => simd_shuffle!(
4880                a,
4881                b,
4882                [
4883                    0,
4884                    1,
4885                    2,
4886                    3,
4887                    4,
4888                    5,
4889                    6,
4890                    7,
4891                    8,
4892                    9,
4893                    10,
4894                    16 + LANE2 as u32,
4895                    12,
4896                    13,
4897                    14,
4898                    15
4899                ]
4900            ),
4901            12 => simd_shuffle!(
4902                a,
4903                b,
4904                [
4905                    0,
4906                    1,
4907                    2,
4908                    3,
4909                    4,
4910                    5,
4911                    6,
4912                    7,
4913                    8,
4914                    9,
4915                    10,
4916                    11,
4917                    16 + LANE2 as u32,
4918                    13,
4919                    14,
4920                    15
4921                ]
4922            ),
4923            13 => simd_shuffle!(
4924                a,
4925                b,
4926                [
4927                    0,
4928                    1,
4929                    2,
4930                    3,
4931                    4,
4932                    5,
4933                    6,
4934                    7,
4935                    8,
4936                    9,
4937                    10,
4938                    11,
4939                    12,
4940                    16 + LANE2 as u32,
4941                    14,
4942                    15
4943                ]
4944            ),
4945            14 => simd_shuffle!(
4946                a,
4947                b,
4948                [
4949                    0,
4950                    1,
4951                    2,
4952                    3,
4953                    4,
4954                    5,
4955                    6,
4956                    7,
4957                    8,
4958                    9,
4959                    10,
4960                    11,
4961                    12,
4962                    13,
4963                    16 + LANE2 as u32,
4964                    15
4965                ]
4966            ),
4967            15 => simd_shuffle!(
4968                a,
4969                b,
4970                [
4971                    0,
4972                    1,
4973                    2,
4974                    3,
4975                    4,
4976                    5,
4977                    6,
4978                    7,
4979                    8,
4980                    9,
4981                    10,
4982                    11,
4983                    12,
4984                    13,
4985                    14,
4986                    16 + LANE2 as u32
4987                ]
4988            ),
4989            _ => unreachable_unchecked(),
4990        }
4991    }
4992}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x4_t,
) -> int16x8_t {
    // Destination has 8 lanes (3-bit index); source has 4 lanes (2-bit index).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `b` to 8 lanes (upper half repeats the lower, never selected
    // since LANE2 < 4) so both shuffle operands have the same type.
    let b: int16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Indices 0..8 are a's lanes, 8..16 are (widened) b's; `8 + LANE2`
        // inserts lane LANE2 of `b` at position LANE1 of `a`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x2_t,
) -> int32x4_t {
    // Destination has 4 lanes (2-bit index); source has 2 lanes (1-bit index).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen `b` to 4 lanes (upper half repeats the lower, never selected
    // since LANE2 < 2) so both shuffle operands have the same type.
    let b: int32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // Indices 0..4 are a's lanes, 4..8 are (widened) b's; `4 + LANE2`
        // inserts lane LANE2 of `b` at position LANE1 of `a`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
5045#[doc = "Insert vector element from another vector element"]
5046#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"]
5047#[inline(always)]
5048#[target_feature(enable = "neon")]
5049#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5050#[rustc_legacy_const_generics(1, 3)]
5051#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5052pub fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(
5053    a: uint8x16_t,
5054    b: uint8x8_t,
5055) -> uint8x16_t {
5056    static_assert_uimm_bits!(LANE1, 4);
5057    static_assert_uimm_bits!(LANE2, 3);
5058    let b: uint8x16_t =
5059        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
5060    unsafe {
5061        match LANE1 & 0b1111 {
5062            0 => simd_shuffle!(
5063                a,
5064                b,
5065                [
5066                    16 + LANE2 as u32,
5067                    1,
5068                    2,
5069                    3,
5070                    4,
5071                    5,
5072                    6,
5073                    7,
5074                    8,
5075                    9,
5076                    10,
5077                    11,
5078                    12,
5079                    13,
5080                    14,
5081                    15
5082                ]
5083            ),
5084            1 => simd_shuffle!(
5085                a,
5086                b,
5087                [
5088                    0,
5089                    16 + LANE2 as u32,
5090                    2,
5091                    3,
5092                    4,
5093                    5,
5094                    6,
5095                    7,
5096                    8,
5097                    9,
5098                    10,
5099                    11,
5100                    12,
5101                    13,
5102                    14,
5103                    15
5104                ]
5105            ),
5106            2 => simd_shuffle!(
5107                a,
5108                b,
5109                [
5110                    0,
5111                    1,
5112                    16 + LANE2 as u32,
5113                    3,
5114                    4,
5115                    5,
5116                    6,
5117                    7,
5118                    8,
5119                    9,
5120                    10,
5121                    11,
5122                    12,
5123                    13,
5124                    14,
5125                    15
5126                ]
5127            ),
5128            3 => simd_shuffle!(
5129                a,
5130                b,
5131                [
5132                    0,
5133                    1,
5134                    2,
5135                    16 + LANE2 as u32,
5136                    4,
5137                    5,
5138                    6,
5139                    7,
5140                    8,
5141                    9,
5142                    10,
5143                    11,
5144                    12,
5145                    13,
5146                    14,
5147                    15
5148                ]
5149            ),
5150            4 => simd_shuffle!(
5151                a,
5152                b,
5153                [
5154                    0,
5155                    1,
5156                    2,
5157                    3,
5158                    16 + LANE2 as u32,
5159                    5,
5160                    6,
5161                    7,
5162                    8,
5163                    9,
5164                    10,
5165                    11,
5166                    12,
5167                    13,
5168                    14,
5169                    15
5170                ]
5171            ),
5172            5 => simd_shuffle!(
5173                a,
5174                b,
5175                [
5176                    0,
5177                    1,
5178                    2,
5179                    3,
5180                    4,
5181                    16 + LANE2 as u32,
5182                    6,
5183                    7,
5184                    8,
5185                    9,
5186                    10,
5187                    11,
5188                    12,
5189                    13,
5190                    14,
5191                    15
5192                ]
5193            ),
5194            6 => simd_shuffle!(
5195                a,
5196                b,
5197                [
5198                    0,
5199                    1,
5200                    2,
5201                    3,
5202                    4,
5203                    5,
5204                    16 + LANE2 as u32,
5205                    7,
5206                    8,
5207                    9,
5208                    10,
5209                    11,
5210                    12,
5211                    13,
5212                    14,
5213                    15
5214                ]
5215            ),
5216            7 => simd_shuffle!(
5217                a,
5218                b,
5219                [
5220                    0,
5221                    1,
5222                    2,
5223                    3,
5224                    4,
5225                    5,
5226                    6,
5227                    16 + LANE2 as u32,
5228                    8,
5229                    9,
5230                    10,
5231                    11,
5232                    12,
5233                    13,
5234                    14,
5235                    15
5236                ]
5237            ),
5238            8 => simd_shuffle!(
5239                a,
5240                b,
5241                [
5242                    0,
5243                    1,
5244                    2,
5245                    3,
5246                    4,
5247                    5,
5248                    6,
5249                    7,
5250                    16 + LANE2 as u32,
5251                    9,
5252                    10,
5253                    11,
5254                    12,
5255                    13,
5256                    14,
5257                    15
5258                ]
5259            ),
5260            9 => simd_shuffle!(
5261                a,
5262                b,
5263                [
5264                    0,
5265                    1,
5266                    2,
5267                    3,
5268                    4,
5269                    5,
5270                    6,
5271                    7,
5272                    8,
5273                    16 + LANE2 as u32,
5274                    10,
5275                    11,
5276                    12,
5277                    13,
5278                    14,
5279                    15
5280                ]
5281            ),
5282            10 => simd_shuffle!(
5283                a,
5284                b,
5285                [
5286                    0,
5287                    1,
5288                    2,
5289                    3,
5290                    4,
5291                    5,
5292                    6,
5293                    7,
5294                    8,
5295                    9,
5296                    16 + LANE2 as u32,
5297                    11,
5298                    12,
5299                    13,
5300                    14,
5301                    15
5302                ]
5303            ),
5304            11 => simd_shuffle!(
5305                a,
5306                b,
5307                [
5308                    0,
5309                    1,
5310                    2,
5311                    3,
5312                    4,
5313                    5,
5314                    6,
5315                    7,
5316                    8,
5317                    9,
5318                    10,
5319                    16 + LANE2 as u32,
5320                    12,
5321                    13,
5322                    14,
5323                    15
5324                ]
5325            ),
5326            12 => simd_shuffle!(
5327                a,
5328                b,
5329                [
5330                    0,
5331                    1,
5332                    2,
5333                    3,
5334                    4,
5335                    5,
5336                    6,
5337                    7,
5338                    8,
5339                    9,
5340                    10,
5341                    11,
5342                    16 + LANE2 as u32,
5343                    13,
5344                    14,
5345                    15
5346                ]
5347            ),
5348            13 => simd_shuffle!(
5349                a,
5350                b,
5351                [
5352                    0,
5353                    1,
5354                    2,
5355                    3,
5356                    4,
5357                    5,
5358                    6,
5359                    7,
5360                    8,
5361                    9,
5362                    10,
5363                    11,
5364                    12,
5365                    16 + LANE2 as u32,
5366                    14,
5367                    15
5368                ]
5369            ),
5370            14 => simd_shuffle!(
5371                a,
5372                b,
5373                [
5374                    0,
5375                    1,
5376                    2,
5377                    3,
5378                    4,
5379                    5,
5380                    6,
5381                    7,
5382                    8,
5383                    9,
5384                    10,
5385                    11,
5386                    12,
5387                    13,
5388                    16 + LANE2 as u32,
5389                    15
5390                ]
5391            ),
5392            15 => simd_shuffle!(
5393                a,
5394                b,
5395                [
5396                    0,
5397                    1,
5398                    2,
5399                    3,
5400                    4,
5401                    5,
5402                    6,
5403                    7,
5404                    8,
5405                    9,
5406                    10,
5407                    11,
5408                    12,
5409                    13,
5410                    14,
5411                    16 + LANE2 as u32
5412                ]
5413            ),
5414            _ => unreachable_unchecked(),
5415        }
5416    }
5417}
5418#[doc = "Insert vector element from another vector element"]
5419#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"]
5420#[inline(always)]
5421#[target_feature(enable = "neon")]
5422#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5423#[rustc_legacy_const_generics(1, 3)]
5424#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5425pub fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(
5426    a: uint16x8_t,
5427    b: uint16x4_t,
5428) -> uint16x8_t {
5429    static_assert_uimm_bits!(LANE1, 3);
5430    static_assert_uimm_bits!(LANE2, 2);
5431    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
5432    unsafe {
5433        match LANE1 & 0b111 {
5434            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
5435            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
5436            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
5437            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
5438            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
5439            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
5440            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
5441            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
5442            _ => unreachable_unchecked(),
5443        }
5444    }
5445}
5446#[doc = "Insert vector element from another vector element"]
5447#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"]
5448#[inline(always)]
5449#[target_feature(enable = "neon")]
5450#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5451#[rustc_legacy_const_generics(1, 3)]
5452#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5453pub fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(
5454    a: uint32x4_t,
5455    b: uint32x2_t,
5456) -> uint32x4_t {
5457    static_assert_uimm_bits!(LANE1, 2);
5458    static_assert_uimm_bits!(LANE2, 1);
5459    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
5460    unsafe {
5461        match LANE1 & 0b11 {
5462            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
5463            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
5464            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
5465            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
5466            _ => unreachable_unchecked(),
5467        }
5468    }
5469}
5470#[doc = "Insert vector element from another vector element"]
5471#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"]
5472#[inline(always)]
5473#[target_feature(enable = "neon")]
5474#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5475#[rustc_legacy_const_generics(1, 3)]
5476#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5477pub fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(
5478    a: poly8x16_t,
5479    b: poly8x8_t,
5480) -> poly8x16_t {
5481    static_assert_uimm_bits!(LANE1, 4);
5482    static_assert_uimm_bits!(LANE2, 3);
5483    let b: poly8x16_t =
5484        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
5485    unsafe {
5486        match LANE1 & 0b1111 {
5487            0 => simd_shuffle!(
5488                a,
5489                b,
5490                [
5491                    16 + LANE2 as u32,
5492                    1,
5493                    2,
5494                    3,
5495                    4,
5496                    5,
5497                    6,
5498                    7,
5499                    8,
5500                    9,
5501                    10,
5502                    11,
5503                    12,
5504                    13,
5505                    14,
5506                    15
5507                ]
5508            ),
5509            1 => simd_shuffle!(
5510                a,
5511                b,
5512                [
5513                    0,
5514                    16 + LANE2 as u32,
5515                    2,
5516                    3,
5517                    4,
5518                    5,
5519                    6,
5520                    7,
5521                    8,
5522                    9,
5523                    10,
5524                    11,
5525                    12,
5526                    13,
5527                    14,
5528                    15
5529                ]
5530            ),
5531            2 => simd_shuffle!(
5532                a,
5533                b,
5534                [
5535                    0,
5536                    1,
5537                    16 + LANE2 as u32,
5538                    3,
5539                    4,
5540                    5,
5541                    6,
5542                    7,
5543                    8,
5544                    9,
5545                    10,
5546                    11,
5547                    12,
5548                    13,
5549                    14,
5550                    15
5551                ]
5552            ),
5553            3 => simd_shuffle!(
5554                a,
5555                b,
5556                [
5557                    0,
5558                    1,
5559                    2,
5560                    16 + LANE2 as u32,
5561                    4,
5562                    5,
5563                    6,
5564                    7,
5565                    8,
5566                    9,
5567                    10,
5568                    11,
5569                    12,
5570                    13,
5571                    14,
5572                    15
5573                ]
5574            ),
5575            4 => simd_shuffle!(
5576                a,
5577                b,
5578                [
5579                    0,
5580                    1,
5581                    2,
5582                    3,
5583                    16 + LANE2 as u32,
5584                    5,
5585                    6,
5586                    7,
5587                    8,
5588                    9,
5589                    10,
5590                    11,
5591                    12,
5592                    13,
5593                    14,
5594                    15
5595                ]
5596            ),
5597            5 => simd_shuffle!(
5598                a,
5599                b,
5600                [
5601                    0,
5602                    1,
5603                    2,
5604                    3,
5605                    4,
5606                    16 + LANE2 as u32,
5607                    6,
5608                    7,
5609                    8,
5610                    9,
5611                    10,
5612                    11,
5613                    12,
5614                    13,
5615                    14,
5616                    15
5617                ]
5618            ),
5619            6 => simd_shuffle!(
5620                a,
5621                b,
5622                [
5623                    0,
5624                    1,
5625                    2,
5626                    3,
5627                    4,
5628                    5,
5629                    16 + LANE2 as u32,
5630                    7,
5631                    8,
5632                    9,
5633                    10,
5634                    11,
5635                    12,
5636                    13,
5637                    14,
5638                    15
5639                ]
5640            ),
5641            7 => simd_shuffle!(
5642                a,
5643                b,
5644                [
5645                    0,
5646                    1,
5647                    2,
5648                    3,
5649                    4,
5650                    5,
5651                    6,
5652                    16 + LANE2 as u32,
5653                    8,
5654                    9,
5655                    10,
5656                    11,
5657                    12,
5658                    13,
5659                    14,
5660                    15
5661                ]
5662            ),
5663            8 => simd_shuffle!(
5664                a,
5665                b,
5666                [
5667                    0,
5668                    1,
5669                    2,
5670                    3,
5671                    4,
5672                    5,
5673                    6,
5674                    7,
5675                    16 + LANE2 as u32,
5676                    9,
5677                    10,
5678                    11,
5679                    12,
5680                    13,
5681                    14,
5682                    15
5683                ]
5684            ),
5685            9 => simd_shuffle!(
5686                a,
5687                b,
5688                [
5689                    0,
5690                    1,
5691                    2,
5692                    3,
5693                    4,
5694                    5,
5695                    6,
5696                    7,
5697                    8,
5698                    16 + LANE2 as u32,
5699                    10,
5700                    11,
5701                    12,
5702                    13,
5703                    14,
5704                    15
5705                ]
5706            ),
5707            10 => simd_shuffle!(
5708                a,
5709                b,
5710                [
5711                    0,
5712                    1,
5713                    2,
5714                    3,
5715                    4,
5716                    5,
5717                    6,
5718                    7,
5719                    8,
5720                    9,
5721                    16 + LANE2 as u32,
5722                    11,
5723                    12,
5724                    13,
5725                    14,
5726                    15
5727                ]
5728            ),
5729            11 => simd_shuffle!(
5730                a,
5731                b,
5732                [
5733                    0,
5734                    1,
5735                    2,
5736                    3,
5737                    4,
5738                    5,
5739                    6,
5740                    7,
5741                    8,
5742                    9,
5743                    10,
5744                    16 + LANE2 as u32,
5745                    12,
5746                    13,
5747                    14,
5748                    15
5749                ]
5750            ),
5751            12 => simd_shuffle!(
5752                a,
5753                b,
5754                [
5755                    0,
5756                    1,
5757                    2,
5758                    3,
5759                    4,
5760                    5,
5761                    6,
5762                    7,
5763                    8,
5764                    9,
5765                    10,
5766                    11,
5767                    16 + LANE2 as u32,
5768                    13,
5769                    14,
5770                    15
5771                ]
5772            ),
5773            13 => simd_shuffle!(
5774                a,
5775                b,
5776                [
5777                    0,
5778                    1,
5779                    2,
5780                    3,
5781                    4,
5782                    5,
5783                    6,
5784                    7,
5785                    8,
5786                    9,
5787                    10,
5788                    11,
5789                    12,
5790                    16 + LANE2 as u32,
5791                    14,
5792                    15
5793                ]
5794            ),
5795            14 => simd_shuffle!(
5796                a,
5797                b,
5798                [
5799                    0,
5800                    1,
5801                    2,
5802                    3,
5803                    4,
5804                    5,
5805                    6,
5806                    7,
5807                    8,
5808                    9,
5809                    10,
5810                    11,
5811                    12,
5812                    13,
5813                    16 + LANE2 as u32,
5814                    15
5815                ]
5816            ),
5817            15 => simd_shuffle!(
5818                a,
5819                b,
5820                [
5821                    0,
5822                    1,
5823                    2,
5824                    3,
5825                    4,
5826                    5,
5827                    6,
5828                    7,
5829                    8,
5830                    9,
5831                    10,
5832                    11,
5833                    12,
5834                    13,
5835                    14,
5836                    16 + LANE2 as u32
5837                ]
5838            ),
5839            _ => unreachable_unchecked(),
5840        }
5841    }
5842}
5843#[doc = "Insert vector element from another vector element"]
5844#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"]
5845#[inline(always)]
5846#[target_feature(enable = "neon")]
5847#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5848#[rustc_legacy_const_generics(1, 3)]
5849#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5850pub fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(
5851    a: poly16x8_t,
5852    b: poly16x4_t,
5853) -> poly16x8_t {
5854    static_assert_uimm_bits!(LANE1, 3);
5855    static_assert_uimm_bits!(LANE2, 2);
5856    let b: poly16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
5857    unsafe {
5858        match LANE1 & 0b111 {
5859            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
5860            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
5861            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
5862            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
5863            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
5864            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
5865            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
5866            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
5867            _ => unreachable_unchecked(),
5868        }
5869    }
5870}
5871#[doc = "Insert vector element from another vector element"]
5872#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"]
5873#[inline(always)]
5874#[target_feature(enable = "neon")]
5875#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5876#[rustc_legacy_const_generics(1, 3)]
5877#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5878pub fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(
5879    a: float32x4_t,
5880    b: float32x4_t,
5881) -> float32x4_t {
5882    static_assert_uimm_bits!(LANE1, 2);
5883    static_assert_uimm_bits!(LANE2, 2);
5884    unsafe {
5885        match LANE1 & 0b11 {
5886            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
5887            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
5888            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
5889            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
5890            _ => unreachable_unchecked(),
5891        }
5892    }
5893}
5894#[doc = "Insert vector element from another vector element"]
5895#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"]
5896#[inline(always)]
5897#[target_feature(enable = "neon")]
5898#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5899#[rustc_legacy_const_generics(1, 3)]
5900#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5901pub fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(
5902    a: float64x2_t,
5903    b: float64x2_t,
5904) -> float64x2_t {
5905    static_assert_uimm_bits!(LANE1, 1);
5906    static_assert_uimm_bits!(LANE2, 1);
5907    unsafe {
5908        match LANE1 & 0b1 {
5909            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
5910            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
5911            _ => unreachable_unchecked(),
5912        }
5913    }
5914}
5915#[doc = "Insert vector element from another vector element"]
5916#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"]
5917#[inline(always)]
5918#[target_feature(enable = "neon")]
5919#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5920#[rustc_legacy_const_generics(1, 3)]
5921#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5922pub fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(
5923    a: int8x16_t,
5924    b: int8x16_t,
5925) -> int8x16_t {
5926    static_assert_uimm_bits!(LANE1, 4);
5927    static_assert_uimm_bits!(LANE2, 4);
5928    unsafe {
5929        match LANE1 & 0b1111 {
5930            0 => simd_shuffle!(
5931                a,
5932                b,
5933                [
5934                    16 + LANE2 as u32,
5935                    1,
5936                    2,
5937                    3,
5938                    4,
5939                    5,
5940                    6,
5941                    7,
5942                    8,
5943                    9,
5944                    10,
5945                    11,
5946                    12,
5947                    13,
5948                    14,
5949                    15
5950                ]
5951            ),
5952            1 => simd_shuffle!(
5953                a,
5954                b,
5955                [
5956                    0,
5957                    16 + LANE2 as u32,
5958                    2,
5959                    3,
5960                    4,
5961                    5,
5962                    6,
5963                    7,
5964                    8,
5965                    9,
5966                    10,
5967                    11,
5968                    12,
5969                    13,
5970                    14,
5971                    15
5972                ]
5973            ),
5974            2 => simd_shuffle!(
5975                a,
5976                b,
5977                [
5978                    0,
5979                    1,
5980                    16 + LANE2 as u32,
5981                    3,
5982                    4,
5983                    5,
5984                    6,
5985                    7,
5986                    8,
5987                    9,
5988                    10,
5989                    11,
5990                    12,
5991                    13,
5992                    14,
5993                    15
5994                ]
5995            ),
5996            3 => simd_shuffle!(
5997                a,
5998                b,
5999                [
6000                    0,
6001                    1,
6002                    2,
6003                    16 + LANE2 as u32,
6004                    4,
6005                    5,
6006                    6,
6007                    7,
6008                    8,
6009                    9,
6010                    10,
6011                    11,
6012                    12,
6013                    13,
6014                    14,
6015                    15
6016                ]
6017            ),
6018            4 => simd_shuffle!(
6019                a,
6020                b,
6021                [
6022                    0,
6023                    1,
6024                    2,
6025                    3,
6026                    16 + LANE2 as u32,
6027                    5,
6028                    6,
6029                    7,
6030                    8,
6031                    9,
6032                    10,
6033                    11,
6034                    12,
6035                    13,
6036                    14,
6037                    15
6038                ]
6039            ),
6040            5 => simd_shuffle!(
6041                a,
6042                b,
6043                [
6044                    0,
6045                    1,
6046                    2,
6047                    3,
6048                    4,
6049                    16 + LANE2 as u32,
6050                    6,
6051                    7,
6052                    8,
6053                    9,
6054                    10,
6055                    11,
6056                    12,
6057                    13,
6058                    14,
6059                    15
6060                ]
6061            ),
6062            6 => simd_shuffle!(
6063                a,
6064                b,
6065                [
6066                    0,
6067                    1,
6068                    2,
6069                    3,
6070                    4,
6071                    5,
6072                    16 + LANE2 as u32,
6073                    7,
6074                    8,
6075                    9,
6076                    10,
6077                    11,
6078                    12,
6079                    13,
6080                    14,
6081                    15
6082                ]
6083            ),
6084            7 => simd_shuffle!(
6085                a,
6086                b,
6087                [
6088                    0,
6089                    1,
6090                    2,
6091                    3,
6092                    4,
6093                    5,
6094                    6,
6095                    16 + LANE2 as u32,
6096                    8,
6097                    9,
6098                    10,
6099                    11,
6100                    12,
6101                    13,
6102                    14,
6103                    15
6104                ]
6105            ),
6106            8 => simd_shuffle!(
6107                a,
6108                b,
6109                [
6110                    0,
6111                    1,
6112                    2,
6113                    3,
6114                    4,
6115                    5,
6116                    6,
6117                    7,
6118                    16 + LANE2 as u32,
6119                    9,
6120                    10,
6121                    11,
6122                    12,
6123                    13,
6124                    14,
6125                    15
6126                ]
6127            ),
6128            9 => simd_shuffle!(
6129                a,
6130                b,
6131                [
6132                    0,
6133                    1,
6134                    2,
6135                    3,
6136                    4,
6137                    5,
6138                    6,
6139                    7,
6140                    8,
6141                    16 + LANE2 as u32,
6142                    10,
6143                    11,
6144                    12,
6145                    13,
6146                    14,
6147                    15
6148                ]
6149            ),
6150            10 => simd_shuffle!(
6151                a,
6152                b,
6153                [
6154                    0,
6155                    1,
6156                    2,
6157                    3,
6158                    4,
6159                    5,
6160                    6,
6161                    7,
6162                    8,
6163                    9,
6164                    16 + LANE2 as u32,
6165                    11,
6166                    12,
6167                    13,
6168                    14,
6169                    15
6170                ]
6171            ),
6172            11 => simd_shuffle!(
6173                a,
6174                b,
6175                [
6176                    0,
6177                    1,
6178                    2,
6179                    3,
6180                    4,
6181                    5,
6182                    6,
6183                    7,
6184                    8,
6185                    9,
6186                    10,
6187                    16 + LANE2 as u32,
6188                    12,
6189                    13,
6190                    14,
6191                    15
6192                ]
6193            ),
6194            12 => simd_shuffle!(
6195                a,
6196                b,
6197                [
6198                    0,
6199                    1,
6200                    2,
6201                    3,
6202                    4,
6203                    5,
6204                    6,
6205                    7,
6206                    8,
6207                    9,
6208                    10,
6209                    11,
6210                    16 + LANE2 as u32,
6211                    13,
6212                    14,
6213                    15
6214                ]
6215            ),
6216            13 => simd_shuffle!(
6217                a,
6218                b,
6219                [
6220                    0,
6221                    1,
6222                    2,
6223                    3,
6224                    4,
6225                    5,
6226                    6,
6227                    7,
6228                    8,
6229                    9,
6230                    10,
6231                    11,
6232                    12,
6233                    16 + LANE2 as u32,
6234                    14,
6235                    15
6236                ]
6237            ),
6238            14 => simd_shuffle!(
6239                a,
6240                b,
6241                [
6242                    0,
6243                    1,
6244                    2,
6245                    3,
6246                    4,
6247                    5,
6248                    6,
6249                    7,
6250                    8,
6251                    9,
6252                    10,
6253                    11,
6254                    12,
6255                    13,
6256                    16 + LANE2 as u32,
6257                    15
6258                ]
6259            ),
6260            15 => simd_shuffle!(
6261                a,
6262                b,
6263                [
6264                    0,
6265                    1,
6266                    2,
6267                    3,
6268                    4,
6269                    5,
6270                    6,
6271                    7,
6272                    8,
6273                    9,
6274                    10,
6275                    11,
6276                    12,
6277                    13,
6278                    14,
6279                    16 + LANE2 as u32
6280                ]
6281            ),
6282            _ => unreachable_unchecked(),
6283        }
6284    }
6285}
6286#[doc = "Insert vector element from another vector element"]
6287#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"]
6288#[inline(always)]
6289#[target_feature(enable = "neon")]
6290#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6291#[rustc_legacy_const_generics(1, 3)]
6292#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6293pub fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(
6294    a: int16x8_t,
6295    b: int16x8_t,
6296) -> int16x8_t {
6297    static_assert_uimm_bits!(LANE1, 3);
6298    static_assert_uimm_bits!(LANE2, 3);
6299    unsafe {
6300        match LANE1 & 0b111 {
6301            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
6302            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
6303            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
6304            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
6305            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
6306            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
6307            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
6308            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
6309            _ => unreachable_unchecked(),
6310        }
6311    }
6312}
6313#[doc = "Insert vector element from another vector element"]
6314#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"]
6315#[inline(always)]
6316#[target_feature(enable = "neon")]
6317#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6318#[rustc_legacy_const_generics(1, 3)]
6319#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6320pub fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(
6321    a: int32x4_t,
6322    b: int32x4_t,
6323) -> int32x4_t {
6324    static_assert_uimm_bits!(LANE1, 2);
6325    static_assert_uimm_bits!(LANE2, 2);
6326    unsafe {
6327        match LANE1 & 0b11 {
6328            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
6329            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
6330            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
6331            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
6332            _ => unreachable_unchecked(),
6333        }
6334    }
6335}
6336#[doc = "Insert vector element from another vector element"]
6337#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"]
6338#[inline(always)]
6339#[target_feature(enable = "neon")]
6340#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6341#[rustc_legacy_const_generics(1, 3)]
6342#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6343pub fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(
6344    a: int64x2_t,
6345    b: int64x2_t,
6346) -> int64x2_t {
6347    static_assert_uimm_bits!(LANE1, 1);
6348    static_assert_uimm_bits!(LANE2, 1);
6349    unsafe {
6350        match LANE1 & 0b1 {
6351            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
6352            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
6353            _ => unreachable_unchecked(),
6354        }
6355    }
6356}
6357#[doc = "Insert vector element from another vector element"]
6358#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"]
6359#[inline(always)]
6360#[target_feature(enable = "neon")]
6361#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6362#[rustc_legacy_const_generics(1, 3)]
6363#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6364pub fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(
6365    a: uint8x16_t,
6366    b: uint8x16_t,
6367) -> uint8x16_t {
6368    static_assert_uimm_bits!(LANE1, 4);
6369    static_assert_uimm_bits!(LANE2, 4);
6370    unsafe {
6371        match LANE1 & 0b1111 {
6372            0 => simd_shuffle!(
6373                a,
6374                b,
6375                [
6376                    16 + LANE2 as u32,
6377                    1,
6378                    2,
6379                    3,
6380                    4,
6381                    5,
6382                    6,
6383                    7,
6384                    8,
6385                    9,
6386                    10,
6387                    11,
6388                    12,
6389                    13,
6390                    14,
6391                    15
6392                ]
6393            ),
6394            1 => simd_shuffle!(
6395                a,
6396                b,
6397                [
6398                    0,
6399                    16 + LANE2 as u32,
6400                    2,
6401                    3,
6402                    4,
6403                    5,
6404                    6,
6405                    7,
6406                    8,
6407                    9,
6408                    10,
6409                    11,
6410                    12,
6411                    13,
6412                    14,
6413                    15
6414                ]
6415            ),
6416            2 => simd_shuffle!(
6417                a,
6418                b,
6419                [
6420                    0,
6421                    1,
6422                    16 + LANE2 as u32,
6423                    3,
6424                    4,
6425                    5,
6426                    6,
6427                    7,
6428                    8,
6429                    9,
6430                    10,
6431                    11,
6432                    12,
6433                    13,
6434                    14,
6435                    15
6436                ]
6437            ),
6438            3 => simd_shuffle!(
6439                a,
6440                b,
6441                [
6442                    0,
6443                    1,
6444                    2,
6445                    16 + LANE2 as u32,
6446                    4,
6447                    5,
6448                    6,
6449                    7,
6450                    8,
6451                    9,
6452                    10,
6453                    11,
6454                    12,
6455                    13,
6456                    14,
6457                    15
6458                ]
6459            ),
6460            4 => simd_shuffle!(
6461                a,
6462                b,
6463                [
6464                    0,
6465                    1,
6466                    2,
6467                    3,
6468                    16 + LANE2 as u32,
6469                    5,
6470                    6,
6471                    7,
6472                    8,
6473                    9,
6474                    10,
6475                    11,
6476                    12,
6477                    13,
6478                    14,
6479                    15
6480                ]
6481            ),
6482            5 => simd_shuffle!(
6483                a,
6484                b,
6485                [
6486                    0,
6487                    1,
6488                    2,
6489                    3,
6490                    4,
6491                    16 + LANE2 as u32,
6492                    6,
6493                    7,
6494                    8,
6495                    9,
6496                    10,
6497                    11,
6498                    12,
6499                    13,
6500                    14,
6501                    15
6502                ]
6503            ),
6504            6 => simd_shuffle!(
6505                a,
6506                b,
6507                [
6508                    0,
6509                    1,
6510                    2,
6511                    3,
6512                    4,
6513                    5,
6514                    16 + LANE2 as u32,
6515                    7,
6516                    8,
6517                    9,
6518                    10,
6519                    11,
6520                    12,
6521                    13,
6522                    14,
6523                    15
6524                ]
6525            ),
6526            7 => simd_shuffle!(
6527                a,
6528                b,
6529                [
6530                    0,
6531                    1,
6532                    2,
6533                    3,
6534                    4,
6535                    5,
6536                    6,
6537                    16 + LANE2 as u32,
6538                    8,
6539                    9,
6540                    10,
6541                    11,
6542                    12,
6543                    13,
6544                    14,
6545                    15
6546                ]
6547            ),
6548            8 => simd_shuffle!(
6549                a,
6550                b,
6551                [
6552                    0,
6553                    1,
6554                    2,
6555                    3,
6556                    4,
6557                    5,
6558                    6,
6559                    7,
6560                    16 + LANE2 as u32,
6561                    9,
6562                    10,
6563                    11,
6564                    12,
6565                    13,
6566                    14,
6567                    15
6568                ]
6569            ),
6570            9 => simd_shuffle!(
6571                a,
6572                b,
6573                [
6574                    0,
6575                    1,
6576                    2,
6577                    3,
6578                    4,
6579                    5,
6580                    6,
6581                    7,
6582                    8,
6583                    16 + LANE2 as u32,
6584                    10,
6585                    11,
6586                    12,
6587                    13,
6588                    14,
6589                    15
6590                ]
6591            ),
6592            10 => simd_shuffle!(
6593                a,
6594                b,
6595                [
6596                    0,
6597                    1,
6598                    2,
6599                    3,
6600                    4,
6601                    5,
6602                    6,
6603                    7,
6604                    8,
6605                    9,
6606                    16 + LANE2 as u32,
6607                    11,
6608                    12,
6609                    13,
6610                    14,
6611                    15
6612                ]
6613            ),
6614            11 => simd_shuffle!(
6615                a,
6616                b,
6617                [
6618                    0,
6619                    1,
6620                    2,
6621                    3,
6622                    4,
6623                    5,
6624                    6,
6625                    7,
6626                    8,
6627                    9,
6628                    10,
6629                    16 + LANE2 as u32,
6630                    12,
6631                    13,
6632                    14,
6633                    15
6634                ]
6635            ),
6636            12 => simd_shuffle!(
6637                a,
6638                b,
6639                [
6640                    0,
6641                    1,
6642                    2,
6643                    3,
6644                    4,
6645                    5,
6646                    6,
6647                    7,
6648                    8,
6649                    9,
6650                    10,
6651                    11,
6652                    16 + LANE2 as u32,
6653                    13,
6654                    14,
6655                    15
6656                ]
6657            ),
6658            13 => simd_shuffle!(
6659                a,
6660                b,
6661                [
6662                    0,
6663                    1,
6664                    2,
6665                    3,
6666                    4,
6667                    5,
6668                    6,
6669                    7,
6670                    8,
6671                    9,
6672                    10,
6673                    11,
6674                    12,
6675                    16 + LANE2 as u32,
6676                    14,
6677                    15
6678                ]
6679            ),
6680            14 => simd_shuffle!(
6681                a,
6682                b,
6683                [
6684                    0,
6685                    1,
6686                    2,
6687                    3,
6688                    4,
6689                    5,
6690                    6,
6691                    7,
6692                    8,
6693                    9,
6694                    10,
6695                    11,
6696                    12,
6697                    13,
6698                    16 + LANE2 as u32,
6699                    15
6700                ]
6701            ),
6702            15 => simd_shuffle!(
6703                a,
6704                b,
6705                [
6706                    0,
6707                    1,
6708                    2,
6709                    3,
6710                    4,
6711                    5,
6712                    6,
6713                    7,
6714                    8,
6715                    9,
6716                    10,
6717                    11,
6718                    12,
6719                    13,
6720                    14,
6721                    16 + LANE2 as u32
6722                ]
6723            ),
6724            _ => unreachable_unchecked(),
6725        }
6726    }
6727}
6728#[doc = "Insert vector element from another vector element"]
6729#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"]
6730#[inline(always)]
6731#[target_feature(enable = "neon")]
6732#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6733#[rustc_legacy_const_generics(1, 3)]
6734#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6735pub fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(
6736    a: uint16x8_t,
6737    b: uint16x8_t,
6738) -> uint16x8_t {
6739    static_assert_uimm_bits!(LANE1, 3);
6740    static_assert_uimm_bits!(LANE2, 3);
6741    unsafe {
6742        match LANE1 & 0b111 {
6743            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
6744            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
6745            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
6746            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
6747            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
6748            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
6749            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
6750            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
6751            _ => unreachable_unchecked(),
6752        }
6753    }
6754}
6755#[doc = "Insert vector element from another vector element"]
6756#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"]
6757#[inline(always)]
6758#[target_feature(enable = "neon")]
6759#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6760#[rustc_legacy_const_generics(1, 3)]
6761#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6762pub fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(
6763    a: uint32x4_t,
6764    b: uint32x4_t,
6765) -> uint32x4_t {
6766    static_assert_uimm_bits!(LANE1, 2);
6767    static_assert_uimm_bits!(LANE2, 2);
6768    unsafe {
6769        match LANE1 & 0b11 {
6770            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
6771            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
6772            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
6773            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
6774            _ => unreachable_unchecked(),
6775        }
6776    }
6777}
6778#[doc = "Insert vector element from another vector element"]
6779#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"]
6780#[inline(always)]
6781#[target_feature(enable = "neon")]
6782#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6783#[rustc_legacy_const_generics(1, 3)]
6784#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6785pub fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(
6786    a: uint64x2_t,
6787    b: uint64x2_t,
6788) -> uint64x2_t {
6789    static_assert_uimm_bits!(LANE1, 1);
6790    static_assert_uimm_bits!(LANE2, 1);
6791    unsafe {
6792        match LANE1 & 0b1 {
6793            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
6794            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
6795            _ => unreachable_unchecked(),
6796        }
6797    }
6798}
6799#[doc = "Insert vector element from another vector element"]
6800#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"]
6801#[inline(always)]
6802#[target_feature(enable = "neon")]
6803#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6804#[rustc_legacy_const_generics(1, 3)]
6805#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6806pub fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(
6807    a: poly8x16_t,
6808    b: poly8x16_t,
6809) -> poly8x16_t {
6810    static_assert_uimm_bits!(LANE1, 4);
6811    static_assert_uimm_bits!(LANE2, 4);
6812    unsafe {
6813        match LANE1 & 0b1111 {
6814            0 => simd_shuffle!(
6815                a,
6816                b,
6817                [
6818                    16 + LANE2 as u32,
6819                    1,
6820                    2,
6821                    3,
6822                    4,
6823                    5,
6824                    6,
6825                    7,
6826                    8,
6827                    9,
6828                    10,
6829                    11,
6830                    12,
6831                    13,
6832                    14,
6833                    15
6834                ]
6835            ),
6836            1 => simd_shuffle!(
6837                a,
6838                b,
6839                [
6840                    0,
6841                    16 + LANE2 as u32,
6842                    2,
6843                    3,
6844                    4,
6845                    5,
6846                    6,
6847                    7,
6848                    8,
6849                    9,
6850                    10,
6851                    11,
6852                    12,
6853                    13,
6854                    14,
6855                    15
6856                ]
6857            ),
6858            2 => simd_shuffle!(
6859                a,
6860                b,
6861                [
6862                    0,
6863                    1,
6864                    16 + LANE2 as u32,
6865                    3,
6866                    4,
6867                    5,
6868                    6,
6869                    7,
6870                    8,
6871                    9,
6872                    10,
6873                    11,
6874                    12,
6875                    13,
6876                    14,
6877                    15
6878                ]
6879            ),
6880            3 => simd_shuffle!(
6881                a,
6882                b,
6883                [
6884                    0,
6885                    1,
6886                    2,
6887                    16 + LANE2 as u32,
6888                    4,
6889                    5,
6890                    6,
6891                    7,
6892                    8,
6893                    9,
6894                    10,
6895                    11,
6896                    12,
6897                    13,
6898                    14,
6899                    15
6900                ]
6901            ),
6902            4 => simd_shuffle!(
6903                a,
6904                b,
6905                [
6906                    0,
6907                    1,
6908                    2,
6909                    3,
6910                    16 + LANE2 as u32,
6911                    5,
6912                    6,
6913                    7,
6914                    8,
6915                    9,
6916                    10,
6917                    11,
6918                    12,
6919                    13,
6920                    14,
6921                    15
6922                ]
6923            ),
6924            5 => simd_shuffle!(
6925                a,
6926                b,
6927                [
6928                    0,
6929                    1,
6930                    2,
6931                    3,
6932                    4,
6933                    16 + LANE2 as u32,
6934                    6,
6935                    7,
6936                    8,
6937                    9,
6938                    10,
6939                    11,
6940                    12,
6941                    13,
6942                    14,
6943                    15
6944                ]
6945            ),
6946            6 => simd_shuffle!(
6947                a,
6948                b,
6949                [
6950                    0,
6951                    1,
6952                    2,
6953                    3,
6954                    4,
6955                    5,
6956                    16 + LANE2 as u32,
6957                    7,
6958                    8,
6959                    9,
6960                    10,
6961                    11,
6962                    12,
6963                    13,
6964                    14,
6965                    15
6966                ]
6967            ),
6968            7 => simd_shuffle!(
6969                a,
6970                b,
6971                [
6972                    0,
6973                    1,
6974                    2,
6975                    3,
6976                    4,
6977                    5,
6978                    6,
6979                    16 + LANE2 as u32,
6980                    8,
6981                    9,
6982                    10,
6983                    11,
6984                    12,
6985                    13,
6986                    14,
6987                    15
6988                ]
6989            ),
6990            8 => simd_shuffle!(
6991                a,
6992                b,
6993                [
6994                    0,
6995                    1,
6996                    2,
6997                    3,
6998                    4,
6999                    5,
7000                    6,
7001                    7,
7002                    16 + LANE2 as u32,
7003                    9,
7004                    10,
7005                    11,
7006                    12,
7007                    13,
7008                    14,
7009                    15
7010                ]
7011            ),
7012            9 => simd_shuffle!(
7013                a,
7014                b,
7015                [
7016                    0,
7017                    1,
7018                    2,
7019                    3,
7020                    4,
7021                    5,
7022                    6,
7023                    7,
7024                    8,
7025                    16 + LANE2 as u32,
7026                    10,
7027                    11,
7028                    12,
7029                    13,
7030                    14,
7031                    15
7032                ]
7033            ),
7034            10 => simd_shuffle!(
7035                a,
7036                b,
7037                [
7038                    0,
7039                    1,
7040                    2,
7041                    3,
7042                    4,
7043                    5,
7044                    6,
7045                    7,
7046                    8,
7047                    9,
7048                    16 + LANE2 as u32,
7049                    11,
7050                    12,
7051                    13,
7052                    14,
7053                    15
7054                ]
7055            ),
7056            11 => simd_shuffle!(
7057                a,
7058                b,
7059                [
7060                    0,
7061                    1,
7062                    2,
7063                    3,
7064                    4,
7065                    5,
7066                    6,
7067                    7,
7068                    8,
7069                    9,
7070                    10,
7071                    16 + LANE2 as u32,
7072                    12,
7073                    13,
7074                    14,
7075                    15
7076                ]
7077            ),
7078            12 => simd_shuffle!(
7079                a,
7080                b,
7081                [
7082                    0,
7083                    1,
7084                    2,
7085                    3,
7086                    4,
7087                    5,
7088                    6,
7089                    7,
7090                    8,
7091                    9,
7092                    10,
7093                    11,
7094                    16 + LANE2 as u32,
7095                    13,
7096                    14,
7097                    15
7098                ]
7099            ),
7100            13 => simd_shuffle!(
7101                a,
7102                b,
7103                [
7104                    0,
7105                    1,
7106                    2,
7107                    3,
7108                    4,
7109                    5,
7110                    6,
7111                    7,
7112                    8,
7113                    9,
7114                    10,
7115                    11,
7116                    12,
7117                    16 + LANE2 as u32,
7118                    14,
7119                    15
7120                ]
7121            ),
7122            14 => simd_shuffle!(
7123                a,
7124                b,
7125                [
7126                    0,
7127                    1,
7128                    2,
7129                    3,
7130                    4,
7131                    5,
7132                    6,
7133                    7,
7134                    8,
7135                    9,
7136                    10,
7137                    11,
7138                    12,
7139                    13,
7140                    16 + LANE2 as u32,
7141                    15
7142                ]
7143            ),
7144            15 => simd_shuffle!(
7145                a,
7146                b,
7147                [
7148                    0,
7149                    1,
7150                    2,
7151                    3,
7152                    4,
7153                    5,
7154                    6,
7155                    7,
7156                    8,
7157                    9,
7158                    10,
7159                    11,
7160                    12,
7161                    13,
7162                    14,
7163                    16 + LANE2 as u32
7164                ]
7165            ),
7166            _ => unreachable_unchecked(),
7167        }
7168    }
7169}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x8_t,
) -> poly16x8_t {
    // Both lane indices must fit in 3 bits: the vectors have 8 lanes.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // `simd_shuffle!` indexes the concatenation of [a, b]: indices 0..=7
        // select lanes of `a`, 8..=15 select lanes of `b`. Each arm keeps `a`
        // unchanged except lane LANE1, which receives lane LANE2 of `b`
        // (index `8 + LANE2`). The shuffle indices must be constants, hence
        // one match arm per possible LANE1.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: LANE1 is masked with 0b111 above, so 0..=7 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x2_t,
) -> poly64x2_t {
    // Both lane indices must fit in 1 bit: the vectors have 2 lanes.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Shuffle indices address the concatenation [a, b]: 0..=1 are lanes of
        // `a`, 2..=3 are lanes of `b`. Lane LANE1 of `a` is replaced by lane
        // LANE2 of `b` (index `2 + LANE2`); the other lane of `a` is kept.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 is masked with 0b1 above, so 0..=1 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcreate_f64(a: u64) -> float64x1_t {
    // SAFETY: pure bit reinterpretation; `transmute` statically requires the
    // source and destination to have the same size (both are 64 bits wide),
    // and no instruction is emitted (see the `nop` in assert_instr above).
    unsafe { transmute(a) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
    // SAFETY: lane-wise numeric conversion between two 2-lane vectors
    // (f64 -> f32 narrowing); `neon` is enabled on this function.
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
    // SAFETY: lane-wise numeric conversion between two 2-lane vectors
    // (f32 -> f64 widening, always exact); `neon` is enabled on this function.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t {
    // SAFETY: lane-wise signed-integer-to-float conversion on 1-lane
    // vectors; `neon` is enabled on this function.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t {
    // SAFETY: lane-wise signed-integer-to-float conversion on 2-lane
    // vectors; `neon` is enabled on this function.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t {
    // SAFETY: lane-wise unsigned-integer-to-float conversion on 1-lane
    // vectors; `neon` is enabled on this function.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // SAFETY: lane-wise unsigned-integer-to-float conversion on 2-lane
    // vectors; `neon` is enabled on this function.
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to lower precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f16_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t {
    // Narrow `b` to f16 and place it in the upper half of the result,
    // keeping `a` as the lower half (the FCVTN2 "write high half" pattern).
    vcombine_f16(a, vcvt_f16_f32(b))
}
#[doc = "Floating-point convert to higher precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t {
    // Widen only the upper four f16 lanes of `a` to f32 (the FCVTL2
    // "read high half" pattern).
    vcvt_f32_f16(vget_high_f16(a))
}
#[doc = "Floating-point convert to lower precision narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    // SAFETY: `simd_cast(b)` narrows the two f64 lanes to f32; the shuffle
    // then concatenates `a` (result lanes 0-1) with the narrowed `b`
    // (result lanes 2-3). `neon` is enabled on this function.
    unsafe { simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
    // SAFETY: the shuffle extracts the upper two f32 lanes (indices 2, 3) of
    // `a`; `simd_cast` then widens them to f64 (always exact). `neon` is
    // enabled on this function.
    unsafe {
        let b: float32x2_t = simd_shuffle!(a, a, [2, 3]);
        simd_cast(b)
    }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function, and N
    // is validated above.
    unsafe { _vcvt_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function, and N
    // is validated above.
    unsafe { _vcvtq_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_u64(a: uint64x1_t, n: i32) -> float64x1_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function, and N
    // is validated above.
    unsafe { _vcvt_n_f64_u64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_u64(a: uint64x2_t, n: i32) -> float64x2_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function, and N
    // is validated above.
    unsafe { _vcvtq_n_f64_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64"
        )]
        fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function, and N
    // is validated above.
    unsafe { _vcvt_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64"
        )]
        fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function, and N
    // is validated above.
    unsafe { _vcvtq_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64"
        )]
        fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> uint64x1_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function, and N
    // is validated above.
    unsafe { _vcvt_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64"
        )]
        fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> uint64x2_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function, and N
    // is validated above.
    unsafe { _vcvtq_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
    // Declaration of LLVM's generic saturating float-to-signed conversion,
    // which lowers to the expected instruction (see assert_instr above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v1i64.v1f64"
        )]
        fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function.
    unsafe { _vcvt_s64_f64(a) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Declaration of LLVM's generic saturating float-to-signed conversion,
    // which lowers to the expected instruction (see assert_instr above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v2i64.v2f64"
        )]
        fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function.
    unsafe { _vcvtq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Declaration of LLVM's generic saturating float-to-unsigned conversion,
    // which lowers to the expected instruction (see assert_instr above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v1i64.v1f64"
        )]
        fn _vcvt_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function.
    unsafe { _vcvt_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Declaration of LLVM's generic saturating float-to-unsigned conversion,
    // which lowers to the expected instruction (see assert_instr above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v2i64.v2f64"
        )]
        fn _vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function.
    unsafe { _vcvtq_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t {
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i16.v4f16"
        )]
        fn _vcvta_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: the `neon,fp16` target features are enabled on this function.
    unsafe { _vcvta_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v8i16.v8f16"
        )]
        fn _vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: the `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtaq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32"
        )]
        fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function.
    unsafe { _vcvta_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32"
        )]
        fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function.
    unsafe { _vcvtaq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64"
        )]
        fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function.
    unsafe { _vcvta_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64"
        )]
        fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function.
    unsafe { _vcvtaq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i16.v4f16"
        )]
        fn _vcvta_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: the `neon,fp16` target features are enabled on this function.
    unsafe { _vcvta_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v8i16.v8f16"
        )]
        fn _vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: the `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtaq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32"
        )]
        fn _vcvta_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function.
    unsafe { _vcvta_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32"
        )]
        fn _vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function.
    unsafe { _vcvtaq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64"
        )]
        fn _vcvta_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function.
    unsafe { _vcvta_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Declaration of the underlying LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64"
        )]
        fn _vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the `neon` target feature is enabled on this function.
    unsafe { _vcvtaq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s16_f16(a: f16) -> i16 {
    // Delegates to the i32 conversion, then narrows the result with `as`
    // (truncating cast, matching the generator's scheme for 16-bit results).
    vcvtah_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s32_f16(a: f16) -> i32 {
    // Declaration of the underlying scalar LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f16"
        )]
        fn _vcvtah_s32_f16(a: f16) -> i32;
    }
    // SAFETY: the `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtah_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s64_f16(a: f16) -> i64 {
    // Declaration of the underlying scalar LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f16"
        )]
        fn _vcvtah_s64_f16(a: f16) -> i64;
    }
    // SAFETY: the `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtah_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u16_f16(a: f16) -> u16 {
    // Delegates to the u32 conversion, then narrows the result with `as`
    // (truncating cast, matching the generator's scheme for 16-bit results).
    vcvtah_u32_f16(a) as u16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u32_f16(a: f16) -> u32 {
    // Declaration of the underlying scalar LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f16"
        )]
        fn _vcvtah_u32_f16(a: f16) -> u32;
    }
    // SAFETY: the `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtah_u32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u64_f16(a: f16) -> u64 {
    // Declaration of the underlying scalar LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f16"
        )]
        fn _vcvtah_u64_f16(a: f16) -> u64;
    }
    // SAFETY: the `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtah_u64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_s32_f32(a: f32) -> i32 {
    // Declaration of the underlying scalar LLVM/AArch64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f32"
        )]
        fn _vcvtas_s32_f32(a: f32) -> i32;
    }
    // SAFETY: the `neon` target feature is enabled on this function.
    unsafe { _vcvtas_s32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_s64_f64(a: f64) -> i64 {
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f64"
        )]
        fn _vcvtad_s64_f64(a: f64) -> i64;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtad_s64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_u32_f32(a: f32) -> u32 {
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f32"
        )]
        fn _vcvtas_u32_f32(a: f32) -> u32;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtas_u32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_u64_f64(a: f64) -> u64 {
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f64"
        )]
        fn _vcvtad_u64_f64(a: f64) -> u64;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtad_u64_f64(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_s64(a: i64) -> f64 {
    // A plain `as` cast is enough: codegen is pinned to `scvtf` by `assert_instr`.
    a as f64
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_s32(a: i32) -> f32 {
    // A plain `as` cast is enough: codegen is pinned to `scvtf` by `assert_instr`.
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s16(a: i16) -> f16 {
    // A plain `as` cast is enough: codegen is pinned to `scvtf` by `assert_instr`.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s32(a: i32) -> f16 {
    // A plain `as` cast is enough: codegen is pinned to `scvtf` by `assert_instr`.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s64(a: i64) -> f16 {
    // A plain `as` cast is enough: codegen is pinned to `scvtf` by `assert_instr`.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u16(a: u16) -> f16 {
    // A plain `as` cast is enough: codegen is pinned to `ucvtf` by `assert_instr`.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u32(a: u32) -> f16 {
    // A plain `as` cast is enough: codegen is pinned to `ucvtf` by `assert_instr`.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u64(a: u64) -> f16 {
    // A plain `as` cast is enough: codegen is pinned to `ucvtf` by `assert_instr`.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s16<const N: i32>(a: i16) -> f16 {
    // N is the number of fractional bits; only 1..=16 is accepted.
    static_assert!(N >= 1 && N <= 16);
    // Sign-extend to i32 and reuse the 32-bit conversion (value-preserving).
    vcvth_n_f16_s32::<N>(a as i32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s32<const N: i32>(a: i32) -> f16 {
    // N is the number of fractional bits; only 1..=16 is accepted.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i32"
        )]
        fn _vcvth_n_f16_s32(a: i32, n: i32) -> f16;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; N was
    // range-checked above and the required target features are enabled.
    unsafe { _vcvth_n_f16_s32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s64<const N: i32>(a: i64) -> f16 {
    // N is the number of fractional bits; only 1..=16 is accepted.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i64"
        )]
        fn _vcvth_n_f16_s64(a: i64, n: i32) -> f16;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; N was
    // range-checked above and the required target features are enabled.
    unsafe { _vcvth_n_f16_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u16<const N: i32>(a: u16) -> f16 {
    // N is the number of fractional bits; only 1..=16 is accepted.
    static_assert!(N >= 1 && N <= 16);
    // Zero-extend to u32 and reuse the 32-bit conversion (value-preserving).
    vcvth_n_f16_u32::<N>(a as u32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u32<const N: i32>(a: u32) -> f16 {
    // N is the number of fractional bits; only 1..=16 is accepted.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i32"
        )]
        fn _vcvth_n_f16_u32(a: u32, n: i32) -> f16;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; N was
    // range-checked above and the required target features are enabled.
    unsafe { _vcvth_n_f16_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u64<const N: i32>(a: u64) -> f16 {
    // N is the number of fractional bits; only 1..=16 is accepted.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i64"
        )]
        fn _vcvth_n_f16_u64(a: u64, n: i32) -> f16;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; N was
    // range-checked above and the required target features are enabled.
    unsafe { _vcvth_n_f16_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s16_f16<const N: i32>(a: f16) -> i16 {
    // N is the number of fractional bits; only 1..=16 is accepted.
    static_assert!(N >= 1 && N <= 16);
    // Convert through the 32-bit intrinsic, then truncate the result to 16 bits.
    vcvth_n_s32_f16::<N>(a) as i16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s32_f16<const N: i32>(a: f16) -> i32 {
    // N is the number of fractional bits; only 1..=16 is accepted.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f16"
        )]
        fn _vcvth_n_s32_f16(a: f16, n: i32) -> i32;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; N was
    // range-checked above and the required target features are enabled.
    unsafe { _vcvth_n_s32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s64_f16<const N: i32>(a: f16) -> i64 {
    // N is the number of fractional bits; only 1..=16 is accepted.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f16"
        )]
        fn _vcvth_n_s64_f16(a: f16, n: i32) -> i64;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; N was
    // range-checked above and the required target features are enabled.
    unsafe { _vcvth_n_s64_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u16_f16<const N: i32>(a: f16) -> u16 {
    // N is the number of fractional bits; only 1..=16 is accepted.
    static_assert!(N >= 1 && N <= 16);
    // Convert through the 32-bit intrinsic, then truncate the result to 16 bits.
    vcvth_n_u32_f16::<N>(a) as u16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u32_f16<const N: i32>(a: f16) -> u32 {
    // N is the number of fractional bits; only 1..=16 is accepted.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f16"
        )]
        fn _vcvth_n_u32_f16(a: f16, n: i32) -> u32;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; N was
    // range-checked above and the required target features are enabled.
    unsafe { _vcvth_n_u32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u64_f16<const N: i32>(a: f16) -> u64 {
    // N is the number of fractional bits; only 1..=16 is accepted.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f16"
        )]
        fn _vcvth_n_u64_f16(a: f16, n: i32) -> u64;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; N was
    // range-checked above and the required target features are enabled.
    unsafe { _vcvth_n_u64_f16(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s16_f16(a: f16) -> i16 {
    // Rust `as` float->int conversion saturates on overflow and NaN maps to 0;
    // codegen is pinned to `fcvtzs` by `assert_instr`.
    a as i16
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s32_f16(a: f16) -> i32 {
    // Rust `as` float->int conversion saturates on overflow and NaN maps to 0;
    // codegen is pinned to `fcvtzs` by `assert_instr`.
    a as i32
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s64_f16(a: f16) -> i64 {
    // Rust `as` float->int conversion saturates on overflow and NaN maps to 0;
    // codegen is pinned to `fcvtzs` by `assert_instr`.
    a as i64
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u16_f16(a: f16) -> u16 {
    // Rust `as` float->int conversion saturates on overflow and NaN maps to 0;
    // codegen is pinned to `fcvtzu` by `assert_instr`.
    a as u16
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u32_f16(a: f16) -> u32 {
    // Rust `as` float->int conversion saturates on overflow and NaN maps to 0;
    // codegen is pinned to `fcvtzu` by `assert_instr`.
    a as u32
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u64_f16(a: f16) -> u64 {
    // Rust `as` float->int conversion saturates on overflow and NaN maps to 0;
    // codegen is pinned to `fcvtzu` by `assert_instr`.
    a as u64
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t {
    // Declaration of the LLVM intrinsic backing this vector conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i16.v4f16"
        )]
        fn _vcvtm_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtm_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Declaration of the LLVM intrinsic backing this vector conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v8i16.v8f16"
        )]
        fn _vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtmq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
    // Declaration of the LLVM intrinsic backing this vector conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32"
        )]
        fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtm_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
    // Declaration of the LLVM intrinsic backing this vector conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32"
        )]
        fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtmq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
    // Declaration of the LLVM intrinsic backing this vector conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64"
        )]
        fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtm_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Declaration of the LLVM intrinsic backing this vector conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64"
        )]
        fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtmq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Declaration of the LLVM intrinsic backing this vector conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i16.v4f16"
        )]
        fn _vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtm_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Declaration of the LLVM intrinsic backing this vector conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v8i16.v8f16"
        )]
        fn _vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtmq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Declaration of the LLVM intrinsic backing this vector conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32"
        )]
        fn _vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtm_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Declaration of the LLVM intrinsic backing this vector conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32"
        )]
        fn _vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtmq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Declaration of the LLVM intrinsic backing this vector conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64"
        )]
        fn _vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtm_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Declaration of the LLVM intrinsic backing this vector conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64"
        )]
        fn _vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtmq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s16_f16(a: f16) -> i16 {
    // Convert via the 32-bit variant and truncate the result to 16 bits.
    vcvtmh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s32_f16(a: f16) -> i32 {
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f16"
        )]
        fn _vcvtmh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtmh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s64_f16(a: f16) -> i64 {
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f16"
        )]
        fn _vcvtmh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtmh_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u16_f16(a: f16) -> u16 {
    // Convert via the 32-bit variant and truncate the result to 16 bits.
    vcvtmh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u32_f16(a: f16) -> u32 {
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f16"
        )]
        fn _vcvtmh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtmh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u64_f16(a: f16) -> u64 {
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f16"
        )]
        fn _vcvtmh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtmh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_s32_f32(a: f32) -> i32 {
    // Declaration of the LLVM intrinsic backing this conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f32"
        )]
        fn _vcvtms_s32_f32(a: f32) -> i32;
    }
    // SAFETY: value-in/value-out intrinsic with no memory access; the target
    // features it requires are enabled by `#[target_feature]` above.
    unsafe { _vcvtms_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_s64_f64(a: f64) -> i64 {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f64"
        )]
        fn _vcvtmd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtmd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_u32_f32(a: f32) -> u32 {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f32"
        )]
        fn _vcvtms_u32_f32(a: f32) -> u32;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtms_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_u64_f64(a: f64) -> u64 {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f64"
        )]
        fn _vcvtmd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtmd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i16.v4f16"
        )]
        fn _vcvtn_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtn_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v8i16.v8f16"
        )]
        fn _vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtnq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32"
        )]
        fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtn_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32"
        )]
        fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtnq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64"
        )]
        fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtn_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64"
        )]
        fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtnq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i16.v4f16"
        )]
        fn _vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtn_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v8i16.v8f16"
        )]
        fn _vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtnq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32"
        )]
        fn _vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtn_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32"
        )]
        fn _vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtnq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64"
        )]
        fn _vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtn_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64"
        )]
        fn _vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtnq_u64_f64(a) }
}
8760#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
8761#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s16_f16)"]
8762#[inline(always)]
8763#[cfg_attr(test, assert_instr(fcvtns))]
8764#[target_feature(enable = "neon,fp16")]
8765#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8766#[cfg(not(target_arch = "arm64ec"))]
8767pub fn vcvtnh_s16_f16(a: f16) -> i16 {
8768    vcvtnh_s32_f16(a) as i16
8769}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s32_f16(a: f16) -> i32 {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f16"
        )]
        fn _vcvtnh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtnh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s64_f16(a: f16) -> i64 {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f16"
        )]
        fn _vcvtnh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtnh_s64_f16(a) }
}
8804#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8805#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u16_f16)"]
8806#[inline(always)]
8807#[cfg_attr(test, assert_instr(fcvtnu))]
8808#[target_feature(enable = "neon,fp16")]
8809#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8810#[cfg(not(target_arch = "arm64ec"))]
8811pub fn vcvtnh_u16_f16(a: f16) -> u16 {
8812    vcvtnh_u32_f16(a) as u16
8813}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u32_f16(a: f16) -> u32 {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f16"
        )]
        fn _vcvtnh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtnh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u64_f16(a: f16) -> u64 {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f16"
        )]
        fn _vcvtnh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtnh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_s32_f32(a: f32) -> i32 {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f32"
        )]
        fn _vcvtns_s32_f32(a: f32) -> i32;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtns_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_s64_f64(a: f64) -> i64 {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f64"
        )]
        fn _vcvtnd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtnd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_u32_f32(a: f32) -> u32 {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f32"
        )]
        fn _vcvtns_u32_f32(a: f32) -> u32;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtns_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_u64_f64(a: f64) -> u64 {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f64"
        )]
        fn _vcvtnd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtnd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i16.v4f16"
        )]
        fn _vcvtp_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtp_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v8i16.v8f16"
        )]
        fn _vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtpq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32"
        )]
        fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtp_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32"
        )]
        fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtpq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64"
        )]
        fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtp_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64"
        )]
        fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtpq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i16.v4f16"
        )]
        fn _vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtp_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v8i16.v8f16"
        )]
        fn _vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtpq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32"
        )]
        fn _vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtp_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32"
        )]
        fn _vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtpq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64"
        )]
        fn _vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtp_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64"
        )]
        fn _vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtpq_u64_f64(a) }
}
9108#[doc = "Floating-point convert to integer, rounding to plus infinity"]
9109#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s16_f16)"]
9110#[inline(always)]
9111#[cfg_attr(test, assert_instr(fcvtps))]
9112#[target_feature(enable = "neon,fp16")]
9113#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9114#[cfg(not(target_arch = "arm64ec"))]
9115pub fn vcvtph_s16_f16(a: f16) -> i16 {
9116    vcvtph_s32_f16(a) as i16
9117}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s32_f16(a: f16) -> i32 {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f16"
        )]
        fn _vcvtph_s32_f16(a: f16) -> i32;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtph_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s64_f16(a: f16) -> i64 {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f16"
        )]
        fn _vcvtph_s64_f16(a: f16) -> i64;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtph_s64_f16(a) }
}
9152#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
9153#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u16_f16)"]
9154#[inline(always)]
9155#[cfg_attr(test, assert_instr(fcvtpu))]
9156#[target_feature(enable = "neon,fp16")]
9157#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9158#[cfg(not(target_arch = "arm64ec"))]
9159pub fn vcvtph_u16_f16(a: f16) -> u16 {
9160    vcvtph_u32_f16(a) as u16
9161}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u32_f16(a: f16) -> u32 {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f16"
        )]
        fn _vcvtph_u32_f16(a: f16) -> u32;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtph_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u64_f16(a: f16) -> u64 {
    // Declaration of the LLVM intrinsic this function forwards to; the
    // `link_name` selects the lowering checked by `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f16"
        )]
        fn _vcvtph_u64_f16(a: f16) -> u64;
    }
    // SAFETY: the target features this intrinsic needs are enabled by the
    // `#[target_feature]` attribute on this function.
    unsafe { _vcvtph_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_s32_f32(a: f32) -> i32 {
    // Thin wrapper over the AArch64 FCVTPS (f32 -> i32) LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f32"
        )]
        fn _vcvtps_s32_f32(a: f32) -> i32;
    }
    unsafe { _vcvtps_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_s64_f64(a: f64) -> i64 {
    // Thin wrapper over the AArch64 FCVTPS (f64 -> i64) LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f64"
        )]
        fn _vcvtpd_s64_f64(a: f64) -> i64;
    }
    unsafe { _vcvtpd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_u32_f32(a: f32) -> u32 {
    // Thin wrapper over the AArch64 FCVTPU (f32 -> u32) LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f32"
        )]
        fn _vcvtps_u32_f32(a: f32) -> u32;
    }
    unsafe { _vcvtps_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_u64_f64(a: f64) -> u64 {
    // Thin wrapper over the AArch64 FCVTPU (f64 -> u64) LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f64"
        )]
        fn _vcvtpd_u64_f64(a: f64) -> u64;
    }
    unsafe { _vcvtpd_u64_f64(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_u32(a: u32) -> f32 {
    // A plain int-to-float cast is sufficient; the `assert_instr` above checks
    // it lowers to a single UCVTF.
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_u64(a: u64) -> f64 {
    // A plain int-to-float cast is sufficient; the `assert_instr` above checks
    // it lowers to a single UCVTF.
    a as f64
}
9278#[doc = "Fixed-point convert to floating-point"]
9279#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"]
9280#[inline(always)]
9281#[target_feature(enable = "neon")]
9282#[cfg_attr(test, assert_instr(scvtf, N = 2))]
9283#[rustc_legacy_const_generics(1)]
9284#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9285pub fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
9286    static_assert!(N >= 1 && N <= 64);
9287    unsafe extern "unadjusted" {
9288        #[cfg_attr(
9289            any(target_arch = "aarch64", target_arch = "arm64ec"),
9290            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32"
9291        )]
9292        fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32;
9293    }
9294    unsafe { _vcvts_n_f32_s32(a, N) }
9295}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
    // 64-bit source: #fbits may range over 1..=64. N is the number of
    // fractional bits in the fixed-point input.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64;
    }
    unsafe { _vcvtd_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
    // 32-bit source: #fbits may range over 1..=32.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32"
        )]
        fn _vcvts_n_f32_u32(a: u32, n: i32) -> f32;
    }
    unsafe { _vcvts_n_f32_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
    // 64-bit source: #fbits may range over 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_u64(a: u64, n: i32) -> f64;
    }
    unsafe { _vcvtd_n_f64_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
    // 32-bit result: #fbits may range over 1..=32. N is the number of
    // fractional bits in the fixed-point result.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32"
        )]
        fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32;
    }
    unsafe { _vcvts_n_s32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
    // 64-bit result: #fbits may range over 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64"
        )]
        fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64;
    }
    unsafe { _vcvtd_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
    // 32-bit result: #fbits may range over 1..=32.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32"
        )]
        fn _vcvts_n_u32_f32(a: f32, n: i32) -> u32;
    }
    unsafe { _vcvts_n_u32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
    // 64-bit result: #fbits may range over 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64"
        )]
        fn _vcvtd_n_u64_f64(a: f64, n: i32) -> u64;
    }
    unsafe { _vcvtd_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_s32_f32(a: f32) -> i32 {
    // NOTE(review): the summary previously read "Fixed-point convert to
    // floating-point", but this converts float -> int (FCVTZS, per the
    // assert_instr above); corrected to match the vcvts_n_s32_f32 wording.
    // Rust's float-to-int `as` cast truncates toward zero and saturates,
    // which the compiler lowers to FCVTZS.
    a as i32
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_s64_f64(a: f64) -> i64 {
    // NOTE(review): summary corrected — this is a float -> int conversion
    // (FCVTZS), not fixed-point -> float. Rust's `as` cast truncates toward
    // zero and saturates, lowering to FCVTZS.
    a as i64
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_u32_f32(a: f32) -> u32 {
    // NOTE(review): summary corrected — this is a float -> unsigned int
    // conversion (FCVTZU), not fixed-point -> float. Rust's `as` cast
    // truncates toward zero and saturates, lowering to FCVTZU.
    a as u32
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_u64_f64(a: f64) -> u64 {
    // NOTE(review): summary corrected — this is a float -> unsigned int
    // conversion (FCVTZU), not fixed-point -> float. Rust's `as` cast
    // truncates toward zero and saturates, lowering to FCVTZU.
    a as u64
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
    // Thin wrapper over the AArch64 FCVTXN (round-to-odd narrowing) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64"
        )]
        fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t;
    }
    unsafe { _vcvtx_f32_f64(a) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    // Narrow `b` and concatenate after `a`: lanes [0,1] come from `a`,
    // lanes [2,3] from the narrowed `b` (FCVTXN2 semantics).
    unsafe { simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtxd_f32_f64(a: f64) -> f32 {
    // Scalar form expressed via the vector intrinsic: broadcast, convert,
    // then extract lane 0.
    unsafe { simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Lane-wise division via the generic SIMD intrinsic; lowers to FDIV.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Lane-wise division via the generic SIMD intrinsic; lowers to FDIV.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Lane-wise division via the generic SIMD intrinsic; lowers to FDIV.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Lane-wise division via the generic SIMD intrinsic; lowers to FDIV.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lane-wise division via the generic SIMD intrinsic; lowers to FDIV.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lane-wise division via the generic SIMD intrinsic; lowers to FDIV.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision division; with fp16 enabled this compiles to a
    // single FDIV (checked by assert_instr).
    a / b
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
    // Single-lane vector: duplicating lane 0 of a 1-lane vector is the
    // identity, so no instruction is emitted (hence assert_instr(nop)).
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
    // Single-lane vector: duplication is the identity.
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
    // 2-lane source -> 1-bit lane index. Extract the scalar lane and
    // reinterpret it as a 1-lane vector.
    static_assert_uimm_bits!(N, 1);
    unsafe { transmute::<f64, _>(simd_extract!(a, N as u32)) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
    // 2-lane source -> 1-bit lane index. Extract, then reinterpret as a
    // 1-lane poly64 vector.
    static_assert_uimm_bits!(N, 1);
    unsafe { transmute::<u64, _>(simd_extract!(a, N as u32)) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
    // 8-lane vector -> 3-bit lane index.
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
    // 8-lane vector -> 3-bit lane index.
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
    // 8-lane vector -> 3-bit lane index.
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
    // 8-lane vector -> 3-bit lane index.
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
    // 8-lane vector -> 3-bit lane index.
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
    // 8-lane vector -> 3-bit lane index.
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
    // 16-lane vector -> 4-bit lane index.
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
    // 16-lane vector -> 4-bit lane index.
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
    // 16-lane vector -> 4-bit lane index.
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
    // 1-lane vector: only lane 0 is valid.
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
    // 1-lane vector: only lane 0 is valid.
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
    // 1-lane vector: only lane 0 is valid.
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vduph_lane_f16<const N: i32>(a: float16x4_t) -> f16 {
    // 4-lane vector -> 2-bit lane index.
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
9746#[doc = "Extract an element from a vector"]
9747#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_f16)"]
9748#[inline(always)]
9749#[cfg_attr(test, assert_instr(nop, N = 4))]
9750#[rustc_legacy_const_generics(1)]
9751#[target_feature(enable = "neon,fp16")]
9752#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9753#[cfg(not(target_arch = "arm64ec"))]
9754pub fn vduph_laneq_f16<const N: i32>(a: float16x8_t) -> f16 {
9755    static_assert_uimm_bits!(N, 4);
9756    unsafe { simd_extract!(a, N as u32) }
9757}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
    // 1-lane source: only lane 0 is valid. Broadcast it into both lanes of
    // the 128-bit result (lowers to DUP).
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
    // 1-lane source: broadcast lane 0 into both result lanes.
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
    // 2-lane source -> 1-bit lane index; broadcast the chosen lane.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
    // 2-lane source -> 1-bit lane index; broadcast the chosen lane.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
9802#[doc = "Set all vector lanes to the same value"]
9803#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"]
9804#[inline(always)]
9805#[target_feature(enable = "neon")]
9806#[cfg_attr(test, assert_instr(nop, N = 1))]
9807#[rustc_legacy_const_generics(1)]
9808#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9809pub fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
9810    static_assert_uimm_bits!(N, 1);
9811    unsafe { simd_extract!(a, N as u32) }
9812}
9813#[doc = "Set all vector lanes to the same value"]
9814#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"]
9815#[inline(always)]
9816#[target_feature(enable = "neon")]
9817#[cfg_attr(test, assert_instr(nop, N = 1))]
9818#[rustc_legacy_const_generics(1)]
9819#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9820pub fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
9821    static_assert_uimm_bits!(N, 1);
9822    unsafe { simd_extract!(a, N as u32) }
9823}
9824#[doc = "Set all vector lanes to the same value"]
9825#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"]
9826#[inline(always)]
9827#[target_feature(enable = "neon")]
9828#[cfg_attr(test, assert_instr(nop, N = 1))]
9829#[rustc_legacy_const_generics(1)]
9830#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9831pub fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
9832    static_assert_uimm_bits!(N, 1);
9833    unsafe { simd_extract!(a, N as u32) }
9834}
9835#[doc = "Set all vector lanes to the same value"]
9836#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"]
9837#[inline(always)]
9838#[target_feature(enable = "neon")]
9839#[cfg_attr(test, assert_instr(nop, N = 1))]
9840#[rustc_legacy_const_generics(1)]
9841#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9842pub fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
9843    static_assert_uimm_bits!(N, 1);
9844    unsafe { simd_extract!(a, N as u32) }
9845}
9846#[doc = "Set all vector lanes to the same value"]
9847#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"]
9848#[inline(always)]
9849#[target_feature(enable = "neon")]
9850#[cfg_attr(test, assert_instr(nop, N = 1))]
9851#[rustc_legacy_const_generics(1)]
9852#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9853pub fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
9854    static_assert_uimm_bits!(N, 1);
9855    unsafe { simd_extract!(a, N as u32) }
9856}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
    // Lane index must fit in 1 bit: valid lanes are 0..=1 for a 2-lane vector.
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N is compile-time bounded by the assert above, so the lane
    // extraction is always in-bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
    // Lane index must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    // SAFETY: N is compile-time bounded by the assert above, so the lane
    // extraction is always in-bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
    // Lane index must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    // SAFETY: N is compile-time bounded by the assert above, so the lane
    // extraction is always in-bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
    // Lane index must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    // SAFETY: N is compile-time bounded by the assert above, so the lane
    // extraction is always in-bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
    // Lane index must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    // SAFETY: N is compile-time bounded by the assert above, so the lane
    // extraction is always in-bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
    // Lane index must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    // SAFETY: N is compile-time bounded by the assert above, so the lane
    // extraction is always in-bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
    // Lane index must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    // SAFETY: N is compile-time bounded by the assert above, so the lane
    // extraction is always in-bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    // Binding for the LLVM intrinsic that lowers to the SHA3 EOR3 instruction
    // (three-way XOR in a single instruction).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v16i8"
        )]
        fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    // SAFETY: `#[target_feature(enable = "neon,sha3")]` guarantees the
    // required CPU features are enabled when this function is callable.
    unsafe { _veor3q_s8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Binding for the LLVM intrinsic that lowers to the SHA3 EOR3 instruction
    // (three-way XOR in a single instruction).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v8i16"
        )]
        fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: `#[target_feature(enable = "neon,sha3")]` guarantees the
    // required CPU features are enabled when this function is callable.
    unsafe { _veor3q_s16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Binding for the LLVM intrinsic that lowers to the SHA3 EOR3 instruction
    // (three-way XOR in a single instruction).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v4i32"
        )]
        fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: `#[target_feature(enable = "neon,sha3")]` guarantees the
    // required CPU features are enabled when this function is callable.
    unsafe { _veor3q_s32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    // Binding for the LLVM intrinsic that lowers to the SHA3 EOR3 instruction
    // (three-way XOR in a single instruction).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v2i64"
        )]
        fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    // SAFETY: `#[target_feature(enable = "neon,sha3")]` guarantees the
    // required CPU features are enabled when this function is callable.
    unsafe { _veor3q_s64(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // Binding for the LLVM intrinsic that lowers to the SHA3 EOR3 instruction
    // (three-way XOR in a single instruction).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v16i8"
        )]
        fn _veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: `#[target_feature(enable = "neon,sha3")]` guarantees the
    // required CPU features are enabled when this function is callable.
    unsafe { _veor3q_u8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    // Binding for the LLVM intrinsic that lowers to the SHA3 EOR3 instruction
    // (three-way XOR in a single instruction).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v8i16"
        )]
        fn _veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: `#[target_feature(enable = "neon,sha3")]` guarantees the
    // required CPU features are enabled when this function is callable.
    unsafe { _veor3q_u16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Binding for the LLVM intrinsic that lowers to the SHA3 EOR3 instruction
    // (three-way XOR in a single instruction).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v4i32"
        )]
        fn _veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: `#[target_feature(enable = "neon,sha3")]` guarantees the
    // required CPU features are enabled when this function is callable.
    unsafe { _veor3q_u32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Binding for the LLVM intrinsic that lowers to the SHA3 EOR3 instruction
    // (three-way XOR in a single instruction).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v2i64"
        )]
        fn _veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: `#[target_feature(enable = "neon,sha3")]` guarantees the
    // required CPU features are enabled when this function is callable.
    unsafe { _veor3q_u64(a, b, c) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // N must fit in 1 bit: the extraction may start at lane 0 or 1.
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N is masked to 0..=1, making the `_` arm unreachable; all
    // shuffle indices are in-bounds for the 4-lane concatenation [a0, a1, b0, b1].
    unsafe {
        match N & 0b1 {
            // Window starting at lane N of the concatenated pair.
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // N must fit in 1 bit: the extraction may start at lane 0 or 1.
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N is masked to 0..=1, making the `_` arm unreachable; all
    // shuffle indices are in-bounds for the 4-lane concatenation [a0, a1, b0, b1].
    unsafe {
        match N & 0b1 {
            // Window starting at lane N of the concatenated pair.
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // SAFETY: `simd_fma` is well-defined for any float vector values.
    // Note the argument order: the product operands come first, the
    // accumulator `a` last, i.e. (b * c) + a with a single rounding.
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfma_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    // Broadcast lane LANE of `c` and defer to the plain vector FMA.
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfma_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // LANE must fit in 3 bits: valid lanes are 0..=7 for an 8-lane vector.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    // Broadcast lane LANE of `c` and defer to the plain vector FMA.
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    // Broadcast lane LANE of `c` and defer to the plain vector FMA.
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // LANE must fit in 3 bits: valid lanes are 0..=7 for an 8-lane vector.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    // Broadcast lane LANE of `c` and defer to the plain vector FMA.
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // LANE must fit in 1 bit: valid lanes are 0..=1 for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    // Broadcast lane LANE of `c` and defer to the plain vector FMA.
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    // Broadcast lane LANE of `c` and defer to the plain vector FMA.
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // LANE must fit in 1 bit: valid lanes are 0..=1 for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    // Broadcast lane LANE of `c` and defer to the plain vector FMA.
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    // Broadcast lane LANE of `c` and defer to the plain vector FMA.
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    // LANE must fit in 1 bit: valid lanes are 0..=1 for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    // Broadcast lane LANE of `c` and defer to the plain vector FMA.
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    // `c` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // SAFETY: LANE is compile-time pinned to 0, so the extract is in-bounds.
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    // LANE must fit in 1 bit: valid lanes are 0..=1 for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    // Broadcast lane LANE of `c` and defer to the plain vector FMA.
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
10274#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
10275#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f16)"]
10276#[inline(always)]
10277#[target_feature(enable = "neon,fp16")]
10278#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10279#[cfg(not(target_arch = "arm64ec"))]
10280#[cfg_attr(test, assert_instr(fmla))]
10281pub fn vfma_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
10282    vfma_f16(a, b, vdup_n_f16(c))
10283}
10284#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
10285#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f16)"]
10286#[inline(always)]
10287#[target_feature(enable = "neon,fp16")]
10288#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10289#[cfg(not(target_arch = "arm64ec"))]
10290#[cfg_attr(test, assert_instr(fmla))]
10291pub fn vfmaq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
10292    vfmaq_f16(a, b, vdupq_n_f16(c))
10293}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    // Broadcast the scalar `c` across the vector, then reuse the vector FMA.
    vfma_f64(a, b, vdup_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    // `c` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // SAFETY: LANE is compile-time pinned to 0, so the extract is in-bounds;
    // `fmaf64` is well-defined for any float values.
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        // Scalar fused (b * c) + a with a single rounding.
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmah_f16(a: f16, b: f16, c: f16) -> f16 {
    // Scalar fused (b * c) + a with a single rounding; `a` is the accumulator.
    fmaf16(b, c, a)
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmah_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        // Defer to the scalar FMA with the extracted lane as multiplier.
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmah_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    // LANE must fit in 3 bits: valid lanes are 0..=7 for an 8-lane vector.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        // Defer to the scalar FMA with the extracted lane as multiplier.
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // SAFETY: `simd_fma` is well-defined for any float vector values.
    // Note the argument order: the product operands come first, the
    // accumulator `a` last, i.e. (b * c) + a with a single rounding.
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    // `c` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // SAFETY: LANE is compile-time pinned to 0, so the extract is in-bounds.
    // Broadcast the lane of `c` and defer to the plain vector FMA.
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    // Broadcast the scalar `c` across all lanes, then reuse the vector FMA.
    vfmaq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    // LANE must fit in 1 bit: valid lanes are 0..=1 for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds;
    // `fmaf32` is well-defined for any float values.
    unsafe {
        let c: f32 = simd_extract!(c, LANE as u32);
        // Scalar fused (b * c) + a with a single rounding.
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds;
    // `fmaf32` is well-defined for any float values.
    unsafe {
        let c: f32 = simd_extract!(c, LANE as u32);
        // Scalar fused (b * c) + a with a single rounding.
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    // LANE must fit in 1 bit: valid lanes are 0..=1 for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds;
    // `fmaf64` is well-defined for any float values.
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        // Scalar fused (b * c) + a with a single rounding.
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Binding for the LLVM intrinsic behind FMLAL2: widening f16 -> f32
    // multiply-add using the upper halves of `a` and `b`, accumulating into `r`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v2f32.v4f16"
        )]
        fn _vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: `#[target_feature(enable = "neon,fp16")]` plus `fhm` guarantees
    // the required CPU features are enabled when this function is callable.
    unsafe { _vfmlal_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Binding for the LLVM intrinsic behind FMLAL2: widening f16 -> f32
    // multiply-add using the upper halves of `a` and `b`, accumulating into `r`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v4f32.v8f16"
        )]
        fn _vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: `#[target_feature(enable = "neon,fp16")]` plus `fhm` guarantees
    // the required CPU features are enabled when this function is callable.
    unsafe { _vfmlalq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    // Broadcast lane LANE of `b` and defer to the vector-by-vector form.
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // LANE must fit in 3 bits: valid lanes are 0..=7 for an 8-lane vector.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    // Broadcast lane LANE of `b` and defer to the vector-by-vector form.
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    // Broadcast lane LANE of `b` and defer to the vector-by-vector form.
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // LANE must fit in 3 bits: valid lanes are 0..=7 for an 8-lane vector.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time bounded, so the extract is in-bounds.
    // Broadcast lane LANE of `b` and defer to the vector-by-vector form.
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and defer to the vector form.
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and defer to the vector form.
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and defer to the vector form.
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and defer to the vector form.
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Direct binding to the underlying AArch64 LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v2f32.v4f16"
        )]
        fn _vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: the target features required by the intrinsic are enabled
    // via the `#[target_feature]` attributes on this function.
    unsafe { _vfmlal_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Direct binding to the underlying AArch64 LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v4f32.v8f16"
        )]
        fn _vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: the target features required by the intrinsic are enabled
    // via the `#[target_feature]` attributes on this function.
    unsafe { _vfmlalq_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Direct binding to the underlying AArch64 LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v2f32.v4f16"
        )]
        fn _vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: the target features required by the intrinsic are enabled
    // via the `#[target_feature]` attributes on this function.
    unsafe { _vfmlsl_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Direct binding to the underlying AArch64 LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v4f32.v8f16"
        )]
        fn _vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: the target features required by the intrinsic are enabled
    // via the `#[target_feature]` attributes on this function.
    unsafe { _vfmlslq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and defer to the vector form.
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and defer to the vector form.
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and defer to the vector form.
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and defer to the vector form.
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and defer to the vector form.
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and defer to the vector form.
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and defer to the vector form.
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and defer to the vector form.
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Direct binding to the underlying AArch64 LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v2f32.v4f16"
        )]
        fn _vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: the target features required by the intrinsic are enabled
    // via the `#[target_feature]` attributes on this function.
    unsafe { _vfmlsl_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Direct binding to the underlying AArch64 LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v4f32.v8f16"
        )]
        fn _vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: the target features required by the intrinsic are enabled
    // via the `#[target_feature]` attributes on this function.
    unsafe { _vfmlslq_low_f16(r, a, b) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe {
        // a - b * c == a + (-b) * c, so negate `b` and reuse the
        // fused multiply-add.
        let b: float64x1_t = simd_neg(b);
        vfma_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `c` and defer to the vector form.
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `c` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `c` and defer to the vector form.
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `c` and defer to the vector form.
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `c` and defer to the vector form.
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` has 2 single-precision lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `c` and defer to the vector form.
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // `c` has 4 single-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `c` and defer to the vector form.
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // `c` has 2 single-precision lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `c` and defer to the vector form.
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` has 4 single-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `c` and defer to the vector form.
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    // `c` has 2 double-precision lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `c` and defer to the vector form.
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    // `c` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Broadcast the selected lane of `c` and defer to the vector form.
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    // `c` has 2 double-precision lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `c` and defer to the vector form.
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfms_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    // Broadcast the scalar `c` across all lanes and defer to the vector form.
    vfms_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfmsq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    // Broadcast the scalar `c` across all lanes and defer to the vector form.
    vfmsq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    // Broadcast the scalar `c` and defer to the vector form.
    vfms_f64(a, b, vdup_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmsub))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_f16(a: f16, b: f16, c: f16) -> f16 {
    // a - b * c == a + (-b) * c: negate `b` and reuse the scalar fused
    // multiply-add.
    vfmah_f16(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    // `v` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Extract the selected lane and defer to the scalar form.
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    // `v` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Extract the selected lane and defer to the scalar form.
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe {
        // a - b * c == a + (-b) * c, so negate `b` and reuse the
        // fused multiply-add.
        let b: float64x2_t = simd_neg(b);
        vfmaq_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    // `c` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Broadcast the selected lane of `c` and defer to the vector form.
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    // Broadcast the scalar `c` across both lanes and defer to the vector form.
    vfmsq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    // a - b * c[LANE] == a + (-b) * c[LANE]: negate `b` and reuse the
    // fused multiply-add by-lane form (which also validates LANE).
    vfmas_lane_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    // a - b * c[LANE] == a + (-b) * c[LANE]: negate `b` and reuse the
    // fused multiply-add by-lane form (which also validates LANE).
    vfmas_laneq_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    // a - b * c[LANE] == a + (-b) * c[LANE]: negate `b` and reuse the
    // fused multiply-add by-lane form (which also validates LANE).
    vfmad_lane_f64::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    // a - b * c[LANE] == a + (-b) * c[LANE]: negate `b` and reuse the
    // fused multiply-add by-lane form (which also validates LANE).
    vfmad_laneq_f64::<LANE>(a, -b, c)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t {
    // Unaligned read of one float16x4_t (4 consecutive f16 values); no
    // alignment requirement beyond `ptr` being valid for that many bytes.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full 128-bit vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full 128-bit vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t {
    // Single-lane vector: one unaligned read of the element; `ptr` must be
    // valid for that read (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full 128-bit vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full 128-bit vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full 128-bit vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full 128-bit vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t {
    // Single-lane vector: one unaligned read of the element; `ptr` must be
    // valid for that read (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full 128-bit vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full 128-bit vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full 128-bit vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full 128-bit vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t {
    // Single-lane vector: one unaligned read of the element; `ptr` must be
    // valid for that read (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full 128-bit vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full 128-bit vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
    // Whole-vector unaligned read; `ptr` must be valid for a read of the
    // full 128-bit vector width (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
    // p64 requires the `aes` feature (see target_feature above). Single
    // unaligned read; `ptr` must be valid for it (contract of `read_unaligned`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t {
    // p64 requires the `aes` feature (see target_feature above). Whole-vector
    // unaligned read; `ptr` must be valid for the full 128-bit width.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1_f64_x2(ptr: *const f64) -> float64x1x2_t {
    // Reads the whole two-register aggregate with one unaligned load;
    // `ptr` must be valid for 2 consecutive f64 reads.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1_f64_x3(ptr: *const f64) -> float64x1x3_t {
    // Reads the whole three-register aggregate with one unaligned load;
    // `ptr` must be valid for 3 consecutive f64 reads.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1_f64_x4(ptr: *const f64) -> float64x1x4_t {
    // Reads the whole four-register aggregate with one unaligned load;
    // `ptr` must be valid for 4 consecutive f64 reads.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1q_f64_x2(ptr: *const f64) -> float64x2x2_t {
    // Reads the whole two-register (2x128-bit) aggregate with one unaligned
    // load; `ptr` must be valid for 4 consecutive f64 reads.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1q_f64_x3(ptr: *const f64) -> float64x2x3_t {
    // Reads the whole three-register (3x128-bit) aggregate with one unaligned
    // load; `ptr` must be valid for 6 consecutive f64 reads.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1q_f64_x4(ptr: *const f64) -> float64x2x4_t {
    // Reads the whole four-register (4x128-bit) aggregate with one unaligned
    // load; `ptr` must be valid for 8 consecutive f64 reads.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t {
    // Direct binding of the LLVM `ld2r` (load 2-element structure and
    // replicate) intrinsic; the link_name selects the v1f64 overload that
    // matches the return type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v1f64.p0"
        )]
        fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t;
    }
    _vld2_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
    // Direct binding of the LLVM `ld2r` intrinsic; the link_name selects the
    // v2f64 overload that matches the return type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2f64.p0"
        )]
        fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t;
    }
    _vld2q_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t {
    // Direct binding of the LLVM `ld2r` intrinsic; the link_name selects the
    // v2i64 overload. Also reused (via transmute) by the u64/p64 variants below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2i64.p0"
        )]
        fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t;
    }
    _vld2q_dup_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t {
    // Degenerate 1-lane case: both elements are fetched with a single
    // unaligned read; `assert_instr(nop)` above reflects that no dedicated
    // structured-load instruction is expected here.
    crate::ptr::read_unaligned(a.cast())
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t {
    // Single-lane vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0"
        )]
        fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t;
    }
    // LLVM's ld2lane takes the existing register values, the lane, then the pointer.
    _vld2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> int64x1x2_t {
    // Single-lane vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0"
        )]
        fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t;
    }
    // LLVM's ld2lane takes the existing register values, the lane, then the pointer.
    _vld2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t {
    // Single-lane vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    // Bit-identical to the signed 64-bit variant; transmute in and out.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t {
    // Single-lane vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    // Bit-identical to the signed 64-bit variant; transmute in and out.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    // Little-endian: bit-identical to the signed variant, no lane fixup needed.
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    // Big-endian variant: normalize lane order by reversing the two lanes of
    // each result vector ([1, 0] shuffle).
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    // Little-endian: bit-identical to the signed variant, no lane fixup needed.
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    // Big-endian variant: normalize lane order by reversing the two lanes of
    // each result vector ([1, 0] shuffle).
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t {
    // Direct binding of the LLVM `ld2` structured-load (de-interleaving)
    // intrinsic; the link_name selects the v2f64 overload.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2f64.p0"
        )]
        fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t;
    }
    _vld2q_f64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t {
    // Direct binding of the LLVM `ld2` structured-load (de-interleaving)
    // intrinsic; the link_name selects the v2i64 overload.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2i64.p0"
        )]
        fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t;
    }
    _vld2q_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t {
    // Two lanes per vector: LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0"
        )]
        fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8)
            -> float64x2x2_t;
    }
    // LLVM's ld2lane takes the existing register values, the lane, then the pointer.
    _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> int8x16x2_t {
    // Sixteen lanes per vector: LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0"
        )]
        fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t;
    }
    // LLVM's ld2lane takes the existing register values, the lane, then the pointer.
    _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) -> int64x2x2_t {
    // Two lanes per vector: LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0"
        )]
        fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t;
    }
    // LLVM's ld2lane takes the existing register values, the lane, then the pointer.
    _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// p64 is only available with the `aes` (polynomial) extension.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    // poly64 reinterprets as int64 bit-for-bit; delegate to the signed variant.
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t {
    // 16 lanes -> 4-bit lane index; u8 reinterprets as i8 and delegates.
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t {
    // 2 lanes -> 1-bit lane index; u64 reinterprets as i64 and delegates.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t {
    // 16 lanes -> 4-bit lane index; p8 reinterprets as i8 and delegates.
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// Little-endian variant: a plain bit-for-bit reinterpretation of the
// signed load suffices (see the big-endian twin below for the lane swap).
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a)));
    // Big-endian variant: swap the two 64-bit lanes of each result vector
    // ([1, 0]) — the generator's standard fixup for transmuted results on
    // big-endian targets.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    // u64 reinterprets as i64 bit-for-bit; delegates to the signed load.
    // NOTE(review): unlike vld2q_p64 / vld3q_dup_u64 in this file, there is
    // no big-endian lane-swap twin for this intrinsic — confirm against the
    // stdarch-gen-arm spec that this asymmetry is intended.
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t {
    unsafe extern "unadjusted" {
        // ld3r = load 3-element structure and replicate to all lanes.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v1f64.p0"
        )]
        fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t;
    }
    _vld3_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
    unsafe extern "unadjusted" {
        // ld3r on the 128-bit (v2f64) form.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2f64.p0"
        )]
        fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t;
    }
    _vld3q_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t {
    unsafe extern "unadjusted" {
        // ld3r on the 128-bit (v2i64) form; also the base implementation
        // for the u64/p64 wrappers below.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2i64.p0"
        )]
        fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t;
    }
    _vld3q_dup_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// With 1-lane vectors there is nothing to de-interleave, so no ld3 is
// emitted — hence assert_instr(nop).
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t {
    // Three consecutive f64 values read as one unaligned struct load.
    crate::ptr::read_unaligned(a.cast())
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t {
    // float64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0"
        )]
        fn _vld3_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x3_t;
    }
    // Register contents, then lane index, then pointer (LLVM order).
    _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// p64 is only available with the `aes` (polynomial) extension.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t {
    // Single lane, so only LANE == 0; reinterpret and delegate to s64.
    static_assert!(LANE == 0);
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
// NOTE(review): the #[doc] summary says "two registers" but this is a
// 3-element / three-register load — typo carried over from the generator spec.
#[doc = "Load multiple 3-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t {
    // int64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0"
        )]
        fn _vld3_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x3_t;
    }
    _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t {
    // Single lane, so only LANE == 0; reinterpret and delegate to s64.
    static_assert!(LANE == 0);
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// Little-endian variant: plain reinterpretation of the signed load.
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    // Big-endian variant: swap the two 64-bit lanes of each of the three
    // result vectors ([1, 0]) to match the little-endian result.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// Little-endian variant: plain reinterpretation of the signed load.
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    // Big-endian variant: swap the two 64-bit lanes of each of the three
    // result vectors ([1, 0]) to match the little-endian result.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t {
    unsafe extern "unadjusted" {
        // De-interleaving 3-element structure load (ld3) on v2f64.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2f64.p0"
        )]
        fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t;
    }
    _vld3q_f64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t {
    unsafe extern "unadjusted" {
        // De-interleaving ld3 on v2i64; base implementation for the
        // u64/p64 wrappers below.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2i64.p0"
        )]
        fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t;
    }
    _vld3q_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -> float64x2x3_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0"
        )]
        fn _vld3q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x3_t;
    }
    // Register contents, then lane index, then pointer (LLVM order).
    _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// p64 is only available with the `aes` (polynomial) extension.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t {
    // 2 lanes -> 1-bit lane index; reinterpret and delegate to s64.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
12196#[doc = "Load multiple 3-element structures to two registers"]
12197#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"]
12198#[doc = "## Safety"]
12199#[doc = "  * Neon intrinsic unsafe"]
12200#[inline(always)]
12201#[target_feature(enable = "neon")]
12202#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12203#[rustc_legacy_const_generics(2)]
12204#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12205pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> int8x16x3_t {
12206    static_assert_uimm_bits!(LANE, 3);
12207    unsafe extern "unadjusted" {
12208        #[cfg_attr(
12209            any(target_arch = "aarch64", target_arch = "arm64ec"),
12210            link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0"
12211        )]
12212        fn _vld3q_lane_s8(
12213            a: int8x16_t,
12214            b: int8x16_t,
12215            c: int8x16_t,
12216            n: i64,
12217            ptr: *const i8,
12218        ) -> int8x16x3_t;
12219    }
12220    _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
12221}
// NOTE(review): the #[doc] summary says "two registers" but this is a
// 3-element / three-register load — typo carried over from the generator spec.
#[doc = "Load multiple 3-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0"
        )]
        fn _vld3q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x3_t;
    }
    _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t {
    // 16 lanes -> 4-bit lane index; u8 reinterprets as i8 and delegates.
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t {
    // 2 lanes -> 1-bit lane index; u64 reinterprets as i64 and delegates.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t {
    // 16 lanes -> 4-bit lane index; p8 reinterprets as i8 and delegates.
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// Little-endian variant: plain reinterpretation of the signed load.
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a)));
    // Big-endian variant: swap the two 64-bit lanes of each of the three
    // result vectors ([1, 0]) to match the little-endian result.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    // u64 reinterprets as i64 bit-for-bit; delegates to the signed load.
    // NOTE(review): no big-endian lane-swap twin here, unlike vld3q_p64
    // above — confirm against the stdarch-gen-arm spec.
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t {
    unsafe extern "unadjusted" {
        // ld4r = load 4-element structure and replicate to all lanes.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v1f64.p0"
        )]
        fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t;
    }
    _vld4_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
    unsafe extern "unadjusted" {
        // ld4r on the 128-bit (v2f64) form.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2f64.p0"
        )]
        fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t;
    }
    _vld4q_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t {
    unsafe extern "unadjusted" {
        // ld4r on the 128-bit (v2i64) form; base implementation for the
        // u64/p64 wrappers below.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2i64.p0"
        )]
        fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t;
    }
    _vld4q_dup_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// With 1-lane vectors there is nothing to de-interleave, so no ld4 is
// emitted — hence assert_instr(nop).
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t {
    // Four consecutive f64 values read as one unaligned struct load.
    crate::ptr::read_unaligned(a.cast())
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) -> float64x1x4_t {
    // float64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0"
        )]
        fn _vld4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x4_t;
    }
    // Register contents, then lane index, then pointer (LLVM order).
    _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> int64x1x4_t {
    // int64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0"
        )]
        fn _vld4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x4_t;
    }
    _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// p64 is only available with the `aes` (polynomial) extension.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t {
    // Single lane, so only LANE == 0; reinterpret and delegate to s64.
    static_assert!(LANE == 0);
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t {
    // Single lane, so only LANE == 0; reinterpret and delegate to s64.
    static_assert!(LANE == 0);
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// Little-endian variant: plain reinterpretation of the signed load.
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    // Big-endian variant: swap the two 64-bit lanes of each of the four
    // registers to restore the expected lane ordering.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // Bit-identical reinterpretation of the signed variant; little-endian
    // targets need no lane-order fixup.
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    let mut ret_val: uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    // Big-endian variant: swap the two 64-bit lanes of each of the four
    // registers to restore the expected lane ordering.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t {
    // Raw binding to LLVM's ld4 intrinsic for two-lane f64 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2f64.p0"
        )]
        fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t;
    }
    // Cast the element pointer to the vector-pointer type the intrinsic expects.
    _vld4q_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t {
    // Raw binding to LLVM's ld4 intrinsic for two-lane i64 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2i64.p0"
        )]
        fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t;
    }
    // Cast the element pointer to the vector-pointer type the intrinsic expects.
    _vld4q_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -> float64x2x4_t {
    // float64x2_t has two lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0"
        )]
        fn _vld4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x4_t;
    }
    // Pass the four existing registers, the lane index, and the source pointer
    // straight through to the LLVM ld4lane intrinsic.
    _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
12592#[doc = "Load multiple 4-element structures to four registers"]
12593#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"]
12594#[doc = "## Safety"]
12595#[doc = "  * Neon intrinsic unsafe"]
12596#[inline(always)]
12597#[target_feature(enable = "neon")]
12598#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12599#[rustc_legacy_const_generics(2)]
12600#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12601pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> int8x16x4_t {
12602    static_assert_uimm_bits!(LANE, 3);
12603    unsafe extern "unadjusted" {
12604        #[cfg_attr(
12605            any(target_arch = "aarch64", target_arch = "arm64ec"),
12606            link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0"
12607        )]
12608        fn _vld4q_lane_s8(
12609            a: int8x16_t,
12610            b: int8x16_t,
12611            c: int8x16_t,
12612            d: int8x16_t,
12613            n: i64,
12614            ptr: *const i8,
12615        ) -> int8x16x4_t;
12616    }
12617    _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
12618}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) -> int64x2x4_t {
    // int64x2_t has two lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0"
        )]
        fn _vld4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x4_t;
    }
    // Pass the four existing registers, the lane index, and the source pointer
    // straight through to the LLVM ld4lane intrinsic.
    _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t {
    // poly64x2_t has two lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // poly64 and int64 share the same bit layout; delegate to the signed variant.
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t {
    // uint8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // u8 and i8 share the same bit layout; delegate to the signed variant.
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t {
    // uint64x2_t has two lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 share the same bit layout; delegate to the signed variant.
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t {
    // poly8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // poly8 and int8 share the same bit layout; delegate to the signed variant.
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // Bit-identical reinterpretation of the signed variant; little-endian
    // targets need no lane-order fixup.
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a)));
    // Big-endian variant: swap the two 64-bit lanes of each of the four
    // registers to restore the expected lane ordering.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    // u64 and i64 share the same bit layout; delegate to the signed variant.
    // NOTE(review): unlike vld4q_p64 there is no big-endian lane-swapping
    // counterpart visible for this function in this chunk.
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) -> int64x1_t {
    // A 64x1 vector has exactly one lane, so the only valid index is 0.
    static_assert!(LANE == 0);
    // Express the load-acquire through an atomic view of `ptr`, then insert
    // the loaded value into the requested lane of `src`.
    let atomic_src = crate::sync::atomic::AtomicI64::from_ptr(ptr as *mut i64);
    simd_insert!(
        src,
        LANE as u32,
        atomic_src.load(crate::sync::atomic::Ordering::Acquire)
    )
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -> int64x2_t {
    // int64x2_t has two lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Express the load-acquire through an atomic view of `ptr`, then insert
    // the loaded value into the requested lane of `src`.
    let atomic_src = crate::sync::atomic::AtomicI64::from_ptr(ptr as *mut i64);
    simd_insert!(
        src,
        LANE as u32,
        atomic_src.load(crate::sync::atomic::Ordering::Acquire)
    )
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_f64<const LANE: i32>(ptr: *const f64, src: float64x2_t) -> float64x2_t {
    // float64x2_t has two lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // f64 and i64 lanes are the same width; reinterpret and delegate to the
    // signed variant, which performs the acquire load.
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -> uint64x1_t {
    // A 64x1 vector has exactly one lane, so the only valid index is 0.
    static_assert!(LANE == 0);
    // u64 and i64 share the same bit layout; delegate to the signed variant,
    // which performs the acquire load.
    transmute(vldap1_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t) -> uint64x2_t {
    // uint64x2_t has two lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 share the same bit layout; delegate to the signed variant,
    // which performs the acquire load.
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) -> poly64x1_t {
    // A 64x1 vector has exactly one lane, so the only valid index is 0.
    static_assert!(LANE == 0);
    // poly64 and int64 share the same bit layout; delegate to the signed
    // variant, which performs the acquire load.
    transmute(vldap1_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x2_t) -> poly64x2_t {
    // poly64x2_t has two lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // poly64 and int64 share the same bit layout; delegate to the signed
    // variant, which performs the acquire load.
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_f16<const INDEX: i32>(a: float16x4_t, b: uint8x8_t) -> float16x8_t {
    // 16-bit-element table with a 64-bit index vector: INDEX selects one of
    // four index segments (0..=3).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // f16 lanes are bit-compatible with i16 lanes; delegate to the signed variant.
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_f16<const INDEX: i32>(a: float16x8_t, b: uint8x8_t) -> float16x8_t {
    // 16-bit-element table with a 64-bit index vector: INDEX selects one of
    // four index segments (0..=3).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // f16 lanes are bit-compatible with i16 lanes; delegate to the signed variant.
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u8<const INDEX: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t {
    // 8-bit-element table with a 64-bit index vector: INDEX selects one of
    // two index segments (0..=1).
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // u8 and i8 share the same bit layout; delegate to the signed variant.
    transmute(vluti2_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u8<const INDEX: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    // 8-bit-element table with a 64-bit index vector: INDEX selects one of
    // two index segments (0..=1).
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // u8 and i8 share the same bit layout; delegate to the signed variant.
    transmute(vluti2q_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u16<const INDEX: i32>(a: uint16x4_t, b: uint8x8_t) -> uint16x8_t {
    // 16-bit-element table with a 64-bit index vector: INDEX selects one of
    // four index segments (0..=3).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // u16 and i16 share the same bit layout; delegate to the signed variant.
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u16<const INDEX: i32>(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
    // 16-bit-element table with a 64-bit index vector: INDEX selects one of
    // four index segments (0..=3).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // u16 and i16 share the same bit layout; delegate to the signed variant.
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p8<const INDEX: i32>(a: poly8x8_t, b: uint8x8_t) -> poly8x16_t {
    // 8-bit-element table with a 64-bit index vector: INDEX selects one of
    // two index segments (0..=1).
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // poly8 and int8 share the same bit layout; delegate to the signed variant.
    transmute(vluti2_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p8<const INDEX: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    // 8-bit-element table with a 64-bit index vector: INDEX selects one of
    // two index segments (0..=1).
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // poly8 and int8 share the same bit layout; delegate to the signed variant.
    transmute(vluti2q_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p16<const INDEX: i32>(a: poly16x4_t, b: uint8x8_t) -> poly16x8_t {
    // 16-bit-element table with a 64-bit index vector: INDEX selects one of
    // four index segments (0..=3).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // poly16 and int16 share the same bit layout; delegate to the signed variant.
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p16<const INDEX: i32>(a: poly16x8_t, b: uint8x8_t) -> poly16x8_t {
    // 16-bit-element table with a 64-bit index vector: INDEX selects one of
    // four index segments (0..=3).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // poly16 and int16 share the same bit layout; delegate to the signed variant.
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s8<const LANE: i32>(a: int8x8_t, b: uint8x8_t) -> int8x16_t {
    // 8-bit-element table with a 64-bit index vector: LANE selects one of
    // two index segments (0..=1).
    static_assert!(LANE >= 0 && LANE <= 1);
    // Raw binding to LLVM's luti2 intrinsic (v16i8 result from v8i8 table).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v8i8"
        )]
        fn _vluti2_lane_s8(a: int8x8_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    // 8-bit-element table with a 64-bit index vector: LANE selects one of
    // two index segments (0..=1).
    static_assert!(LANE >= 0 && LANE <= 1);
    // Raw binding to LLVM's luti2 intrinsic (v16i8 result from v16i8 table).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v16i8"
        )]
        fn _vluti2q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s16<const LANE: i32>(a: int16x4_t, b: uint8x8_t) -> int16x8_t {
    // 16-bit-element table with a 64-bit index vector: LANE selects one of
    // four index segments (0..=3).
    static_assert!(LANE >= 0 && LANE <= 3);
    // Raw binding to LLVM's luti2 intrinsic (v8i16 result from v4i16 table).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v4i16"
        )]
        fn _vluti2_lane_s16(a: int16x4_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s16<const LANE: i32>(a: int16x8_t, b: uint8x8_t) -> int16x8_t {
    // 16-bit-element table with a 64-bit index vector: LANE selects one of
    // four index segments (0..=3).
    static_assert!(LANE >= 0 && LANE <= 3);
    // Raw binding to LLVM's luti2 intrinsic (v8i16 result from v8i16 table).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v8i16"
        )]
        fn _vluti2q_lane_s16(a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2q_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_f16<const INDEX: i32>(a: float16x4_t, b: uint8x16_t) -> float16x8_t {
    // 16-bit-element table with a 128-bit index vector: INDEX selects one of
    // eight index segments (0..=7).
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // f16 lanes are bit-compatible with i16 lanes; delegate to the signed variant.
    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_f16<const INDEX: i32>(a: float16x8_t, b: uint8x16_t) -> float16x8_t {
    // 16-bit-element table with a 128-bit index vector: INDEX selects one of
    // eight index segments (0..=7).
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // f16 lanes are bit-compatible with i16 lanes; delegate to the signed variant.
    transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_u8<const INDEX: i32>(a: uint8x8_t, b: uint8x16_t) -> uint8x16_t {
    // 8-bit-element table with a 128-bit index vector: INDEX selects one of
    // four index segments (0..=3).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // u8 and i8 share the same bit layout; delegate to the signed variant.
    transmute(vluti2_laneq_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_u8<const INDEX: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // 8-bit-element table with a 128-bit index vector: INDEX selects one of
    // four index segments (0..=3).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // u8 and i8 share the same bit layout; delegate to the signed variant.
    transmute(vluti2q_laneq_s8::<INDEX>(transmute(a), b))
}
13108#[doc = "Lookup table read with 2-bit indices"]
13109#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_u16)"]
13110#[doc = "## Safety"]
13111#[doc = "  * Neon intrinsic unsafe"]
13112#[inline(always)]
13113#[target_feature(enable = "neon,lut")]
13114#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13115#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13116#[rustc_legacy_const_generics(2)]
13117pub unsafe fn vluti2_laneq_u16<const INDEX: i32>(a: uint16x4_t, b: uint8x16_t) -> uint16x8_t {
13118    static_assert!(INDEX >= 0 && INDEX <= 7);
13119    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
13120}
13121#[doc = "Lookup table read with 2-bit indices"]
13122#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_u16)"]
13123#[doc = "## Safety"]
13124#[doc = "  * Neon intrinsic unsafe"]
13125#[inline(always)]
13126#[target_feature(enable = "neon,lut")]
13127#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13128#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13129#[rustc_legacy_const_generics(2)]
13130pub unsafe fn vluti2q_laneq_u16<const INDEX: i32>(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
13131    static_assert!(INDEX >= 0 && INDEX <= 7);
13132    transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
13133}
13134#[doc = "Lookup table read with 2-bit indices"]
13135#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_p8)"]
13136#[doc = "## Safety"]
13137#[doc = "  * Neon intrinsic unsafe"]
13138#[inline(always)]
13139#[target_feature(enable = "neon,lut")]
13140#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13141#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13142#[rustc_legacy_const_generics(2)]
13143pub unsafe fn vluti2_laneq_p8<const INDEX: i32>(a: poly8x8_t, b: uint8x16_t) -> poly8x16_t {
13144    static_assert!(INDEX >= 0 && INDEX <= 3);
13145    transmute(vluti2_laneq_s8::<INDEX>(transmute(a), b))
13146}
13147#[doc = "Lookup table read with 2-bit indices"]
13148#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_p8)"]
13149#[doc = "## Safety"]
13150#[doc = "  * Neon intrinsic unsafe"]
13151#[inline(always)]
13152#[target_feature(enable = "neon,lut")]
13153#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13154#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13155#[rustc_legacy_const_generics(2)]
13156pub unsafe fn vluti2q_laneq_p8<const INDEX: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
13157    static_assert!(INDEX >= 0 && INDEX <= 3);
13158    transmute(vluti2q_laneq_s8::<INDEX>(transmute(a), b))
13159}
13160#[doc = "Lookup table read with 2-bit indices"]
13161#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_p16)"]
13162#[doc = "## Safety"]
13163#[doc = "  * Neon intrinsic unsafe"]
13164#[inline(always)]
13165#[target_feature(enable = "neon,lut")]
13166#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13167#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13168#[rustc_legacy_const_generics(2)]
13169pub unsafe fn vluti2_laneq_p16<const INDEX: i32>(a: poly16x4_t, b: uint8x16_t) -> poly16x8_t {
13170    static_assert!(INDEX >= 0 && INDEX <= 7);
13171    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
13172}
13173#[doc = "Lookup table read with 2-bit indices"]
13174#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_p16)"]
13175#[doc = "## Safety"]
13176#[doc = "  * Neon intrinsic unsafe"]
13177#[inline(always)]
13178#[target_feature(enable = "neon,lut")]
13179#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13180#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13181#[rustc_legacy_const_generics(2)]
13182pub unsafe fn vluti2q_laneq_p16<const INDEX: i32>(a: poly16x8_t, b: uint8x16_t) -> poly16x8_t {
13183    static_assert!(INDEX >= 0 && INDEX <= 7);
13184    transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
13185}
// ---------------------------------------------------------------------------
// Signed-integer backing implementations of the vluti2 family. Each declares
// the LLVM intrinsic in an `extern "unadjusted"` block (the link_name suffix
// encodes the LLVM type overloads: result vector then table vector) and
// forwards the compile-time INDEX as the runtime `n` argument.
// ---------------------------------------------------------------------------
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_s8<const INDEX: i32>(a: int8x8_t, b: uint8x16_t) -> int8x16_t {
    // INDEX range 0..=3 enforced at compile time.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v16i8.v8i8"
        )]
        fn _vluti2_laneq_s8(a: int8x8_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    _vluti2_laneq_s8(a, b, INDEX)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_s8<const INDEX: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // INDEX range 0..=3 enforced at compile time.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v16i8.v16i8"
        )]
        fn _vluti2q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    _vluti2q_laneq_s8(a, b, INDEX)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_s16<const INDEX: i32>(a: int16x4_t, b: uint8x16_t) -> int16x8_t {
    // INDEX range 0..=7 enforced at compile time.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v8i16.v4i16"
        )]
        fn _vluti2_laneq_s16(a: int16x4_t, b: uint8x16_t, n: i32) -> int16x8_t;
    }
    _vluti2_laneq_s16(a, b, INDEX)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_s16<const INDEX: i32>(a: int16x8_t, b: uint8x16_t) -> int16x8_t {
    // INDEX range 0..=7 enforced at compile time.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v8i16.v8i16"
        )]
        fn _vluti2q_laneq_s16(a: int16x8_t, b: uint8x16_t, n: i32) -> int16x8_t;
    }
    _vluti2q_laneq_s16(a, b, INDEX)
}
// ---------------------------------------------------------------------------
// vluti4q_lane_*_x2 wrappers: each reinterprets its table pair and delegates
// to vluti4q_lane_s16_x2. LANE range 0..=1 is enforced at compile time.
// ---------------------------------------------------------------------------
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_f16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_f16_x2<const LANE: i32>(a: float16x8x2_t, b: uint8x8_t) -> float16x8_t {
    // Reinterpret the f16 table pair as i16, look up, reinterpret the result.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x8_t) -> uint16x8_t {
    // Unsigned view of the signed 16-bit x2 lookup.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x8_t) -> poly16x8_t {
    // Polynomial view of the signed 16-bit x2 lookup.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
13305#[doc = "Lookup table read with 4-bit indices"]
13306#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s16_x2)"]
13307#[doc = "## Safety"]
13308#[doc = "  * Neon intrinsic unsafe"]
13309#[inline(always)]
13310#[target_feature(enable = "neon,lut")]
13311#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13312#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13313#[rustc_legacy_const_generics(2)]
13314pub unsafe fn vluti4q_lane_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x8_t) -> int16x8_t {
13315    static_assert!(LANE >= 0 && LANE <= 1);
13316    unsafe extern "unadjusted" {
13317        #[cfg_attr(
13318            any(target_arch = "aarch64", target_arch = "arm64ec"),
13319            link_name = "llvm.aarch64.neon.vluti4q.lane.x2.v8i16"
13320        )]
13321        fn _vluti4q_lane_s16_x2(a: int16x8_t, a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
13322    }
13323    _vluti4q_lane_s16_x2(a.0, a.1, b, LANE)
13324}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    // Only LANE == 0 is valid for this form (compile-time check).
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.lane.v8i8"
        )]
        fn _vluti4q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti4q_lane_s8(a, b, LANE)
}
// Unsigned and polynomial views of vluti4q_lane_s8; lanes are bit-identical.
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    // Only LANE == 0 is valid for this form (compile-time check).
    static_assert!(LANE == 0);
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    // Only LANE == 0 is valid for this form (compile-time check).
    static_assert!(LANE == 0);
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
// ---------------------------------------------------------------------------
// vluti4q_laneq_*_x2 wrappers: quad-register index form; LANE range 0..=3.
// Each delegates to vluti4q_laneq_s16_x2 via bitwise reinterpretation.
// ---------------------------------------------------------------------------
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_f16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_f16_x2<const LANE: i32>(
    a: float16x8x2_t,
    b: uint8x16_t,
) -> float16x8_t {
    // Reinterpret the f16 table pair as i16, look up, reinterpret the result.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x16_t) -> uint16x8_t {
    // Unsigned view of the signed 16-bit x2 lookup.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x16_t) -> poly16x8_t {
    // Polynomial view of the signed 16-bit x2 lookup.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x16_t) -> int16x8_t {
    // LANE range 0..=3 enforced at compile time.
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.x2.v8i16"
        )]
        // The table pair is split into two separate register arguments.
        fn _vluti4q_laneq_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x16_t, n: i32) -> int16x8_t;
    }
    _vluti4q_laneq_s16_x2(a.0, a.1, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s8<const LANE: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // LANE range 0..=1 enforced at compile time (quad-register index form).
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.v16i8"
        )]
        fn _vluti4q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    _vluti4q_laneq_s8(a, b, LANE)
}
// Unsigned and polynomial views of vluti4q_laneq_s8; lanes are bit-identical.
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u8<const LANE: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // LANE range 0..=1 enforced at compile time.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p8<const LANE: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    // LANE range 0..=1 enforced at compile time.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
// ---------------------------------------------------------------------------
// Element-wise maximum (FMAX) variants, implemented by calling the LLVM
// `llvm.aarch64.neon.fmax.*` intrinsics through `extern "unadjusted"`.
// ---------------------------------------------------------------------------
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v1f64"
        )]
        fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: calling a well-typed target intrinsic with matching vector types.
    unsafe { _vmax_f64(a, b) }
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v2f64"
        )]
        fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: calling a well-typed target intrinsic with matching vector types.
    unsafe { _vmaxq_f64(a, b) }
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision maximum via the f16 overload of the intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.f16"
        )]
        fn _vmaxh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: calling a well-typed target intrinsic with matching scalar types.
    unsafe { _vmaxh_f16(a, b) }
}
// ---------------------------------------------------------------------------
// FMAXNM (IEEE "maximum number", NaN-ignoring) variants. The vector forms use
// the portable `simd_fmax` intrinsic; the scalar f16 form uses `f16::max`,
// which has the same NaN-ignoring semantics. The `assert_instr(fmaxnm)`
// attributes check that each lowers to the FMAXNM instruction.
// ---------------------------------------------------------------------------
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_fmax(a, b) }
}
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_fmax(a, b) }
}
#[doc = "Floating-point Maximum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 {
    f16::max(a, b)
}
// ---------------------------------------------------------------------------
// Across-vector "maximum number" reductions, all via the portable
// `simd_reduce_max` intrinsic. Note the assert_instr expectations: 2-element
// reductions lower to the pairwise FMAXNMP, larger vectors to FMAXNMV.
// ---------------------------------------------------------------------------
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmv_f16(a: float16x4_t) -> f16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmv_f32(a: float32x2_t) -> f32 {
    // Two lanes: reduces with a single pairwise max (FMAXNMP).
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmvq_f64(a: float64x2_t) -> f64 {
    // Two lanes: reduces with a single pairwise max (FMAXNMP).
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f32(a: float32x4_t) -> f32 {
    unsafe { simd_reduce_max(a) }
}
// ---------------------------------------------------------------------------
// Across-vector floating-point max reductions implemented by calling the
// LLVM `llvm.aarch64.neon.fmaxv.*` intrinsics (result type then vector type
// encoded in the link_name suffix).
// ---------------------------------------------------------------------------
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v4f16"
        )]
        fn _vmaxv_f16(a: float16x4_t) -> f16;
    }
    // SAFETY: calling a well-typed target intrinsic with a matching vector type.
    unsafe { _vmaxv_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v8f16"
        )]
        fn _vmaxvq_f16(a: float16x8_t) -> f16;
    }
    // SAFETY: calling a well-typed target intrinsic with a matching vector type.
    unsafe { _vmaxvq_f16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxv_f32(a: float32x2_t) -> f32 {
    // Two lanes: expected to lower to a single pairwise max (FMAXP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vmaxv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: calling a well-typed target intrinsic with a matching vector type.
    unsafe { _vmaxv_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32"
        )]
        fn _vmaxvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: calling a well-typed target intrinsic with a matching vector type.
    unsafe { _vmaxvq_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxvq_f64(a: float64x2_t) -> f64 {
    // Two lanes: expected to lower to a single pairwise max (FMAXP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vmaxvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: calling a well-typed target intrinsic with a matching vector type.
    unsafe { _vmaxvq_f64(a) }
}
// ---------------------------------------------------------------------------
// Across-vector integer max reductions via the portable `simd_reduce_max`
// intrinsic. Per the assert_instr expectations, 2-element reductions lower to
// a pairwise max (SMAXP/UMAXP) while larger vectors use the across-vector
// form (SMAXV/UMAXV).
// ---------------------------------------------------------------------------
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s8(a: int8x8_t) -> i8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s8(a: int8x16_t) -> i8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s16(a: int16x4_t) -> i16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s16(a: int16x8_t) -> i16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vmaxv_s32(a: int32x2_t) -> i32 {
    // Two lanes: reduces with a single pairwise max (SMAXP).
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s32(a: int32x4_t) -> i32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u8(a: uint8x8_t) -> u8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u8(a: uint8x16_t) -> u8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u16(a: uint16x4_t) -> u16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u16(a: uint16x8_t) -> u16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vmaxv_u32(a: uint32x2_t) -> u32 {
    // Two lanes: reduces with a single pairwise max (UMAXP).
    unsafe { simd_reduce_max(a) }
}
13784#[doc = "Horizontal vector max."]
13785#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"]
13786#[inline(always)]
13787#[target_feature(enable = "neon")]
13788#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13789#[cfg_attr(test, assert_instr(umaxv))]
13790pub fn vmaxvq_u32(a: uint32x4_t) -> u32 {
13791    unsafe { simd_reduce_max(a) }
13792}
// --- Lane-wise floating-point minimum (FMIN) ---
// These bind directly to the `llvm.aarch64.neon.fmin.*` intrinsics. The
// `extern "unadjusted"` ABI passes SIMD arguments to the LLVM intrinsic
// without the usual ABI adjustment applied to vector types.
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v1f64"
        )]
        fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // Single-lane f64 minimum via the NEON FMIN intrinsic.
    unsafe { _vmin_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v2f64"
        )]
        fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // Two-lane f64 minimum via the NEON FMIN intrinsic.
    unsafe { _vminq_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.f16"
        )]
        fn _vminh_f16(a: f16, b: f16) -> f16;
    }
    // Scalar f16 minimum; requires the fp16 target feature and is
    // compiled out entirely on arm64ec (see the `cfg` above).
    unsafe { _vminh_f16(a, b) }
}
// --- Lane-wise "minimum number" (FMINNM) ---
// NOTE(review): FMINNM treats a quiet NaN operand as "missing" and returns
// the numeric operand. These bodies rely on `simd_fmin` / `f16::min`
// providing that minNum-style NaN behavior — confirm against the
// generator spec if touching NaN handling.
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Single-lane minNum; codegen asserted to be FMINNM.
    unsafe { simd_fmin(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Two-lane minNum; codegen asserted to be FMINNM.
    unsafe { simd_fmin(a, b) }
}
#[doc = "Floating-point Minimum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmh_f16(a: f16, b: f16) -> f16 {
    // `f16::min` returns the non-NaN operand when one input is NaN,
    // matching FMINNM's minimumNumber semantics.
    f16::min(a, b)
}
// --- Floating-point minNum reductions across a vector ---
// All lanes are folded to a single scalar via `simd_reduce_min`;
// `assert_instr` pins FMINNMV (or the pairwise FMINNMP for 2-lane inputs).
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmv_f16(a: float16x4_t) -> f16 {
    // Reduce 4 f16 lanes to their minimum.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmvq_f16(a: float16x8_t) -> f16 {
    // Reduce 8 f16 lanes to their minimum.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmv_f32(a: float32x2_t) -> f32 {
    // Only 2 lanes: a single pairwise FMINNMP suffices.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f64(a: float64x2_t) -> f64 {
    // Only 2 lanes: a single pairwise FMINNMP suffices.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f32(a: float32x4_t) -> f32 {
    // Reduce 4 f32 lanes to their minimum (FMINNMV).
    unsafe { simd_reduce_min(a) }
}
// --- Floating-point min reductions bound to llvm.aarch64.neon.fminv.* ---
// Unlike the integer reductions above, the float FMINV reductions go
// through explicit LLVM intrinsic bindings rather than `simd_reduce_min`.
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v4f16"
        )]
        fn _vminv_f16(a: float16x4_t) -> f16;
    }
    // Reduce 4 f16 lanes to their minimum (FMINV).
    unsafe { _vminv_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v8f16"
        )]
        fn _vminvq_f16(a: float16x8_t) -> f16;
    }
    // Reduce 8 f16 lanes to their minimum (FMINV).
    unsafe { _vminvq_f16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vminv_f32(a: float32x2_t) -> f32;
    }
    // Only 2 lanes: lowers to a single pairwise FMINP.
    unsafe { _vminv_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v4f32"
        )]
        fn _vminvq_f32(a: float32x4_t) -> f32;
    }
    // Reduce 4 f32 lanes to their minimum (FMINV).
    unsafe { _vminvq_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vminvq_f64(a: float64x2_t) -> f64;
    }
    // Only 2 lanes: lowers to a single pairwise FMINP.
    unsafe { _vminvq_f64(a) }
}
// --- Integer horizontal-min reductions ---
// Mirror of the vmaxv* family above: `simd_reduce_min` folds all lanes to
// the smallest element (SMINV/UMINV, or pairwise SMINP/UMINP for 2 lanes).
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s8(a: int8x8_t) -> i8 {
    // Min across all 8 signed 8-bit lanes.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s8(a: int8x16_t) -> i8 {
    // Min across all 16 signed 8-bit lanes.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s16(a: int16x4_t) -> i16 {
    // Min across all 4 signed 16-bit lanes.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s16(a: int16x8_t) -> i16 {
    // Min across all 8 signed 16-bit lanes.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vminv_s32(a: int32x2_t) -> i32 {
    // Only 2 lanes: a single pairwise min (SMINP).
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s32(a: int32x4_t) -> i32 {
    // Min across all 4 signed 32-bit lanes.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u8(a: uint8x8_t) -> u8 {
    // Min across all 8 unsigned 8-bit lanes.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u8(a: uint8x16_t) -> u8 {
    // Min across all 16 unsigned 8-bit lanes.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u16(a: uint16x4_t) -> u16 {
    // Min across all 4 unsigned 16-bit lanes.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u16(a: uint16x8_t) -> u16 {
    // Min across all 8 unsigned 16-bit lanes.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vminv_u32(a: uint32x2_t) -> u32 {
    // Only 2 lanes: a single pairwise min (UMINP).
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u32(a: uint32x4_t) -> u32 {
    // Min across all 4 unsigned 32-bit lanes.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // a + (b * c), expressed as a separate multiply then add — NOT a fused
    // multiply-add; the codegen test accordingly asserts an FMUL is emitted.
    unsafe { simd_add(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Two-lane variant of the unfused multiply-add above.
    unsafe { simd_add(a, simd_mul(b, c)) }
}
// --- Widening multiply-add by a single lane (upper-half inputs) ---
// Pattern shared by the whole family: `static_assert_uimm_bits!` rejects
// out-of-range LANE values at compile time, `simd_shuffle!` broadcasts the
// selected lane of `c` across every lane, and the work is delegated to the
// corresponding `vmlal_high_*` widening multiply-add.
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmlal_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmlal_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmlal_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmlal_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    // Unsigned counterpart of vmlal_high_lane_s16 (UMLAL2).
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmlal_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    // Unsigned counterpart of vmlal_high_laneq_s16 (UMLAL2).
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmlal_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    // Unsigned counterpart of vmlal_high_lane_s32 (UMLAL2).
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmlal_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    // Unsigned counterpart of vmlal_high_laneq_s32 (UMLAL2).
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmlal_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
// --- Widening multiply-add by a scalar ---
// The scalar `c` is splatted to every lane with `vdupq_n_*`, then the
// vector-by-vector `vmlal_high_*` does the widening multiply-accumulate.
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Splat `c` across 8 lanes and delegate.
    vmlal_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Splat `c` across 4 lanes and delegate.
    vmlal_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    // Unsigned variant: splat and delegate (UMLAL2).
    vmlal_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    // Unsigned variant: splat and delegate (UMLAL2).
    vmlal_high_u32(a, b, vdupq_n_u32(c))
}
// --- Widening multiply-add on the UPPER halves ---
// `simd_shuffle!` selects the high half of each 128-bit input (e.g. lanes
// 8..16 of a 16-lane vector), then the 64-bit-wide `vmlal_*` performs the
// widening multiply-accumulate. Codegen is asserted to be SMLAL2/UMLAL2.
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        // Keep only the upper 8 of 16 lanes of each operand.
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_s8(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        // Keep only the upper 4 of 8 lanes of each operand.
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_s16(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        // Keep only the upper 2 of 4 lanes of each operand.
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Keep only the upper 8 of 16 lanes of each operand.
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Keep only the upper 4 of 8 lanes of each operand.
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Keep only the upper 2 of 4 lanes of each operand.
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_u32(a, b, c)
    }
}
#[doc = "Floating-point multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // a - (b * c), expressed as a separate multiply then subtract — NOT a
    // fused operation; the codegen test asserts an FMUL is emitted.
    unsafe { simd_sub(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Two-lane variant of the unfused multiply-subtract above.
    unsafe { simd_sub(a, simd_mul(b, c)) }
}
14369#[doc = "Multiply-subtract long"]
14370#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"]
14371#[inline(always)]
14372#[target_feature(enable = "neon")]
14373#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14374#[rustc_legacy_const_generics(3)]
14375#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14376pub fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
14377    static_assert_uimm_bits!(LANE, 2);
14378    unsafe { vmlsl_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
14379}
14380#[doc = "Multiply-subtract long"]
14381#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"]
14382#[inline(always)]
14383#[target_feature(enable = "neon")]
14384#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14385#[rustc_legacy_const_generics(3)]
14386#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14387pub fn vmlsl_high_laneq_s16<const LANE: i32>(
14388    a: int32x4_t,
14389    b: int16x8_t,
14390    c: int16x8_t,
14391) -> int32x4_t {
14392    static_assert_uimm_bits!(LANE, 3);
14393    unsafe { vmlsl_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
14394}
14395#[doc = "Multiply-subtract long"]
14396#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"]
14397#[inline(always)]
14398#[target_feature(enable = "neon")]
14399#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14400#[rustc_legacy_const_generics(3)]
14401#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14402pub fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
14403    static_assert_uimm_bits!(LANE, 1);
14404    unsafe { vmlsl_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
14405}
14406#[doc = "Multiply-subtract long"]
14407#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"]
14408#[inline(always)]
14409#[target_feature(enable = "neon")]
14410#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14411#[rustc_legacy_const_generics(3)]
14412#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14413pub fn vmlsl_high_laneq_s32<const LANE: i32>(
14414    a: int64x2_t,
14415    b: int32x4_t,
14416    c: int32x4_t,
14417) -> int64x2_t {
14418    static_assert_uimm_bits!(LANE, 2);
14419    unsafe { vmlsl_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
14420}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// a - (high half of b) * c[LANE], each 16-bit product widened to 32 bits.
pub fn vmlsl_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Splat c[LANE] into an 8-lane vector so the vector form can take it.
    unsafe { vmlsl_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Same as vmlsl_high_lane_u16 but `c` is a full 128-bit (8-lane) vector.
pub fn vmlsl_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmlsl_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// a - (high half of b) * c[LANE], each 32-bit product widened to 64 bits.
pub fn vmlsl_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Splat c[LANE] into a 4-lane vector so the vector form can take it.
    unsafe { vmlsl_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Same as vmlsl_high_lane_u32 but `c` is a full 128-bit (4-lane) vector.
pub fn vmlsl_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmlsl_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar variant: broadcast `c` to all lanes and defer to the vector form.
pub fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vmlsl_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar variant: broadcast `c` to all lanes and defer to the vector form.
pub fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vmlsl_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar variant: broadcast `c` to all lanes and defer to the vector form.
pub fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    vmlsl_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar variant: broadcast `c` to all lanes and defer to the vector form.
pub fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    vmlsl_high_u32(a, b, vdupq_n_u32(c))
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Select the upper 8 lanes of b and c, then do the widening multiply-subtract.
pub fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        // Indices 8..=15 pick the high half of each 16-lane input.
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlsl_s8(a, b, c)
    }
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Select the upper 4 lanes of b and c, then do the widening multiply-subtract.
pub fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        // Indices 4..=7 pick the high half of each 8-lane input.
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlsl_s16(a, b, c)
    }
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Select the upper 2 lanes of b and c, then do the widening multiply-subtract.
pub fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        // Indices 2..=3 pick the high half of each 4-lane input.
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlsl_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Select the upper 8 lanes of b and c, then do the widening multiply-subtract.
pub fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Indices 8..=15 pick the high half of each 16-lane input.
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlsl_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Select the upper 4 lanes of b and c, then do the widening multiply-subtract.
pub fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Indices 4..=7 pick the high half of each 8-lane input.
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlsl_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Select the upper 2 lanes of b and c, then do the widening multiply-subtract.
pub fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Indices 2..=3 pick the high half of each 4-lane input.
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlsl_u32(a, b, c)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
// Widen the high half of `a`; SXTL2 codegen implies sign extension.
pub fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
    unsafe {
        // Indices 8..=15 pick the upper 8 lanes.
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmovl_s8(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
// Widen the high half of `a`; SXTL2 codegen implies sign extension.
pub fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
    unsafe {
        // Indices 4..=7 pick the upper 4 lanes.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vmovl_s16(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
// Widen the high half of `a`; SXTL2 codegen implies sign extension.
pub fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
    unsafe {
        // Indices 2..=3 pick the upper 2 lanes.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vmovl_s32(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
// Widen the high half of `a`; UXTL2 codegen implies zero extension.
pub fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Indices 8..=15 pick the upper 8 lanes.
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmovl_u8(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
// Widen the high half of `a`; UXTL2 codegen implies zero extension.
pub fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Indices 4..=7 pick the upper 4 lanes.
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vmovl_u16(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
// Widen the high half of `a`; UXTL2 codegen implies zero extension.
pub fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Indices 2..=3 pick the upper 2 lanes.
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vmovl_u32(a)
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
// Narrow `b` (truncating cast) and concatenate: result = [a, narrowed(b)].
pub fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    unsafe {
        // simd_cast int16x8 -> int8x8 truncates each lane.
        let c: int8x8_t = simd_cast(b);
        // `a` fills lanes 0..=7, the narrowed `b` fills lanes 8..=15.
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
// Narrow `b` (truncating cast) and concatenate: result = [a, narrowed(b)].
pub fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    unsafe {
        let c: int16x4_t = simd_cast(b);
        // `a` fills lanes 0..=3, the narrowed `b` fills lanes 4..=7.
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
// Narrow `b` (truncating cast) and concatenate: result = [a, narrowed(b)].
pub fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    unsafe {
        let c: int32x2_t = simd_cast(b);
        // `a` fills lanes 0..=1, the narrowed `b` fills lanes 2..=3.
        simd_shuffle!(a, c, [0, 1, 2, 3])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
// Narrow `b` (truncating cast) and concatenate: result = [a, narrowed(b)].
pub fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    unsafe {
        let c: uint8x8_t = simd_cast(b);
        // `a` fills lanes 0..=7, the narrowed `b` fills lanes 8..=15.
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
// Narrow `b` (truncating cast) and concatenate: result = [a, narrowed(b)].
pub fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    unsafe {
        let c: uint16x4_t = simd_cast(b);
        // `a` fills lanes 0..=3, the narrowed `b` fills lanes 4..=7.
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
// Narrow `b` (truncating cast) and concatenate: result = [a, narrowed(b)].
pub fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    unsafe {
        let c: uint32x2_t = simd_cast(b);
        // `a` fills lanes 0..=1, the narrowed `b` fills lanes 2..=3.
        simd_shuffle!(a, c, [0, 1, 2, 3])
    }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
// Lane-wise floating-point multiply on a single-lane f64 vector.
pub fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_mul(a, b) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
// Lane-wise floating-point multiply on a two-lane f64 vector.
pub fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_mul(a, b) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Multiply `a` by lane LANE of `b`; a 1-lane vector only allows LANE == 0.
pub fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    static_assert!(LANE == 0);
    // Extract the scalar lane and rebuild a 1-lane vector via transmute
    // (f64 and float64x1_t have the same size).
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// Multiply `a` by lane LANE of the 8-lane `b`, splat to 4 lanes first.
pub fn vmul_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// Multiply `a` by lane LANE of `b`, splat across all 8 lanes first.
pub fn vmulq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Multiply `a` by lane LANE of the 2-lane `b`.
pub fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the scalar lane and rebuild a 1-lane vector via transmute.
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Broadcast scalar `b` into a 1-lane vector and multiply lane-wise.
pub fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t {
    unsafe { simd_mul(a, vdup_n_f64(b)) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Broadcast scalar `b` into both lanes and multiply lane-wise.
pub fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
    unsafe { simd_mul(a, vdupq_n_f64(b)) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar result: multiply `a` by the extracted lane LANE of `b`.
pub fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    // 1-lane vector: only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        let b: f64 = simd_extract!(b, LANE as u32);
        a * b
    }
}
14835#[doc = "Add"]
14836#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_f16)"]
14837#[inline(always)]
14838#[target_feature(enable = "neon,fp16")]
14839#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
14840#[cfg(not(target_arch = "arm64ec"))]
14841#[cfg_attr(test, assert_instr(fmul))]
14842pub fn vmulh_f16(a: f16, b: f16) -> f16 {
14843    a * b
14844}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// Scalar result: multiply `a` by lane LANE of the 4-lane `b`.
pub fn vmulh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let b: f16 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// Scalar result: multiply `a` by lane LANE of the 8-lane `b`.
pub fn vmulh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let b: f16 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// (high half of a) * b[LANE], each 16-bit product widened to 32 bits.
pub fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Splat b[LANE] into an 8-lane vector for the vector form.
    unsafe { vmull_high_s16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Same as vmull_high_lane_s16 but `b` is a full 128-bit (8-lane) vector.
pub fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmull_high_s16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// (high half of a) * b[LANE], each 32-bit product widened to 64 bits.
pub fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmull_high_s32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Same as vmull_high_lane_s32 but `b` is a full 128-bit (4-lane) vector.
pub fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmull_high_s32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// (high half of a) * b[LANE], each 16-bit product widened to 32 bits.
pub fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmull_high_u16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Same as vmull_high_lane_u16 but `b` is a full 128-bit (8-lane) vector.
pub fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmull_high_u16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// (high half of a) * b[LANE], each 32-bit product widened to 64 bits.
pub fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmull_high_u32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Same as vmull_high_lane_u32 but `b` is a full 128-bit (4-lane) vector.
pub fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmull_high_u32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar variant: broadcast `b` to all lanes and defer to the vector form.
pub fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    vmull_high_s16(a, vdupq_n_s16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar variant: broadcast `b` to all lanes and defer to the vector form.
pub fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    vmull_high_s32(a, vdupq_n_s32(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar variant: broadcast `b` to all lanes and defer to the vector form.
pub fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t {
    vmull_high_u16(a, vdupq_n_u16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar variant: broadcast `b` to all lanes and defer to the vector form.
pub fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
    vmull_high_u32(a, vdupq_n_u32(b))
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull2))]
// Polynomial multiply of the upper (index 1) lanes of a and b, 64x64 -> 128.
pub fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
    unsafe { vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) }
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull2))]
// Select the upper 8 lanes of each input, then widening polynomial multiply.
pub fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
    unsafe {
        // Indices 8..=15 pick the high half of each 16-lane input.
        let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_p8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
// Select the upper 8 lanes of each input, then widening multiply.
pub fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Indices 8..=15 pick the high half of each 16-lane input.
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_s8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
// Select the upper 4 lanes of each input, then widening multiply.
pub fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Indices 4..=7 pick the high half of each 8-lane input.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_s16(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
// Select the upper 2 lanes of each input, then widening multiply.
pub fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Indices 2..=3 pick the high half of each 4-lane input.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_s32(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
// Select the upper 8 lanes of each input, then widening multiply.
pub fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Indices 8..=15 pick the high half of each 16-lane input.
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_u8(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
// Select the upper 4 lanes of each input, then widening multiply.
pub fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Indices 4..=7 pick the high half of each 8-lane input.
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_u16(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
// Select the upper 2 lanes of each input, then widening multiply.
pub fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Indices 2..=3 pick the high half of each 4-lane input.
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_u32(a, b)
    }
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull))]
pub fn vmull_p64(a: p64, b: p64) -> p128 {
    // Direct binding to the LLVM carry-less (polynomial) 64x64->128 multiply,
    // which lowers to PMULL. Requires the `aes` feature in addition to `neon`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.pmull64"
        )]
        fn _vmull_p64(a: p64, b: p64) -> int8x16_t;
    }
    // The intrinsic returns its 128-bit result as a byte vector; reinterpret
    // those same bits as the p128 scalar the public API promises.
    unsafe { transmute(_vmull_p64(a, b)) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // b has a single lane, so LANE must be 0 (compile-time check); broadcast
    // that lane to both result positions and do an element-wise multiply.
    static_assert!(LANE == 0);
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // LANE must fit in 1 bit (0 or 1); broadcast the chosen lane of b across
    // both positions, then multiply element-wise with a.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    // Scalar multiply by one lane of a 2-lane vector: LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    // Scalar multiply by one lane of a 4-lane vector: LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    // Scalar multiply by one lane of a 2-lane vector: LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let b: f64 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Direct binding to LLVM's 4-lane f16 fmulx; lowers to the FMULX instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f16"
        )]
        fn _vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vmulx_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Direct binding to LLVM's 8-lane f16 fmulx; lowers to the FMULX instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v8f16"
        )]
        fn _vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vmulxq_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Direct binding to LLVM's 2-lane f32 fmulx; lowers to the FMULX instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f32"
        )]
        fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vmulx_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Direct binding to LLVM's 4-lane f32 fmulx; lowers to the FMULX instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f32"
        )]
        fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vmulxq_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Direct binding to LLVM's 1-lane f64 fmulx; lowers to the FMULX instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v1f64"
        )]
        fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmulx_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Direct binding to LLVM's 2-lane f64 fmulx; lowers to the FMULX instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f64"
        )]
        fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vmulxq_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_lane_f16<const LANE: i32>(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // LANE must fit in 2 bits (b has 4 lanes); broadcast the chosen lane of b
    // to all 4 positions and reuse the vector fmulx.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulx_f16(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    // LANE must fit in 3 bits (b has 8 lanes); broadcast the chosen lane into
    // a 4-lane vector to match a, then reuse the vector fmulx.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmulx_f16(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_lane_f16<const LANE: i32>(a: float16x8_t, b: float16x4_t) -> float16x8_t {
    // LANE must fit in 2 bits (b has 4 lanes); broadcast the chosen lane into
    // an 8-lane vector to match a, then reuse the vector fmulx.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulxq_f16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // LANE must fit in 3 bits (b has 8 lanes); broadcast the chosen lane to
    // all 8 positions and reuse the vector fmulx.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmulxq_f16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // LANE must fit in 1 bit (b has 2 lanes); broadcast it and reuse vmulx_f32.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
    // LANE must fit in 2 bits (b has 4 lanes); broadcast the chosen lane into
    // a 2-lane vector to match a, then reuse vmulx_f32.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
    // LANE must fit in 1 bit (b has 2 lanes); broadcast the chosen lane into
    // a 4-lane vector to match a, then reuse vmulxq_f32.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxq_f32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // LANE must fit in 2 bits (b has 4 lanes); broadcast it and reuse vmulxq_f32.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulxq_f32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // LANE must fit in 1 bit (b has 2 lanes); broadcast it and reuse vmulxq_f64.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // b has a single lane, so LANE must be 0. Extract it as an f64 and
    // reinterpret back into the 1-lane vector type vmulx_f64 expects.
    static_assert!(LANE == 0);
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // LANE must fit in 1 bit (b has 2 lanes). Extract the lane as an f64 and
    // reinterpret it into the 1-lane vector type vmulx_f64 expects.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_n_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_n_f16(a: float16x4_t, b: f16) -> float16x4_t {
    // Multiply-extended of a vector by a scalar: splat b across 4 lanes,
    // then reuse the vector fmulx.
    vmulx_f16(a, vdup_n_f16(b))
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_n_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_n_f16(a: float16x8_t, b: f16) -> float16x8_t {
    // Multiply-extended of a vector by a scalar: splat b across 8 lanes,
    // then reuse the vector fmulx.
    vmulxq_f16(a, vdupq_n_f16(b))
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxd_f64(a: f64, b: f64) -> f64 {
    // Direct binding to LLVM's scalar f64 fmulx; lowers to the FMULX instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f64"
        )]
        fn _vmulxd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vmulxd_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxs_f32(a: f32, b: f32) -> f32 {
    // Direct binding to LLVM's scalar f32 fmulx; lowers to the FMULX instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f32"
        )]
        fn _vmulxs_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vmulxs_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    // b has a single lane, so LANE must be 0; extract it and use scalar fmulx.
    static_assert!(LANE == 0);
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    // LANE must fit in 1 bit (b has 2 lanes); extract it and use scalar fmulx.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    // LANE must fit in 1 bit (b has 2 lanes); extract it and use scalar fmulx.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    // LANE must fit in 2 bits (b has 4 lanes); extract it and use scalar fmulx.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxh_f16(a: f16, b: f16) -> f16 {
    // Direct binding to LLVM's scalar f16 fmulx; lowers to the FMULX instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f16"
        )]
        fn _vmulxh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vmulxh_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    // LANE must fit in 2 bits (b has 4 lanes); extract it and use scalar fmulx.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    // LANE must fit in 3 bits (b has 8 lanes); extract it and use scalar fmulx.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // b has a single lane, so LANE must be 0; broadcast it into a 2-lane
    // vector to match a, then reuse vmulxq_f64.
    static_assert!(LANE == 0);
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vneg_f64(a: float64x1_t) -> float64x1_t {
    // Element-wise negation via the generic SIMD negate intrinsic.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegq_f64(a: float64x2_t) -> float64x2_t {
    // Element-wise negation via the generic SIMD negate intrinsic.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vneg_s64(a: int64x1_t) -> int64x1_t {
    // Element-wise (wrapping) integer negation via the generic SIMD negate.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vnegq_s64(a: int64x2_t) -> int64x2_t {
    // Element-wise (wrapping) integer negation via the generic SIMD negate.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vnegd_s64(a: i64) -> i64 {
    // Wrapping negation: i64::MIN maps back to i64::MIN instead of panicking
    // in debug builds, matching two's-complement hardware behavior.
    a.wrapping_neg()
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegh_f16(a: f16) -> f16 {
    // Plain scalar negation; the assert_instr test pins it to FNEG.
    -a
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpaddd_f64(a: float64x2_t) -> f64 {
    // Horizontal sum of the 2-lane vector: extract both lanes and add them
    // as plain f64 arithmetic.
    unsafe {
        let a1: f64 = simd_extract!(a, 0);
        let a2: f64 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpadds_f32(a: float32x2_t) -> f32 {
    // Horizontal sum of the 2-lane vector: extract both lanes and add them
    // as plain f32 arithmetic.
    unsafe {
        let a1: f32 = simd_extract!(a, 0);
        let a2: f32 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_s64(a: int64x2_t) -> i64 {
    // Ordered reduction of both lanes into a single i64 (starting from 0),
    // which LLVM lowers to ADDP.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
    // Ordered reduction of both lanes into a single u64 (starting from 0),
    // which LLVM lowers to ADDP.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Pairwise add expressed as a deinterleave + add: the even/odd helpers
    // (presumably index sets [0,2,4,...] / [1,3,5,...] over the a++b
    // concatenation — defined in core_arch::macros, not visible here) pick
    // adjacent-pair partners, so even + odd yields the pairwise sums.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>());
        simd_add(even, odd)
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Pairwise add as deinterleave + add: select the even-indexed and
    // odd-indexed lanes of the a++b concatenation (index helpers live in
    // core_arch::macros), then sum the two halves element-wise.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>());
        simd_add(even, odd)
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Pairwise add as deinterleave + add over the a++b concatenation
    // (even/odd index helpers from core_arch::macros); lowers to FADDP.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Pairwise add as deinterleave + add over the a++b concatenation
    // (even/odd index helpers from core_arch::macros); lowers to ADDP.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<16>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<16>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Pairwise add as deinterleave + add over the a++b concatenation
    // (even/odd index helpers from core_arch::macros); lowers to ADDP.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Pairwise add as deinterleave + add over the a++b concatenation
    // (even/odd index helpers from core_arch::macros); lowers to ADDP.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Pairwise add as deinterleave + add over the a++b concatenation
    // (even/odd index helpers from core_arch::macros); lowers to ADDP.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Pairwise add as deinterleave + add over the a++b concatenation
    // (even/odd index helpers from core_arch::macros); lowers to ADDP.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<16>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<16>());
        simd_add(even, odd)
    }
}
15754#[doc = "Add Pairwise"]
15755#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
15756#[inline(always)]
15757#[target_feature(enable = "neon")]
15758#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15759#[cfg_attr(test, assert_instr(addp))]
15760pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
15761    unsafe {
15762        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>());
15763        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>());
15764        simd_add(even, odd)
15765    }
15766}
15767#[doc = "Add Pairwise"]
15768#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
15769#[inline(always)]
15770#[target_feature(enable = "neon")]
15771#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15772#[cfg_attr(test, assert_instr(addp))]
15773pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
15774    unsafe {
15775        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>());
15776        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>());
15777        simd_add(even, odd)
15778    }
15779}
15780#[doc = "Add Pairwise"]
15781#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
15782#[inline(always)]
15783#[target_feature(enable = "neon")]
15784#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15785#[cfg_attr(test, assert_instr(addp))]
15786pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
15787    unsafe {
15788        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
15789        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
15790        simd_add(even, odd)
15791    }
15792}
15793#[doc = "Floating-point add pairwise"]
15794#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
15795#[inline(always)]
15796#[target_feature(enable = "neon,fp16")]
15797#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
15798#[cfg(not(target_arch = "arm64ec"))]
15799#[cfg_attr(test, assert_instr(fmaxp))]
15800pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
15801    unsafe extern "unadjusted" {
15802        #[cfg_attr(
15803            any(target_arch = "aarch64", target_arch = "arm64ec"),
15804            link_name = "llvm.aarch64.neon.fmaxp.v4f16"
15805        )]
15806        fn _vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
15807    }
15808    unsafe { _vpmax_f16(a, b) }
15809}
15810#[doc = "Floating-point add pairwise"]
15811#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"]
15812#[inline(always)]
15813#[target_feature(enable = "neon,fp16")]
15814#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
15815#[cfg(not(target_arch = "arm64ec"))]
15816#[cfg_attr(test, assert_instr(fmaxp))]
15817pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
15818    unsafe extern "unadjusted" {
15819        #[cfg_attr(
15820            any(target_arch = "aarch64", target_arch = "arm64ec"),
15821            link_name = "llvm.aarch64.neon.fmaxp.v8f16"
15822        )]
15823        fn _vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
15824    }
15825    unsafe { _vpmaxq_f16(a, b) }
15826}
15827#[doc = "Floating-point add pairwise"]
15828#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"]
15829#[inline(always)]
15830#[target_feature(enable = "neon,fp16")]
15831#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
15832#[cfg(not(target_arch = "arm64ec"))]
15833#[cfg_attr(test, assert_instr(fmaxnmp))]
15834pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
15835    unsafe extern "unadjusted" {
15836        #[cfg_attr(
15837            any(target_arch = "aarch64", target_arch = "arm64ec"),
15838            link_name = "llvm.aarch64.neon.fmaxnmp.v4f16"
15839        )]
15840        fn _vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
15841    }
15842    unsafe { _vpmaxnm_f16(a, b) }
15843}
15844#[doc = "Floating-point add pairwise"]
15845#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"]
15846#[inline(always)]
15847#[target_feature(enable = "neon,fp16")]
15848#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
15849#[cfg(not(target_arch = "arm64ec"))]
15850#[cfg_attr(test, assert_instr(fmaxnmp))]
15851pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
15852    unsafe extern "unadjusted" {
15853        #[cfg_attr(
15854            any(target_arch = "aarch64", target_arch = "arm64ec"),
15855            link_name = "llvm.aarch64.neon.fmaxnmp.v8f16"
15856        )]
15857        fn _vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
15858    }
15859    unsafe { _vpmaxnmq_f16(a, b) }
15860}
15861#[doc = "Floating-point Maximum Number Pairwise (vector)."]
15862#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"]
15863#[inline(always)]
15864#[target_feature(enable = "neon")]
15865#[cfg_attr(test, assert_instr(fmaxnmp))]
15866#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15867pub fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
15868    unsafe extern "unadjusted" {
15869        #[cfg_attr(
15870            any(target_arch = "aarch64", target_arch = "arm64ec"),
15871            link_name = "llvm.aarch64.neon.fmaxnmp.v2f32"
15872        )]
15873        fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
15874    }
15875    unsafe { _vpmaxnm_f32(a, b) }
15876}
15877#[doc = "Floating-point Maximum Number Pairwise (vector)."]
15878#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"]
15879#[inline(always)]
15880#[target_feature(enable = "neon")]
15881#[cfg_attr(test, assert_instr(fmaxnmp))]
15882#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15883pub fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
15884    unsafe extern "unadjusted" {
15885        #[cfg_attr(
15886            any(target_arch = "aarch64", target_arch = "arm64ec"),
15887            link_name = "llvm.aarch64.neon.fmaxnmp.v4f32"
15888        )]
15889        fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
15890    }
15891    unsafe { _vpmaxnmq_f32(a, b) }
15892}
15893#[doc = "Floating-point Maximum Number Pairwise (vector)."]
15894#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"]
15895#[inline(always)]
15896#[target_feature(enable = "neon")]
15897#[cfg_attr(test, assert_instr(fmaxnmp))]
15898#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15899pub fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
15900    unsafe extern "unadjusted" {
15901        #[cfg_attr(
15902            any(target_arch = "aarch64", target_arch = "arm64ec"),
15903            link_name = "llvm.aarch64.neon.fmaxnmp.v2f64"
15904        )]
15905        fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
15906    }
15907    unsafe { _vpmaxnmq_f64(a, b) }
15908}
15909#[doc = "Floating-point maximum number pairwise"]
15910#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"]
15911#[inline(always)]
15912#[target_feature(enable = "neon")]
15913#[cfg_attr(test, assert_instr(fmaxnmp))]
15914#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15915pub fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
15916    unsafe extern "unadjusted" {
15917        #[cfg_attr(
15918            any(target_arch = "aarch64", target_arch = "arm64ec"),
15919            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
15920        )]
15921        fn _vpmaxnmqd_f64(a: float64x2_t) -> f64;
15922    }
15923    unsafe { _vpmaxnmqd_f64(a) }
15924}
15925#[doc = "Floating-point maximum number pairwise"]
15926#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"]
15927#[inline(always)]
15928#[target_feature(enable = "neon")]
15929#[cfg_attr(test, assert_instr(fmaxnmp))]
15930#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15931pub fn vpmaxnms_f32(a: float32x2_t) -> f32 {
15932    unsafe extern "unadjusted" {
15933        #[cfg_attr(
15934            any(target_arch = "aarch64", target_arch = "arm64ec"),
15935            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
15936        )]
15937        fn _vpmaxnms_f32(a: float32x2_t) -> f32;
15938    }
15939    unsafe { _vpmaxnms_f32(a) }
15940}
15941#[doc = "Folding maximum of adjacent pairs"]
15942#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"]
15943#[inline(always)]
15944#[target_feature(enable = "neon")]
15945#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15946#[cfg_attr(test, assert_instr(fmaxp))]
15947pub fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
15948    unsafe extern "unadjusted" {
15949        #[cfg_attr(
15950            any(target_arch = "aarch64", target_arch = "arm64ec"),
15951            link_name = "llvm.aarch64.neon.fmaxp.v4f32"
15952        )]
15953        fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
15954    }
15955    unsafe { _vpmaxq_f32(a, b) }
15956}
15957#[doc = "Folding maximum of adjacent pairs"]
15958#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"]
15959#[inline(always)]
15960#[target_feature(enable = "neon")]
15961#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15962#[cfg_attr(test, assert_instr(fmaxp))]
15963pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
15964    unsafe extern "unadjusted" {
15965        #[cfg_attr(
15966            any(target_arch = "aarch64", target_arch = "arm64ec"),
15967            link_name = "llvm.aarch64.neon.fmaxp.v2f64"
15968        )]
15969        fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
15970    }
15971    unsafe { _vpmaxq_f64(a, b) }
15972}
15973#[doc = "Folding maximum of adjacent pairs"]
15974#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"]
15975#[inline(always)]
15976#[target_feature(enable = "neon")]
15977#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15978#[cfg_attr(test, assert_instr(smaxp))]
15979pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
15980    unsafe extern "unadjusted" {
15981        #[cfg_attr(
15982            any(target_arch = "aarch64", target_arch = "arm64ec"),
15983            link_name = "llvm.aarch64.neon.smaxp.v16i8"
15984        )]
15985        fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
15986    }
15987    unsafe { _vpmaxq_s8(a, b) }
15988}
15989#[doc = "Folding maximum of adjacent pairs"]
15990#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"]
15991#[inline(always)]
15992#[target_feature(enable = "neon")]
15993#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15994#[cfg_attr(test, assert_instr(smaxp))]
15995pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
15996    unsafe extern "unadjusted" {
15997        #[cfg_attr(
15998            any(target_arch = "aarch64", target_arch = "arm64ec"),
15999            link_name = "llvm.aarch64.neon.smaxp.v8i16"
16000        )]
16001        fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
16002    }
16003    unsafe { _vpmaxq_s16(a, b) }
16004}
16005#[doc = "Folding maximum of adjacent pairs"]
16006#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"]
16007#[inline(always)]
16008#[target_feature(enable = "neon")]
16009#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16010#[cfg_attr(test, assert_instr(smaxp))]
16011pub fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
16012    unsafe extern "unadjusted" {
16013        #[cfg_attr(
16014            any(target_arch = "aarch64", target_arch = "arm64ec"),
16015            link_name = "llvm.aarch64.neon.smaxp.v4i32"
16016        )]
16017        fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
16018    }
16019    unsafe { _vpmaxq_s32(a, b) }
16020}
16021#[doc = "Folding maximum of adjacent pairs"]
16022#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"]
16023#[inline(always)]
16024#[target_feature(enable = "neon")]
16025#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16026#[cfg_attr(test, assert_instr(umaxp))]
16027pub fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
16028    unsafe extern "unadjusted" {
16029        #[cfg_attr(
16030            any(target_arch = "aarch64", target_arch = "arm64ec"),
16031            link_name = "llvm.aarch64.neon.umaxp.v16i8"
16032        )]
16033        fn _vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
16034    }
16035    unsafe { _vpmaxq_u8(a, b) }
16036}
16037#[doc = "Folding maximum of adjacent pairs"]
16038#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"]
16039#[inline(always)]
16040#[target_feature(enable = "neon")]
16041#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16042#[cfg_attr(test, assert_instr(umaxp))]
16043pub fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
16044    unsafe extern "unadjusted" {
16045        #[cfg_attr(
16046            any(target_arch = "aarch64", target_arch = "arm64ec"),
16047            link_name = "llvm.aarch64.neon.umaxp.v8i16"
16048        )]
16049        fn _vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
16050    }
16051    unsafe { _vpmaxq_u16(a, b) }
16052}
16053#[doc = "Folding maximum of adjacent pairs"]
16054#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"]
16055#[inline(always)]
16056#[target_feature(enable = "neon")]
16057#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16058#[cfg_attr(test, assert_instr(umaxp))]
16059pub fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
16060    unsafe extern "unadjusted" {
16061        #[cfg_attr(
16062            any(target_arch = "aarch64", target_arch = "arm64ec"),
16063            link_name = "llvm.aarch64.neon.umaxp.v4i32"
16064        )]
16065        fn _vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
16066    }
16067    unsafe { _vpmaxq_u32(a, b) }
16068}
16069#[doc = "Floating-point maximum pairwise"]
16070#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"]
16071#[inline(always)]
16072#[target_feature(enable = "neon")]
16073#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16074#[cfg_attr(test, assert_instr(fmaxp))]
16075pub fn vpmaxqd_f64(a: float64x2_t) -> f64 {
16076    unsafe extern "unadjusted" {
16077        #[cfg_attr(
16078            any(target_arch = "aarch64", target_arch = "arm64ec"),
16079            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
16080        )]
16081        fn _vpmaxqd_f64(a: float64x2_t) -> f64;
16082    }
16083    unsafe { _vpmaxqd_f64(a) }
16084}
16085#[doc = "Floating-point maximum pairwise"]
16086#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"]
16087#[inline(always)]
16088#[target_feature(enable = "neon")]
16089#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16090#[cfg_attr(test, assert_instr(fmaxp))]
16091pub fn vpmaxs_f32(a: float32x2_t) -> f32 {
16092    unsafe extern "unadjusted" {
16093        #[cfg_attr(
16094            any(target_arch = "aarch64", target_arch = "arm64ec"),
16095            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
16096        )]
16097        fn _vpmaxs_f32(a: float32x2_t) -> f32;
16098    }
16099    unsafe { _vpmaxs_f32(a) }
16100}
16101#[doc = "Floating-point add pairwise"]
16102#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"]
16103#[inline(always)]
16104#[target_feature(enable = "neon,fp16")]
16105#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16106#[cfg(not(target_arch = "arm64ec"))]
16107#[cfg_attr(test, assert_instr(fminp))]
16108pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16109    unsafe extern "unadjusted" {
16110        #[cfg_attr(
16111            any(target_arch = "aarch64", target_arch = "arm64ec"),
16112            link_name = "llvm.aarch64.neon.fminp.v4f16"
16113        )]
16114        fn _vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16115    }
16116    unsafe { _vpmin_f16(a, b) }
16117}
16118#[doc = "Floating-point add pairwise"]
16119#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"]
16120#[inline(always)]
16121#[target_feature(enable = "neon,fp16")]
16122#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16123#[cfg(not(target_arch = "arm64ec"))]
16124#[cfg_attr(test, assert_instr(fminp))]
16125pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16126    unsafe extern "unadjusted" {
16127        #[cfg_attr(
16128            any(target_arch = "aarch64", target_arch = "arm64ec"),
16129            link_name = "llvm.aarch64.neon.fminp.v8f16"
16130        )]
16131        fn _vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16132    }
16133    unsafe { _vpminq_f16(a, b) }
16134}
16135#[doc = "Floating-point add pairwise"]
16136#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"]
16137#[inline(always)]
16138#[target_feature(enable = "neon,fp16")]
16139#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16140#[cfg(not(target_arch = "arm64ec"))]
16141#[cfg_attr(test, assert_instr(fminnmp))]
16142pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16143    unsafe extern "unadjusted" {
16144        #[cfg_attr(
16145            any(target_arch = "aarch64", target_arch = "arm64ec"),
16146            link_name = "llvm.aarch64.neon.fminnmp.v4f16"
16147        )]
16148        fn _vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16149    }
16150    unsafe { _vpminnm_f16(a, b) }
16151}
16152#[doc = "Floating-point add pairwise"]
16153#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"]
16154#[inline(always)]
16155#[target_feature(enable = "neon,fp16")]
16156#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16157#[cfg(not(target_arch = "arm64ec"))]
16158#[cfg_attr(test, assert_instr(fminnmp))]
16159pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16160    unsafe extern "unadjusted" {
16161        #[cfg_attr(
16162            any(target_arch = "aarch64", target_arch = "arm64ec"),
16163            link_name = "llvm.aarch64.neon.fminnmp.v8f16"
16164        )]
16165        fn _vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16166    }
16167    unsafe { _vpminnmq_f16(a, b) }
16168}
16169#[doc = "Floating-point Minimum Number Pairwise (vector)."]
16170#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"]
16171#[inline(always)]
16172#[target_feature(enable = "neon")]
16173#[cfg_attr(test, assert_instr(fminnmp))]
16174#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16175pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
16176    unsafe extern "unadjusted" {
16177        #[cfg_attr(
16178            any(target_arch = "aarch64", target_arch = "arm64ec"),
16179            link_name = "llvm.aarch64.neon.fminnmp.v2f32"
16180        )]
16181        fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
16182    }
16183    unsafe { _vpminnm_f32(a, b) }
16184}
16185#[doc = "Floating-point Minimum Number Pairwise (vector)."]
16186#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"]
16187#[inline(always)]
16188#[target_feature(enable = "neon")]
16189#[cfg_attr(test, assert_instr(fminnmp))]
16190#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16191pub fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
16192    unsafe extern "unadjusted" {
16193        #[cfg_attr(
16194            any(target_arch = "aarch64", target_arch = "arm64ec"),
16195            link_name = "llvm.aarch64.neon.fminnmp.v4f32"
16196        )]
16197        fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
16198    }
16199    unsafe { _vpminnmq_f32(a, b) }
16200}
16201#[doc = "Floating-point Minimum Number Pairwise (vector)."]
16202#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"]
16203#[inline(always)]
16204#[target_feature(enable = "neon")]
16205#[cfg_attr(test, assert_instr(fminnmp))]
16206#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16207pub fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
16208    unsafe extern "unadjusted" {
16209        #[cfg_attr(
16210            any(target_arch = "aarch64", target_arch = "arm64ec"),
16211            link_name = "llvm.aarch64.neon.fminnmp.v2f64"
16212        )]
16213        fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
16214    }
16215    unsafe { _vpminnmq_f64(a, b) }
16216}
16217#[doc = "Floating-point minimum number pairwise"]
16218#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"]
16219#[inline(always)]
16220#[target_feature(enable = "neon")]
16221#[cfg_attr(test, assert_instr(fminnmp))]
16222#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16223pub fn vpminnmqd_f64(a: float64x2_t) -> f64 {
16224    unsafe extern "unadjusted" {
16225        #[cfg_attr(
16226            any(target_arch = "aarch64", target_arch = "arm64ec"),
16227            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
16228        )]
16229        fn _vpminnmqd_f64(a: float64x2_t) -> f64;
16230    }
16231    unsafe { _vpminnmqd_f64(a) }
16232}
16233#[doc = "Floating-point minimum number pairwise"]
16234#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"]
16235#[inline(always)]
16236#[target_feature(enable = "neon")]
16237#[cfg_attr(test, assert_instr(fminnmp))]
16238#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16239pub fn vpminnms_f32(a: float32x2_t) -> f32 {
16240    unsafe extern "unadjusted" {
16241        #[cfg_attr(
16242            any(target_arch = "aarch64", target_arch = "arm64ec"),
16243            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
16244        )]
16245        fn _vpminnms_f32(a: float32x2_t) -> f32;
16246    }
16247    unsafe { _vpminnms_f32(a) }
16248}
16249#[doc = "Folding minimum of adjacent pairs"]
16250#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"]
16251#[inline(always)]
16252#[target_feature(enable = "neon")]
16253#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16254#[cfg_attr(test, assert_instr(fminp))]
16255pub fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
16256    unsafe extern "unadjusted" {
16257        #[cfg_attr(
16258            any(target_arch = "aarch64", target_arch = "arm64ec"),
16259            link_name = "llvm.aarch64.neon.fminp.v4f32"
16260        )]
16261        fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
16262    }
16263    unsafe { _vpminq_f32(a, b) }
16264}
16265#[doc = "Folding minimum of adjacent pairs"]
16266#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"]
16267#[inline(always)]
16268#[target_feature(enable = "neon")]
16269#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16270#[cfg_attr(test, assert_instr(fminp))]
16271pub fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
16272    unsafe extern "unadjusted" {
16273        #[cfg_attr(
16274            any(target_arch = "aarch64", target_arch = "arm64ec"),
16275            link_name = "llvm.aarch64.neon.fminp.v2f64"
16276        )]
16277        fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
16278    }
16279    unsafe { _vpminq_f64(a, b) }
16280}
16281#[doc = "Folding minimum of adjacent pairs"]
16282#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"]
16283#[inline(always)]
16284#[target_feature(enable = "neon")]
16285#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16286#[cfg_attr(test, assert_instr(sminp))]
16287pub fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
16288    unsafe extern "unadjusted" {
16289        #[cfg_attr(
16290            any(target_arch = "aarch64", target_arch = "arm64ec"),
16291            link_name = "llvm.aarch64.neon.sminp.v16i8"
16292        )]
16293        fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
16294    }
16295    unsafe { _vpminq_s8(a, b) }
16296}
16297#[doc = "Folding minimum of adjacent pairs"]
16298#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"]
16299#[inline(always)]
16300#[target_feature(enable = "neon")]
16301#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16302#[cfg_attr(test, assert_instr(sminp))]
16303pub fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
16304    unsafe extern "unadjusted" {
16305        #[cfg_attr(
16306            any(target_arch = "aarch64", target_arch = "arm64ec"),
16307            link_name = "llvm.aarch64.neon.sminp.v8i16"
16308        )]
16309        fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
16310    }
16311    unsafe { _vpminq_s16(a, b) }
16312}
16313#[doc = "Folding minimum of adjacent pairs"]
16314#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"]
16315#[inline(always)]
16316#[target_feature(enable = "neon")]
16317#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16318#[cfg_attr(test, assert_instr(sminp))]
16319pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
16320    unsafe extern "unadjusted" {
16321        #[cfg_attr(
16322            any(target_arch = "aarch64", target_arch = "arm64ec"),
16323            link_name = "llvm.aarch64.neon.sminp.v4i32"
16324        )]
16325        fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
16326    }
16327    unsafe { _vpminq_s32(a, b) }
16328}
16329#[doc = "Folding minimum of adjacent pairs"]
16330#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"]
16331#[inline(always)]
16332#[target_feature(enable = "neon")]
16333#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16334#[cfg_attr(test, assert_instr(uminp))]
16335pub fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
16336    unsafe extern "unadjusted" {
16337        #[cfg_attr(
16338            any(target_arch = "aarch64", target_arch = "arm64ec"),
16339            link_name = "llvm.aarch64.neon.uminp.v16i8"
16340        )]
16341        fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
16342    }
16343    unsafe { _vpminq_u8(a, b) }
16344}
16345#[doc = "Folding minimum of adjacent pairs"]
16346#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"]
16347#[inline(always)]
16348#[target_feature(enable = "neon")]
16349#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16350#[cfg_attr(test, assert_instr(uminp))]
16351pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
16352    unsafe extern "unadjusted" {
16353        #[cfg_attr(
16354            any(target_arch = "aarch64", target_arch = "arm64ec"),
16355            link_name = "llvm.aarch64.neon.uminp.v8i16"
16356        )]
16357        fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
16358    }
16359    unsafe { _vpminq_u16(a, b) }
16360}
16361#[doc = "Folding minimum of adjacent pairs"]
16362#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"]
16363#[inline(always)]
16364#[target_feature(enable = "neon")]
16365#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16366#[cfg_attr(test, assert_instr(uminp))]
16367pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
16368    unsafe extern "unadjusted" {
16369        #[cfg_attr(
16370            any(target_arch = "aarch64", target_arch = "arm64ec"),
16371            link_name = "llvm.aarch64.neon.uminp.v4i32"
16372        )]
16373        fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
16374    }
16375    unsafe { _vpminq_u32(a, b) }
16376}
16377#[doc = "Floating-point minimum pairwise"]
16378#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"]
16379#[inline(always)]
16380#[target_feature(enable = "neon")]
16381#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16382#[cfg_attr(test, assert_instr(fminp))]
16383pub fn vpminqd_f64(a: float64x2_t) -> f64 {
16384    unsafe extern "unadjusted" {
16385        #[cfg_attr(
16386            any(target_arch = "aarch64", target_arch = "arm64ec"),
16387            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
16388        )]
16389        fn _vpminqd_f64(a: float64x2_t) -> f64;
16390    }
16391    unsafe { _vpminqd_f64(a) }
16392}
16393#[doc = "Floating-point minimum pairwise"]
16394#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"]
16395#[inline(always)]
16396#[target_feature(enable = "neon")]
16397#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16398#[cfg_attr(test, assert_instr(fminp))]
16399pub fn vpmins_f32(a: float32x2_t) -> f32 {
16400    unsafe extern "unadjusted" {
16401        #[cfg_attr(
16402            any(target_arch = "aarch64", target_arch = "arm64ec"),
16403            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
16404        )]
16405        fn _vpmins_f32(a: float32x2_t) -> f32;
16406    }
16407    unsafe { _vpmins_f32(a) }
16408}
16409#[doc = "Signed saturating Absolute value"]
16410#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"]
16411#[inline(always)]
16412#[target_feature(enable = "neon")]
16413#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16414#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
16415pub fn vqabs_s64(a: int64x1_t) -> int64x1_t {
16416    unsafe extern "unadjusted" {
16417        #[cfg_attr(
16418            any(target_arch = "aarch64", target_arch = "arm64ec"),
16419            link_name = "llvm.aarch64.neon.sqabs.v1i64"
16420        )]
16421        fn _vqabs_s64(a: int64x1_t) -> int64x1_t;
16422    }
16423    unsafe { _vqabs_s64(a) }
16424}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
    // Forward to the LLVM `sqabs` intrinsic for a 2 x i64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v2i64"
        )]
        fn _vqabsq_s64(a: int64x2_t) -> int64x2_t;
    }
    // SAFETY: pure arithmetic intrinsic; `neon` is guaranteed by `#[target_feature]`.
    unsafe { _vqabsq_s64(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsb_s8(a: i8) -> i8 {
    // Scalar form: splat `a` into a vector, run the vector saturating abs,
    // and read back lane 0.
    // SAFETY: lane 0 always exists in the 8-lane result of `vqabs_s8`.
    unsafe { simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsh_s16(a: i16) -> i16 {
    // Scalar form: splat `a` into a vector, run the vector saturating abs,
    // and read back lane 0.
    // SAFETY: lane 0 always exists in the 4-lane result of `vqabs_s16`.
    unsafe { simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabss_s32(a: i32) -> i32 {
    // Scalar variant binds directly to the scalar LLVM intrinsic (`.i32`),
    // unlike the s8/s16 forms which go through a splat + extract.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i32"
        )]
        fn _vqabss_s32(a: i32) -> i32;
    }
    // SAFETY: pure arithmetic intrinsic; `neon` is guaranteed by `#[target_feature]`.
    unsafe { _vqabss_s32(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsd_s64(a: i64) -> i64 {
    // Scalar variant binds directly to the scalar LLVM intrinsic (`.i64`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i64"
        )]
        fn _vqabsd_s64(a: i64) -> i64;
    }
    // SAFETY: pure arithmetic intrinsic; `neon` is guaranteed by `#[target_feature]`.
    unsafe { _vqabsd_s64(a) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddb_s8(a: i8, b: i8) -> i8 {
    // Scalar form: splat both operands, do the vector saturating add,
    // then take lane 0 as the scalar result.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    // SAFETY: lane 0 always exists in the 8-lane result of `vqadd_s8`.
    unsafe { simd_extract!(vqadd_s8(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddh_s16(a: i16, b: i16) -> i16 {
    // Scalar form: splat both operands, do the vector saturating add,
    // then take lane 0 as the scalar result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    // SAFETY: lane 0 always exists in the 4-lane result of `vqadd_s16`.
    unsafe { simd_extract!(vqadd_s16(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddb_u8(a: u8, b: u8) -> u8 {
    // Scalar form: splat both operands, do the unsigned vector saturating
    // add, then take lane 0 as the scalar result.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    // SAFETY: lane 0 always exists in the 8-lane result of `vqadd_u8`.
    unsafe { simd_extract!(vqadd_u8(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddh_u16(a: u16, b: u16) -> u16 {
    // Scalar form: splat both operands, do the unsigned vector saturating
    // add, then take lane 0 as the scalar result.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    // SAFETY: lane 0 always exists in the 4-lane result of `vqadd_u16`.
    unsafe { simd_extract!(vqadd_u16(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqadds_s32(a: i32, b: i32) -> i32 {
    // Scalar variant binds directly to the scalar LLVM `sqadd` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i32"
        )]
        fn _vqadds_s32(a: i32, b: i32) -> i32;
    }
    // SAFETY: pure arithmetic intrinsic; `neon` is guaranteed by `#[target_feature]`.
    unsafe { _vqadds_s32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddd_s64(a: i64, b: i64) -> i64 {
    // Scalar variant binds directly to the scalar LLVM `sqadd` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i64"
        )]
        fn _vqaddd_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: pure arithmetic intrinsic; `neon` is guaranteed by `#[target_feature]`.
    unsafe { _vqaddd_s64(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqadds_u32(a: u32, b: u32) -> u32 {
    // Scalar variant binds directly to the scalar LLVM `uqadd` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i32"
        )]
        fn _vqadds_u32(a: u32, b: u32) -> u32;
    }
    // SAFETY: pure arithmetic intrinsic; `neon` is guaranteed by `#[target_feature]`.
    unsafe { _vqadds_u32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddd_u64(a: u64, b: u64) -> u64 {
    // Scalar variant binds directly to the scalar LLVM `uqadd` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i64"
        )]
        fn _vqaddd_u64(a: u64, b: u64) -> u64;
    }
    // SAFETY: pure arithmetic intrinsic; `neon` is guaranteed by `#[target_feature]`.
    unsafe { _vqaddd_u64(a, b) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // N indexes the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // Saturating add of `a` and the doubling widening product of the high
    // half of `b` with lane N of `c`.
    vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // N indexes the 8-lane `c`, so it must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // Saturating add of `a` and the doubling widening product of the high
    // half of `b` with lane N of `c`.
    vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // N indexes the 2-lane `c`, so it must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    // Saturating add of `a` and the doubling widening product of the high
    // half of `b` with lane N of `c`.
    vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N indexes the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // Saturating add of `a` and the doubling widening product of the high
    // half of `b` with lane N of `c`.
    vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Saturating add of `a` and the doubling widening product of the high
    // half of `b` with the scalar `c`.
    vqaddq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // Saturating add of `a` and the doubling widening product of the high
    // halves of `b` and `c`.
    vqaddq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Saturating add of `a` and the doubling widening product of the high
    // half of `b` with the scalar `c`.
    vqaddq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // Saturating add of `a` and the doubling widening product of the high
    // halves of `b` and `c`.
    vqaddq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N indexes the 8-lane `c`, so it must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // Saturating add of `a` and the doubling widening product of `b`
    // with lane N of `c`.
    vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N indexes the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // Saturating add of `a` and the doubling widening product of `b`
    // with lane N of `c`.
    vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    // LANE indexes the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is validated above, so the lane extract is in bounds.
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    // LANE indexes the 8-lane `c`, so it must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is validated above, so the lane extract is in bounds.
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    // LANE indexes the 2-lane `c`, so it must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is validated above, so the lane extract is in bounds.
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    // LANE indexes the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is validated above, so the lane extract is in bounds.
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
    // Widening doubling product of the splatted scalars; lane 0 holds b*c*2.
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    // SAFETY: lane 0 always exists in the 4-lane product vector.
    unsafe { vqadds_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
    // Saturating add of `a` and the saturating doubling widening product
    // of the scalars `b` and `c` (all-scalar path, no vector splat needed).
    let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c));
    x
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // N indexes the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // Saturating subtract from `a` of the doubling widening product of the
    // high half of `b` with lane N of `c`.
    vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // N indexes the 8-lane `c`, so it must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // Saturating subtract from `a` of the doubling widening product of the
    // high half of `b` with lane N of `c`.
    vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // N indexes the 2-lane `c`, so it must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    // Saturating subtract from `a` of the doubling widening product of the
    // high half of `b` with lane N of `c`.
    vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N indexes the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // Saturating subtract from `a` of the doubling widening product of the
    // high half of `b` with lane N of `c`.
    vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Saturating subtract from `a` of the doubling widening product of the
    // high half of `b` with the scalar `c`.
    vqsubq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // Saturating subtract from `a` of the doubling widening product of the
    // high halves of `b` and `c`.
    vqsubq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Saturating subtract from `a` of the doubling widening product of the
    // high half of `b` with the scalar `c`.
    vqsubq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // Saturating subtract from `a` of the doubling widening product of the
    // high halves of `b` and `c`.
    vqsubq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N indexes the 8-lane `c`, so it must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // Saturating subtract from `a` of the doubling widening product of `b`
    // with lane N of `c`.
    vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N indexes the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // Saturating subtract from `a` of the doubling widening product of `b`
    // with lane N of `c`.
    vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    // LANE indexes the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is validated above, so the lane extract is in bounds.
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    // LANE indexes the 8-lane `c`, so it must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is validated above, so the lane extract is in bounds.
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    // LANE indexes the 2-lane `c`, so it must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is validated above, so the lane extract is in bounds.
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    // LANE indexes the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is validated above, so the lane extract is in bounds.
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
    // Widening doubling product of the splatted scalars; lane 0 holds b*c*2.
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    // SAFETY: lane 0 always exists in the 4-lane product vector.
    unsafe { vqsubs_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
    // Saturating subtract from `a` of the saturating doubling widening
    // product of the scalars `b` and `c` (all-scalar path, no splat needed).
    let x: i64 = vqsubd_s64(a, vqdmulls_s32(b, c));
    x
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // LANE indexes the 4-lane `b`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is validated above; splat the selected lane of `b`
    // and run the full-vector saturating-doubling-multiply-high.
    unsafe { vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
    // LANE indexes the 4-lane `b`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is validated above; splat the selected lane of `b`
    // across 8 lanes and run the full-vector operation.
    unsafe { vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // LANE indexes the 2-lane `b`, so it must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is validated above; splat the selected lane of `b`
    // and run the full-vector operation.
    unsafe { vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
    // LANE indexes the 2-lane `b`, so it must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is validated above; splat the selected lane of `b`
    // across 4 lanes and run the full-vector operation.
    unsafe { vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
    // N indexes the 4-lane `b`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // SAFETY: N is validated above, so the lane extract is in bounds.
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
    // N indexes the 8-lane `b`, so it must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // SAFETY: N is validated above, so the lane extract is in bounds.
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_s16(a: i16, b: i16) -> i16 {
    // Scalar form: splat both operands, run the vector operation,
    // then take lane 0 as the scalar result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    // SAFETY: lane 0 always exists in the 4-lane result of `vqdmulh_s16`.
    unsafe { simd_extract!(vqdmulh_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
    // Scalar form: splat both operands, run the vector operation,
    // then take lane 0 as the scalar result.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    // SAFETY: lane 0 always exists in the 2-lane result of `vqdmulh_s32`.
    unsafe { simd_extract!(vqdmulh_s32(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
    // N indexes the 2-lane `b`, so it must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N is validated above, so the lane extract is in bounds.
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
    // N indexes the 4-lane `b`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // SAFETY: N is validated above, so the lane extract is in bounds.
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    // N selects the lane of `b` (0..=3); validated at compile time.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Keep only the upper half of `a` (lanes 4..=7).
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        // Broadcast lane N of `b` across all four lanes.
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // N selects the lane of the 128-bit `b` (0..=3); validated at compile time.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Keep only the upper half of `a` (lanes 2..=3).
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        // Broadcast lane N of `b` across both lanes.
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // N selects the lane of the 64-bit `b` (0..=1); validated at compile time.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Keep only the upper half of `a` (lanes 2..=3).
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        // Broadcast lane N of `b` across both lanes.
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // N selects the lane of the 128-bit `b` (0..=7); validated at compile time.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Keep only the upper half of `a` (lanes 4..=7).
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        // Broadcast lane N of `b` across all four lanes.
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    unsafe {
        // Keep only the upper half of `a` (lanes 4..=7).
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        // Broadcast the scalar `b` across all four lanes.
        let b: int16x4_t = vdup_n_s16(b);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    unsafe {
        // Keep only the upper half of `a` (lanes 2..=3).
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        // Broadcast the scalar `b` across both lanes.
        let b: int32x2_t = vdup_n_s32(b);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Keep only the upper halves (lanes 4..=7) of both inputs, then do a
        // regular saturating doubling long multiply on them.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Keep only the upper halves (lanes 2..=3) of both inputs, then do a
        // regular saturating doubling long multiply on them.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
    // N selects the lane of the 128-bit `b` (0..=7); validated at compile time.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Broadcast lane N of `b` across all four lanes.
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
    // N selects the lane of the 128-bit `b` (0..=3); validated at compile time.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Broadcast lane N of `b` across both lanes.
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
    // N selects the lane of `b` (0..=3); validated at compile time.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Pull out the chosen lane and defer to the scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
    // N selects the lane of the 128-bit `b` (0..=3); validated at compile time.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Pull out the chosen lane and defer to the scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
    // N selects the lane of the 128-bit `b` (0..=7); validated at compile time.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Pull out the chosen lane and defer to the scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_s16(a: i16, b: i16) -> i32 {
    // No scalar i16 LLVM intrinsic is bound here: broadcast both scalars into
    // 4-lane vectors, run the vector SQDMULL, and take lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmull_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
    // N selects the lane of `b` (0..=1); validated at compile time.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Pull out the chosen lane and defer to the scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_s32(a: i32, b: i32) -> i64 {
    // Direct binding to the dedicated scalar LLVM intrinsic (no vector
    // round-trip needed for the i32 -> i64 case).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqdmulls.scalar"
        )]
        fn _vqdmulls_s32(a: i32, b: i32) -> i64;
    }
    unsafe { _vqdmulls_s32(a, b) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    unsafe {
        // Narrow `b` with saturation, then concatenate: `a` fills the low
        // half of the result and the narrowed `b` fills the high half.
        simd_shuffle!(
            a,
            vqmovn_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Narrow `b` with saturation, then concatenate `a` (low half) with the
    // narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Narrow `b` with saturation, then concatenate `a` (low half) with the
    // narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) }
}
17309#[doc = "Signed saturating extract narrow"]
17310#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"]
17311#[inline(always)]
17312#[target_feature(enable = "neon")]
17313#[cfg_attr(test, assert_instr(uqxtn2))]
17314#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17315pub fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
17316    unsafe {
17317        simd_shuffle!(
17318            a,
17319            vqmovn_u16(b),
17320            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
17321        )
17322    }
17323}
17324#[doc = "Signed saturating extract narrow"]
17325#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"]
17326#[inline(always)]
17327#[target_feature(enable = "neon")]
17328#[cfg_attr(test, assert_instr(uqxtn2))]
17329#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17330pub fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
17331    unsafe { simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
17332}
17333#[doc = "Signed saturating extract narrow"]
17334#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"]
17335#[inline(always)]
17336#[target_feature(enable = "neon")]
17337#[cfg_attr(test, assert_instr(uqxtn2))]
17338#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17339pub fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
17340    unsafe { simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) }
17341}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_s64(a: i64) -> i32 {
    // Direct binding to the dedicated scalar LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64"
        )]
        fn _vqmovnd_s64(a: i64) -> i32;
    }
    unsafe { _vqmovnd_s64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_u64(a: u64) -> u32 {
    // Direct binding to the dedicated scalar LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64"
        )]
        fn _vqmovnd_u64(a: u64) -> u32;
    }
    unsafe { _vqmovnd_u64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_s16(a: i16) -> i8 {
    // No scalar form bound: broadcast, narrow with saturation, take lane 0.
    unsafe { simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_s32(a: i32) -> i16 {
    // No scalar form bound: broadcast, narrow with saturation, take lane 0.
    unsafe { simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_u16(a: u16) -> u8 {
    // No scalar form bound: broadcast, narrow with saturation, take lane 0.
    unsafe { simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_u32(a: u32) -> u16 {
    // No scalar form bound: broadcast, narrow with saturation, take lane 0.
    unsafe { simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    unsafe {
        // Narrow signed `b` to unsigned with saturation, then concatenate:
        // `a` fills the low half and the narrowed `b` fills the high half.
        simd_shuffle!(
            a,
            vqmovun_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Narrow signed `b` to unsigned with saturation, then concatenate `a`
    // (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Narrow signed `b` to unsigned with saturation, then concatenate `a`
    // (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovunh_s16(a: i16) -> u8 {
    // No scalar form bound: broadcast, narrow to unsigned with saturation,
    // take lane 0.
    unsafe { simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovuns_s32(a: i32) -> u16 {
    // No scalar form bound: broadcast, narrow to unsigned with saturation,
    // take lane 0.
    unsafe { simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovund_s64(a: i64) -> u32 {
    // No scalar form bound: broadcast, narrow to unsigned with saturation,
    // take lane 0.
    unsafe { simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqneg_s64(a: int64x1_t) -> int64x1_t {
    // Direct binding to the LLVM AArch64 intrinsic for the 1 x i64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v1i64"
        )]
        fn _vqneg_s64(a: int64x1_t) -> int64x1_t;
    }
    unsafe { _vqneg_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
    // Direct binding to the LLVM AArch64 intrinsic for the 2 x i64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v2i64"
        )]
        fn _vqnegq_s64(a: int64x2_t) -> int64x2_t;
    }
    unsafe { _vqnegq_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegb_s8(a: i8) -> i8 {
    // No scalar form bound: broadcast, negate with saturation, take lane 0.
    unsafe { simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegh_s16(a: i16) -> i16 {
    // No scalar form bound: broadcast, negate with saturation, take lane 0.
    unsafe { simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegs_s32(a: i32) -> i32 {
    // No scalar form bound: broadcast, negate with saturation, take lane 0.
    unsafe { simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegd_s64(a: i64) -> i64 {
    // No scalar form bound: broadcast, negate with saturation, take lane 0.
    unsafe { simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // LANE selects the lane of `c` (0..=3); validated at compile time.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Splat the chosen lane of `c`, then accumulate with the vector form.
        let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // LANE selects the lane of `c` (0..=1); validated at compile time.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Splat the chosen lane of `c`, then accumulate with the vector form.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32; 2]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // LANE selects the lane of the 128-bit `c` (0..=7); validated at compile time.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Splat the chosen lane of `c`, then accumulate with the vector form.
        let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // LANE selects the lane of the 128-bit `c` (0..=3); validated at compile time.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Splat the chosen lane of `c`, then accumulate with the vector form.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32; 2]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // LANE selects the lane of the 64-bit `c` (0..=3); validated at compile time.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Splat the chosen lane of `c` to all 8 lanes, then accumulate.
        let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32; 8]);
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // LANE selects the lane of the 64-bit `c` (0..=1); validated at compile time.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Splat the chosen lane of `c` to all 4 lanes, then accumulate.
        let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // LANE selects the lane of the 128-bit `c` (0..=7); validated at compile time.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Splat the chosen lane of `c` to all 8 lanes, then accumulate.
        let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32; 8]);
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // LANE selects the lane of the 128-bit `c` (0..=3); validated at compile time.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Splat the chosen lane of `c` to all 4 lanes, then accumulate.
        let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Direct binding to the LLVM AArch64 intrinsic for the 4 x i16 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i16"
        )]
        fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    unsafe { _vqrdmlah_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Direct binding to the LLVM AArch64 intrinsic for the 8 x i16 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v8i16"
        )]
        fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vqrdmlahq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Direct binding to the LLVM AArch64 intrinsic for the 2 x i32 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v2i32"
        )]
        fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    unsafe { _vqrdmlah_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Direct binding to the LLVM AArch64 intrinsic for the 4 x i32 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i32"
        )]
        fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vqrdmlahq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // LANE is limited to 2 unsigned bits (0..=3), matching the 4 lanes of
    // `int16x4_t`; the check is a compile-time error for out-of-range lanes.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane of `c` and defer to the all-scalar form.
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // `laneq`: `c` is a 128-bit vector, so LANE takes 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // 32-bit scalar variant: `c` has 2 lanes, so LANE is 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // `laneq`: 4 lanes of i32, so LANE is 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Scalar form implemented via the vector form: splat each scalar into a
    // 4-lane vector, run the vector op, then take lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlah_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Same splat / vector-op / extract-lane-0 pattern for 32-bit scalars.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlah_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // LANE is 2 bits (0..=3), matching the 4 lanes of `int16x4_t`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `c` across all 4 lanes, then run the
        // plain vector multiply-subtract.
        let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // 2 lanes of i32, so LANE is 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32; 2]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // `laneq`: `c` is 128-bit (8 lanes), so LANE is 3 bits (0..=7); the
    // shuffle narrows the broadcast result down to 4 lanes.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // `laneq`: 4 lanes of i32 in `c`, so LANE is 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32; 2]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // `c` is a 64-bit (4-lane) vector, so LANE is 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the chosen lane of `c` into all 8 lanes of a q-register,
        // then run the full-width vector multiply-subtract.
        let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32; 8]);
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // 2-lane `c`, so LANE is 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // `laneq`: 8-lane `c`, so LANE is 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32; 8]);
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // `laneq`: 4-lane `c`, so LANE is 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Direct binding to the LLVM intrinsic in `link_name` (4 x i16 lanes);
    // `assert_instr(sqrdmlsh)` above pins the lowered instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16"
        )]
        fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    // SAFETY: requires only the `rdm` target feature, enabled above.
    unsafe { _vqrdmlsh_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // 128-bit (8 x i16) variant of the same binding.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16"
        )]
        fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: requires only the `rdm` target feature, enabled above.
    unsafe { _vqrdmlshq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // 64-bit (2 x i32) variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32"
        )]
        fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    // SAFETY: requires only the `rdm` target feature, enabled above.
    unsafe { _vqrdmlsh_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // 128-bit (4 x i32) variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32"
        )]
        fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: requires only the `rdm` target feature, enabled above.
    unsafe { _vqrdmlshq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // LANE is 2 bits (0..=3): one of the 4 lanes of `int16x4_t`.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane of `c` and defer to the all-scalar form.
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // `laneq`: 8-lane `c`, so LANE is 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // 2-lane `c`, so LANE is 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // `laneq`: 4-lane `c`, so LANE is 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Scalar form via the vector form: splat each operand, run the vector
    // multiply-subtract, and return lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlsh_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Same splat / vector-op / extract-lane-0 pattern for 32-bit scalars.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlsh_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
    // LANE is 2 bits (0..=3): one of the 4 lanes of `int16x4_t`.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane of `b` and defer to the all-scalar form.
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16 {
    // `laneq`: 8-lane `b`, so LANE is 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
    // 2-lane `b`, so LANE is 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32 {
    // `laneq`: 4-lane `b`, so LANE is 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_s16(a: i16, b: i16) -> i16 {
    // Scalar form: splat both scalars to 4-lane vectors, multiply with the
    // vector intrinsic, and return lane 0.
    unsafe { simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
    // Same splat / vector-op / extract-lane-0 pattern for 32-bit scalars.
    unsafe { simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_s8(a: i8, b: i8) -> i8 {
    // Scalar form via the vector form: splat both operands, shift, and
    // return lane 0 of the result.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_s8(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_s16(a: i16, b: i16) -> i16 {
    // Same splat / vector-op / extract-lane-0 pattern for 16-bit scalars.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_s16(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_u8(a: u8, b: i8) -> u8 {
    // Unsigned variant: the value operand `a` is unsigned, while the shift
    // amount `b` remains a signed element, matching the vector intrinsic.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_u8(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_u16(a: u16, b: i16) -> u16 {
    // 16-bit unsigned variant of the same pattern.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_u16(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_s64(a: i64, b: i64) -> i64 {
    // 64-bit scalars have a dedicated scalar LLVM intrinsic (`.i64` suffix),
    // so no splat/extract round-trip is needed here.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i64"
        )]
        fn _vqrshld_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: requires only the `neon` target feature, enabled above.
    unsafe { _vqrshld_s64(a, b) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_s32(a: i32, b: i32) -> i32 {
    // Scalar 32-bit variant via the `.i32` LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i32"
        )]
        fn _vqrshls_s32(a: i32, b: i32) -> i32;
    }
    // SAFETY: requires only the `neon` target feature, enabled above.
    unsafe { _vqrshls_s32(a, b) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_u32(a: u32, b: i32) -> u32 {
    // Unsigned 32-bit variant: unsigned value, signed shift amount.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i32"
        )]
        fn _vqrshls_u32(a: u32, b: i32) -> u32;
    }
    // SAFETY: requires only the `neon` target feature, enabled above.
    unsafe { _vqrshls_u32(a, b) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_u64(a: u64, b: i64) -> u64 {
    // Unsigned 64-bit variant: unsigned value, signed shift amount.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i64"
        )]
        fn _vqrshld_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: requires only the `neon` target feature, enabled above.
    unsafe { _vqrshld_u64(a, b) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // N is the shift amount; for 16-bit source elements it must be 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` to 8 x i8, then concatenate: lanes 0..7 come from `a`
        // (the existing low half), lanes 8..15 from the narrowed `b`.
        simd_shuffle!(
            a,
            vqrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // 32-bit source elements: shift amount must be 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // 64-bit source elements: shift amount must be 1..=32.
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Unsigned variant of the 16->8 bit narrow; shift amount 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Unsigned 32->16 bit narrow; shift amount 1..=16.
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Unsigned 64->32 bit narrow; shift amount 1..=32.
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
18274#[doc = "Unsigned saturating rounded shift right narrow"]
18275#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"]
18276#[inline(always)]
18277#[target_feature(enable = "neon")]
18278#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
18279#[rustc_legacy_const_generics(1)]
18280#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18281pub fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
18282    static_assert!(N >= 1 && N <= 32);
18283    let a: uint64x2_t = vdupq_n_u64(a);
18284    unsafe { simd_extract!(vqrshrn_n_u64::<N>(a), 0) }
18285}
18286#[doc = "Unsigned saturating rounded shift right narrow"]
18287#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"]
18288#[inline(always)]
18289#[target_feature(enable = "neon")]
18290#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
18291#[rustc_legacy_const_generics(1)]
18292#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18293pub fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
18294    static_assert!(N >= 1 && N <= 8);
18295    let a: uint16x8_t = vdupq_n_u16(a);
18296    unsafe { simd_extract!(vqrshrn_n_u16::<N>(a), 0) }
18297}
18298#[doc = "Unsigned saturating rounded shift right narrow"]
18299#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"]
18300#[inline(always)]
18301#[target_feature(enable = "neon")]
18302#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
18303#[rustc_legacy_const_generics(1)]
18304#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18305pub fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
18306    static_assert!(N >= 1 && N <= 16);
18307    let a: uint32x4_t = vdupq_n_u32(a);
18308    unsafe { simd_extract!(vqrshrn_n_u32::<N>(a), 0) }
18309}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    // Shift amount for a 16-bit source narrowed to 8 bits: 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrn_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
    // Shift amount for a 32-bit source narrowed to 16 bits: 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrn_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    // Shift amount for a 64-bit source narrowed to 32 bits: 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrn_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Shift amount for a 16-bit source narrowed to 8 bits: 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` to 8 lanes, then concatenate: `a` becomes the low
        // half (indices 0..7), the narrowed result the high half (8..15).
        simd_shuffle!(
            a,
            vqrshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Shift amount for a 32-bit source narrowed to 16 bits: 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Shift amount for a 64-bit source narrowed to 32 bits: 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
    // Shift amount for a 64-bit source narrowed to 32 bits: 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrun_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    // Shift amount for a 16-bit source narrowed to 8 bits: 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrun_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
    // Shift amount for a 32-bit source narrowed to 16 bits: 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrun_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
    // Left-shift amount must fit in 3 bits (0..=7) for an 8-bit element.
    static_assert_uimm_bits!(N, 3);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshl_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
    // Left-shift amount must fit in 6 bits (0..=63) for a 64-bit element.
    static_assert_uimm_bits!(N, 6);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshl_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
    // Left-shift amount must fit in 4 bits (0..=15) for a 16-bit element.
    static_assert_uimm_bits!(N, 4);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshl_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
    // Left-shift amount must fit in 5 bits (0..=31) for a 32-bit element.
    static_assert_uimm_bits!(N, 5);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshl_n_s32::<N>(vdup_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
    // Left-shift amount must fit in 3 bits (0..=7) for an 8-bit element.
    static_assert_uimm_bits!(N, 3);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshl_n_u8::<N>(vdup_n_u8(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
    // Left-shift amount must fit in 6 bits (0..=63) for a 64-bit element.
    static_assert_uimm_bits!(N, 6);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshl_n_u64::<N>(vdup_n_u64(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
    // Left-shift amount must fit in 4 bits (0..=15) for a 16-bit element.
    static_assert_uimm_bits!(N, 4);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshl_n_u16::<N>(vdup_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
    // Left-shift amount must fit in 5 bits (0..=31) for a 32-bit element.
    static_assert_uimm_bits!(N, 5);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshl_n_u32::<N>(vdup_n_u32(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_s8(a: i8, b: i8) -> i8 {
    // Splat both scalars, apply the vector shift, read back lane 0.
    let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_s16(a: i16, b: i16) -> i16 {
    // Splat both scalars, apply the vector shift, read back lane 0.
    let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_s32(a: i32, b: i32) -> i32 {
    // Splat both scalars, apply the vector shift, read back lane 0.
    let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_u8(a: u8, b: i8) -> u8 {
    // Shift count `b` is signed: negative values shift right.
    // Splat both scalars, apply the vector shift, read back lane 0.
    let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_u16(a: u16, b: i16) -> u16 {
    // Shift count `b` is signed: negative values shift right.
    // Splat both scalars, apply the vector shift, read back lane 0.
    let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_u32(a: u32, b: i32) -> u32 {
    // Shift count `b` is signed: negative values shift right.
    // Splat both scalars, apply the vector shift, read back lane 0.
    let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_s64(a: i64, b: i64) -> i64 {
    // A native scalar (i64) form of this intrinsic exists, so bind the
    // LLVM intrinsic directly instead of going through a vector splat.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshl.i64"
        )]
        fn _vqshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqshld_s64(a, b) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_u64(a: u64, b: i64) -> u64 {
    // Shift count `b` is signed: negative values shift right.
    // Direct binding of the scalar (i64) LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshl.i64"
        )]
        fn _vqshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqshld_u64(a, b) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
    // Left-shift amount must fit in 3 bits (0..=7) for an 8-bit element.
    static_assert_uimm_bits!(N, 3);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshlu_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
    // Left-shift amount must fit in 6 bits (0..=63) for a 64-bit element.
    static_assert_uimm_bits!(N, 6);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshlu_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
    // Left-shift amount must fit in 4 bits (0..=15) for a 16-bit element.
    static_assert_uimm_bits!(N, 4);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshlu_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
    // Left-shift amount must fit in 5 bits (0..=31) for a 32-bit element.
    static_assert_uimm_bits!(N, 5);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshlu_n_s32::<N>(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Shift amount for a 16-bit source narrowed to 8 bits: 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` to 8 lanes, then concatenate: `a` becomes the low
        // half (indices 0..7), the narrowed result the high half (8..15).
        simd_shuffle!(
            a,
            vqshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Shift amount for a 32-bit source narrowed to 16 bits: 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Shift amount for a 64-bit source narrowed to 32 bits: 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Shift amount for a 16-bit source narrowed to 8 bits: 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` to 8 lanes, then concatenate: `a` becomes the low
        // half (indices 0..7), the narrowed result the high half (8..15).
        simd_shuffle!(
            a,
            vqshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Shift amount for a 32-bit source narrowed to 16 bits: 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Shift amount for a 64-bit source narrowed to 32 bits: 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    // Shift amount for a 64-bit source narrowed to 32 bits: 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // A scalar (i64 -> i32) LLVM intrinsic exists for this operation; it
    // takes the shift amount as a runtime argument, so pass `N` through.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshrn.i32"
        )]
        fn _vqshrnd_n_s64(a: i64, n: i32) -> i32;
    }
    unsafe { _vqshrnd_n_s64(a, N) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    // Shift amount for a 64-bit source narrowed to 32 bits: 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Scalar LLVM intrinsic; the shift amount is a runtime argument,
    // so the const generic `N` is forwarded as a value.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshrn.i32"
        )]
        fn _vqshrnd_n_u64(a: u64, n: i32) -> u32;
    }
    unsafe { _vqshrnd_n_u64(a, N) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    // Shift amount for a 16-bit source narrowed to 8 bits: 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshrn_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
    // Shift amount for a 32-bit source narrowed to 16 bits: 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshrn_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    // Shift amount for a 16-bit source narrowed to 8 bits: 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshrn_n_u16::<N>(vdupq_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
    // Shift amount for a 32-bit source narrowed to 16 bits: 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshrn_n_u32::<N>(vdupq_n_u32(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Shift amount for a 16-bit source narrowed to 8 bits: 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` to 8 lanes, then concatenate: `a` becomes the low
        // half (indices 0..7), the narrowed result the high half (8..15).
        simd_shuffle!(
            a,
            vqshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Shift amount for a 32-bit source narrowed to 16 bits: 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Shift amount for a 64-bit source narrowed to 32 bits: 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
    // Shift amount for a 64-bit source narrowed to 32 bits: 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshrun_n_s64::<N>(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    // Shift amount for a 16-bit source narrowed to 8 bits: 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshrun_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
    // Shift amount for a 32-bit source narrowed to 16 bits: 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Splat scalar -> vector intrinsic -> extract lane 0.
    unsafe { simd_extract!(vqshrun_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubb_s8(a: i8, b: i8) -> i8 {
    // Splat both scalars, run the vector saturating subtract, extract lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqsub_s8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubh_s16(a: i16, b: i16) -> i16 {
    // Splat both scalars, run the vector saturating subtract, extract lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqsub_s16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubb_u8(a: u8, b: u8) -> u8 {
    // Splat both scalars, run the vector saturating subtract, extract lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    unsafe { simd_extract!(vqsub_u8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubh_u16(a: u16, b: u16) -> u16 {
    // Splat both scalars, run the vector saturating subtract, extract lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    unsafe { simd_extract!(vqsub_u16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubs_s32(a: i32, b: i32) -> i32 {
    // A scalar (i32) LLVM intrinsic exists; bind it directly rather than
    // going through a vector splat/extract.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i32"
        )]
        fn _vqsubs_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqsubs_s32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubd_s64(a: i64, b: i64) -> i64 {
    // Direct binding of the scalar (i64) LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i64"
        )]
        fn _vqsubd_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqsubd_s64(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubs_u32(a: u32, b: u32) -> u32 {
    // Direct binding of the scalar (i32) LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i32"
        )]
        fn _vqsubs_u32(a: u32, b: u32) -> u32;
    }
    unsafe { _vqsubs_u32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubd_u64(a: u64, b: u64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i64"
        )]
        fn _vqsubd_u64(a: u64, b: u64) -> u64;
    }
    // SAFETY: the `neon` target feature is enabled via `#[target_feature]`
    // above, and the extern declaration matches the `uqsub.i64` intrinsic.
    unsafe { _vqsubd_u64(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: single-table TBL, shared by the s8/u8/p8 public wrappers below.
fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v8i8"
        )]
        fn _vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: `neon` is enabled via `#[target_feature]` above, and the extern
    // declaration matches the `tbl1.v8i8` intrinsic.
    unsafe { _vqtbl1(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: 128-bit-result variant of `vqtbl1`.
fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v16i8"
        )]
        fn _vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: `neon` is enabled via `#[target_feature]` above, and the extern
    // declaration matches the `tbl1.v16i8` intrinsic.
    unsafe { _vqtbl1q(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    vqtbl1(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    vqtbl1q(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (u8 <-> i8); a byte-wise table look-up is sign-agnostic.
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (u8 <-> i8); a byte-wise table look-up is sign-agnostic.
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (p8 <-> i8); a byte-wise table look-up is representation-agnostic.
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (p8 <-> i8); a byte-wise table look-up is representation-agnostic.
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: two-table TBL, shared by the s8/u8/p8 public wrappers below.
fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v8i8"
        )]
        fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: `neon` is enabled via `#[target_feature]` above, and the extern
    // declaration matches the `tbl2.v8i8` intrinsic.
    unsafe { _vqtbl2(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: 128-bit-result variant of `vqtbl2`.
fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v16i8"
        )]
        fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: `neon` is enabled via `#[target_feature]` above, and the extern
    // declaration matches the `tbl2.v16i8` intrinsic.
    unsafe { _vqtbl2q(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t {
    vqtbl2(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t {
    vqtbl2q(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (u8 <-> i8); a byte-wise table look-up is sign-agnostic.
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (u8 <-> i8); a byte-wise table look-up is sign-agnostic.
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (p8 <-> i8); a byte-wise table look-up is representation-agnostic.
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (p8 <-> i8); a byte-wise table look-up is representation-agnostic.
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: three-table TBL, shared by the s8/u8/p8 public wrappers below.
fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v8i8"
        )]
        fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: `neon` is enabled via `#[target_feature]` above, and the extern
    // declaration matches the `tbl3.v8i8` intrinsic.
    unsafe { _vqtbl3(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: 128-bit-result variant of `vqtbl3`.
fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v16i8"
        )]
        fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: `neon` is enabled via `#[target_feature]` above, and the extern
    // declaration matches the `tbl3.v16i8` intrinsic.
    unsafe { _vqtbl3q(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t {
    vqtbl3(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t {
    vqtbl3q(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (u8 <-> i8); a byte-wise table look-up is sign-agnostic.
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (u8 <-> i8); a byte-wise table look-up is sign-agnostic.
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (p8 <-> i8); a byte-wise table look-up is representation-agnostic.
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (p8 <-> i8); a byte-wise table look-up is representation-agnostic.
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: four-table TBL, shared by the s8/u8/p8 public wrappers below.
fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v8i8"
        )]
        fn _vqtbl4(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x8_t,
        ) -> int8x8_t;
    }
    // SAFETY: `neon` is enabled via `#[target_feature]` above, and the extern
    // declaration matches the `tbl4.v8i8` intrinsic.
    unsafe { _vqtbl4(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: 128-bit-result variant of `vqtbl4`.
fn vqtbl4q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v16i8"
        )]
        fn _vqtbl4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: `neon` is enabled via `#[target_feature]` above, and the extern
    // declaration matches the `tbl4.v16i8` intrinsic.
    unsafe { _vqtbl4q(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t {
    vqtbl4(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t {
    vqtbl4q(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (u8 <-> i8); a byte-wise table look-up is sign-agnostic.
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (u8 <-> i8); a byte-wise table look-up is sign-agnostic.
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (p8 <-> i8); a byte-wise table look-up is representation-agnostic.
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (p8 <-> i8); a byte-wise table look-up is representation-agnostic.
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: single-table TBX (`a` supplies the fallback lanes), shared
// by the s8/u8/p8 public wrappers below.
fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v8i8"
        )]
        fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: `neon` is enabled via `#[target_feature]` above, and the extern
    // declaration matches the `tbx1.v8i8` intrinsic.
    unsafe { _vqtbx1(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: 128-bit-result variant of `vqtbx1`.
fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v16i8"
        )]
        fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: `neon` is enabled via `#[target_feature]` above, and the extern
    // declaration matches the `tbx1.v16i8` intrinsic.
    unsafe { _vqtbx1q(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    vqtbx1(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    vqtbx1q(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (u8 <-> i8); a byte-wise table look-up is sign-agnostic.
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (u8 <-> i8); a byte-wise table look-up is sign-agnostic.
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (p8 <-> i8); a byte-wise table look-up is representation-agnostic.
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (p8 <-> i8); a byte-wise table look-up is representation-agnostic.
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: two-table TBX (`a` supplies the fallback lanes), shared by
// the s8/u8/p8 public wrappers below.
fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v8i8"
        )]
        fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: `neon` is enabled via `#[target_feature]` above, and the extern
    // declaration matches the `tbx2.v8i8` intrinsic.
    unsafe { _vqtbx2(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: 128-bit-result variant of `vqtbx2`.
fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v16i8"
        )]
        fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: `neon` is enabled via `#[target_feature]` above, and the extern
    // declaration matches the `tbx2.v16i8` intrinsic.
    unsafe { _vqtbx2q(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t {
    vqtbx2(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t {
    vqtbx2q(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (u8 <-> i8); a byte-wise table look-up is sign-agnostic.
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (u8 <-> i8); a byte-wise table look-up is sign-agnostic.
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (p8 <-> i8); a byte-wise table look-up is representation-agnostic.
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (p8 <-> i8); a byte-wise table look-up is representation-agnostic.
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: three-table TBX (`a` supplies the fallback lanes), shared
// by the s8/u8/p8 public wrappers below.
fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v8i8"
        )]
        fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t)
            -> int8x8_t;
    }
    // SAFETY: `neon` is enabled via `#[target_feature]` above, and the extern
    // declaration matches the `tbx3.v8i8` intrinsic.
    unsafe { _vqtbx3(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: 128-bit-result variant of `vqtbx3`.
fn vqtbx3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v16i8"
        )]
        fn _vqtbx3q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: `neon` is enabled via `#[target_feature]` above, and the extern
    // declaration matches the `tbx3.v16i8` intrinsic.
    unsafe { _vqtbx3q(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t {
    vqtbx3(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t {
    vqtbx3q(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (u8 <-> i8); a byte-wise table look-up is sign-agnostic.
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (u8 <-> i8); a byte-wise table look-up is sign-agnostic.
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (p8 <-> i8); a byte-wise table look-up is representation-agnostic.
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    // SAFETY: transmutes reinterpret between same-size 8-bit-lane vector
    // types (p8 <-> i8); a byte-wise table look-up is representation-agnostic.
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4(
    a: int8x8_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x8_t,
) -> int8x8_t {
    // Crate-private helper shared by the public vqtbx4_* wrappers: binds the
    // LLVM TBX intrinsic for a four-register table with an 8-byte result.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v8i8"
        )]
        fn _vqtbx4(
            a: int8x8_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x8_t,
        ) -> int8x8_t;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vqtbx4(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4q(
    a: int8x16_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x16_t,
) -> int8x16_t {
    // Crate-private helper shared by the public vqtbx4q_* wrappers: binds the
    // LLVM TBX intrinsic for a four-register table with a 16-byte result.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v16i8"
        )]
        fn _vqtbx4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vqtbx4q(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t {
    // Signed lanes match the helper's types directly; no transmutes needed.
    vqtbx4(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t {
    // Signed lanes match the helper's types directly; no transmutes needed.
    vqtbx4q(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    // SAFETY: uint8 and int8 vectors share size and layout, so the transmutes
    // around the signed-lane helper are value-preserving bit casts.
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    // SAFETY: uint8 and int8 vectors share size and layout, so the transmutes
    // around the signed-lane helper are value-preserving bit casts.
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    // SAFETY: poly8 and int8 vectors share size and layout, so the transmutes
    // around the signed-lane helper are value-preserving bit casts.
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    // SAFETY: poly8 and int8 vectors share size and layout, so the transmutes
    // around the signed-lane helper are value-preserving bit casts.
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Rotate and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(rax1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Binding to the SHA3 RAX1 crypto intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.rax1"
        )]
        fn _vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vrax1q_u64(a, b) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_s8(a: int8x8_t) -> int8x8_t {
    // SAFETY: simd_bitreverse reverses the bits within each lane; lowered to RBIT.
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
    // SAFETY: simd_bitreverse reverses the bits within each lane; lowered to RBIT.
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    // SAFETY: u8/i8 vectors share layout; delegates to the signed variant.
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse lane order before and after the operation so
    // results match the little-endian lane convention.
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    // SAFETY: u8/i8 vectors share layout; delegates to the signed variant.
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    // Big-endian variant: reverse lane order before and after the operation so
    // results match the little-endian lane convention.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    // SAFETY: poly8/i8 vectors share layout; delegates to the signed variant.
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse lane order before and after the operation so
    // results match the little-endian lane convention.
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    // SAFETY: poly8/i8 vectors share layout; delegates to the signed variant.
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    // Big-endian variant: reverse lane order before and after the operation so
    // results match the little-endian lane convention.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v1f64"
        )]
        fn _vrecpe_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vrecpe_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v2f64"
        )]
        fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vrecpeq_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecped_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f64"
        )]
        fn _vrecped_f64(a: f64) -> f64;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vrecped_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpes_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f32"
        )]
        fn _vrecpes_f32(a: f32) -> f32;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vrecpes_f32(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecpe))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpeh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f16"
        )]
        fn _vrecpeh_f16(a: f16) -> f16;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vrecpeh_f16(a) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v1f64"
        )]
        fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vrecps_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v2f64"
        )]
        fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vrecpsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsd_f64(a: f64, b: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f64"
        )]
        fn _vrecpsd_f64(a: f64, b: f64) -> f64;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vrecpsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpss_f32(a: f32, b: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f32"
        )]
        fn _vrecpss_f32(a: f32, b: f32) -> f32;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vrecpss_f32(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpsh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f16"
        )]
        fn _vrecpsh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vrecpsh_f16(a, b) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxd_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f64"
        )]
        fn _vrecpxd_f64(a: f64) -> f64;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vrecpxd_f64(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxs_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f32"
        )]
        fn _vrecpxs_f32(a: f32) -> f32;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vrecpxs_f32(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecpx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpxh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f16"
        )]
        fn _vrecpxh_f16(a: f16) -> f16;
    }
    // SAFETY: the Rust signature matches the intrinsic declaration above.
    unsafe { _vrecpxh_f16(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    // SAFETY: source and destination vector types are the same size; this is
    // a pure bit-level reinterpretation (no instruction emitted).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    // Big-endian variant: reverse input lanes first so the bit cast matches
    // the little-endian lane convention; single-lane result needs no shuffle.
    let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    // SAFETY: source and destination vector types are the same size; this is
    // a pure bit-level reinterpretation (no instruction emitted).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    // Big-endian variant: reverse input lanes, bit-cast, then reverse output
    // lanes so results match the little-endian lane convention.
    let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    // SAFETY: source and destination vector types are the same size; this is
    // a pure bit-level reinterpretation (no instruction emitted).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    // Big-endian variant: single-lane input needs no pre-shuffle; reverse
    // output lanes so results match the little-endian lane convention.
    unsafe {
        let ret_val: float16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    // SAFETY: source and destination vector types are the same size; this is
    // a pure bit-level reinterpretation (no instruction emitted).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    // Big-endian variant: reverse input lanes, bit-cast, then reverse output
    // lanes so results match the little-endian lane convention.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    // SAFETY: p128 and float64x2_t are both 128 bits; this is a pure
    // bit-level reinterpretation (no instruction emitted).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    // Big-endian variant: scalar p128 input needs no pre-shuffle; reverse
    // output lanes so results match the little-endian lane convention.
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    // SAFETY: source and destination vector types are the same size; this is
    // a pure bit-level reinterpretation (no instruction emitted).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    // Big-endian variant: reverse input lanes first so the bit cast matches
    // the little-endian lane convention; single-lane result needs no shuffle.
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    // SAFETY: source and destination vector types are the same size; this is
    // a pure bit-level reinterpretation (no instruction emitted).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    // Big-endian variant: reverse input lanes first so the bit cast matches
    // the little-endian lane convention; single-lane result needs no shuffle.
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    // SAFETY: source and destination vector types are the same size; this is
    // a pure bit-level reinterpretation (no instruction emitted).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    // Big-endian variant: reverse input lanes, bit-cast, then reverse output
    // lanes so results match the little-endian lane convention.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    // SAFETY: source and destination vector types are the same size; this is
    // a pure bit-level reinterpretation (no instruction emitted).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    // Big-endian variant: reverse input lanes, bit-cast, then reverse output
    // lanes so results match the little-endian lane convention.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    // SAFETY: source and destination vector types are the same size; this is
    // a pure bit-level reinterpretation (no instruction emitted).
    unsafe { transmute(a) }
}
20392#[doc = "Vector reinterpret cast operation"]
20393#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
20394#[inline(always)]
20395#[cfg(target_endian = "big")]
20396#[target_feature(enable = "neon")]
20397#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20398#[cfg_attr(test, assert_instr(nop))]
20399pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
20400    unsafe {
20401        let ret_val: float32x2_t = transmute(a);
20402        simd_shuffle!(ret_val, ret_val, [1, 0])
20403    }
20404}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    // Big-endian: the single-lane input needs no reordering; reverse the
    // eight-lane result back to the intrinsics' lane numbering.
    unsafe {
        let ret_val: int8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    // Big-endian: the single-lane input needs no reordering; reverse the
    // four-lane result back to the intrinsics' lane numbering.
    unsafe {
        let ret_val: int16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    // Big-endian: the single-lane input needs no reordering; reverse the
    // two-lane result back to the intrinsics' lane numbering.
    unsafe {
        let ret_val: int32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t {
    // Single-lane in, single-lane out: no lane order to fix up, so one
    // definition (no endian cfg) serves both endiannesses.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    // Big-endian: the single-lane input needs no reordering; reverse the
    // eight-lane result back to the intrinsics' lane numbering.
    unsafe {
        let ret_val: uint8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    // Big-endian: the single-lane input needs no reordering; reverse the
    // four-lane result back to the intrinsics' lane numbering.
    unsafe {
        let ret_val: uint16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    // Big-endian: the single-lane input needs no reordering; reverse the
    // two-lane result back to the intrinsics' lane numbering.
    unsafe {
        let ret_val: uint32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane in, single-lane out: no lane order to fix up, so one
    // definition (no endian cfg) serves both endiannesses.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    // Big-endian: the single-lane input needs no reordering; reverse the
    // eight-lane result back to the intrinsics' lane numbering.
    unsafe {
        let ret_val: poly8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    // Big-endian: the single-lane input needs no reordering; reverse the
    // four-lane result back to the intrinsics' lane numbering.
    unsafe {
        let ret_val: poly16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t {
    // Single-lane in, single-lane out: no lane order to fix up, so one
    // definition (no endian cfg) serves both endiannesses.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    // Big-endian: reverse the input lanes into memory order before the
    // bit-cast; the scalar p128 result has no lanes to reorder.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    // Big-endian: reverse the input lanes into memory order, bit-cast, then
    // reverse the output lanes back to the intrinsics' lane numbering.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    // Big-endian: reverse the input lanes into memory order, bit-cast, then
    // reverse the 16-lane output back to the intrinsics' lane numbering.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    // Big-endian: reverse the input lanes into memory order, bit-cast, then
    // reverse the output lanes back to the intrinsics' lane numbering.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    // Big-endian: reverse the input lanes into memory order, bit-cast, then
    // reverse the output lanes back to the intrinsics' lane numbering.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Big-endian: reverse the input lanes into memory order, bit-cast, then
    // reverse the output lanes back to the intrinsics' lane numbering.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    // Big-endian: reverse the input lanes into memory order, bit-cast, then
    // reverse the 16-lane output back to the intrinsics' lane numbering.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    // Big-endian: reverse the input lanes into memory order, bit-cast, then
    // reverse the output lanes back to the intrinsics' lane numbering.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    // Big-endian: reverse the input lanes into memory order, bit-cast, then
    // reverse the output lanes back to the intrinsics' lane numbering.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Big-endian: reverse the input lanes into memory order, bit-cast, then
    // reverse the output lanes back to the intrinsics' lane numbering.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    // Big-endian: reverse the input lanes into memory order, bit-cast, then
    // reverse the 16-lane output back to the intrinsics' lane numbering.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    // Big-endian: reverse the input lanes into memory order, bit-cast, then
    // reverse the output lanes back to the intrinsics' lane numbering.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    // Big-endian: reverse the input lanes into memory order, bit-cast, then
    // reverse the output lanes back to the intrinsics' lane numbering.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    // Big-endian: reverse the input lanes into memory order before the
    // bit-cast; the single-lane result needs no reordering.
    let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    // Big-endian: reverse the 16-lane input into memory order, bit-cast,
    // then reverse the output lanes back to the intrinsics' lane numbering.
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    // Big-endian: reverse the input lanes into memory order before the
    // bit-cast; the single-lane result needs no reordering.
    let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    // Big-endian: reverse the input lanes into memory order, bit-cast, then
    // reverse the output lanes back to the intrinsics' lane numbering.
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    // Little-endian: a same-size bit cast; no lane reordering is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    // Big-endian: reverse the input lanes into memory order before the
    // bit-cast; the single-lane result needs no reordering.
    let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    // SAFETY: both types are 128-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    // Big-endian: reverse the source lanes so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        // SAFETY: both types are 128-bit NEON vectors; bitwise reinterpret is valid.
        let ret_val: float64x2_t = transmute(a);
        // Restore big-endian lane order in the result vector.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t {
    // SAFETY: both are single-lane 64-bit vectors, so no lane reordering is
    // needed on any endianness; a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t {
    // SAFETY: both are single-lane 64-bit vectors, so no lane reordering is
    // needed on any endianness; a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    // SAFETY: both types are 128-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    // Big-endian: reverse the source lanes so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        // SAFETY: both types are 128-bit NEON vectors; bitwise reinterpret is valid.
        let ret_val: float64x2_t = transmute(a);
        // Restore big-endian lane order in the result vector.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    // SAFETY: both types are 128-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    // Big-endian: reverse the source lanes so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        // SAFETY: both types are 128-bit NEON vectors; bitwise reinterpret is valid.
        let ret_val: poly64x2_t = transmute(a);
        // Restore big-endian lane order in the result vector.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    // SAFETY: both types are 64-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    // Big-endian: reverse the source lanes first so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: both types are 64-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    // SAFETY: both types are 128-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    // Big-endian: reverse the source lanes so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // SAFETY: both types are 128-bit NEON vectors; bitwise reinterpret is valid.
        let ret_val: float64x2_t = transmute(a);
        // Restore big-endian lane order in the result vector.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    // SAFETY: both types are 64-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    // Big-endian: reverse the source lanes first so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    // SAFETY: both types are 64-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    // SAFETY: both types are 128-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    // Big-endian: reverse the source lanes so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // SAFETY: both types are 128-bit NEON vectors; bitwise reinterpret is valid.
        let ret_val: float64x2_t = transmute(a);
        // Restore big-endian lane order in the result vector.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    // SAFETY: both types are 64-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    // Big-endian: reverse the source lanes first so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: both types are 64-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    // SAFETY: both types are 128-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    // Big-endian: reverse the source lanes so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        // SAFETY: both types are 128-bit NEON vectors; bitwise reinterpret is valid.
        let ret_val: float64x2_t = transmute(a);
        // Restore big-endian lane order in the result vector.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t {
    // SAFETY: both are single-lane 64-bit vectors, so no lane reordering is
    // needed on any endianness; a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t {
    // SAFETY: both are single-lane 64-bit vectors, so no lane reordering is
    // needed on any endianness; a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // SAFETY: both types are 128-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // Big-endian: reverse the source lanes so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        // SAFETY: both types are 128-bit NEON vectors; bitwise reinterpret is valid.
        let ret_val: float64x2_t = transmute(a);
        // Restore big-endian lane order in the result vector.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    // SAFETY: both types are 128-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    // Big-endian: reverse the source lanes so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        // SAFETY: both types are 128-bit NEON vectors; bitwise reinterpret is valid.
        let ret_val: poly64x2_t = transmute(a);
        // Restore big-endian lane order in the result vector.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    // SAFETY: both types are 64-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    // Big-endian: reverse the source lanes first so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: both types are 64-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    // SAFETY: both types are 128-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    // Big-endian: reverse the source lanes so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // SAFETY: both types are 128-bit NEON vectors; bitwise reinterpret is valid.
        let ret_val: float64x2_t = transmute(a);
        // Restore big-endian lane order in the result vector.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    // SAFETY: both types are 64-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    // Big-endian: reverse the source lanes first so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    // SAFETY: both types are 64-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    // SAFETY: both types are 128-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    // Big-endian: reverse the source lanes so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // SAFETY: both types are 128-bit NEON vectors; bitwise reinterpret is valid.
        let ret_val: float64x2_t = transmute(a);
        // Restore big-endian lane order in the result vector.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    // SAFETY: both types are 64-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    // The single-lane source needs no input shuffle; only the two-lane
    // result must be put into big-endian lane order.
    unsafe {
        // SAFETY: both types are 64-bit NEON vectors; bitwise reinterpret is valid.
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t {
    // SAFETY: both are single-lane 64-bit vectors, so no lane reordering is
    // needed on any endianness; a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t {
    // SAFETY: both are single-lane 64-bit vectors, so no lane reordering is
    // needed on any endianness; a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t {
    // SAFETY: both are single-lane 64-bit vectors, so no lane reordering is
    // needed on any endianness; a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    // SAFETY: both types are 128-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    // Big-endian: reverse the two source lanes so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        // SAFETY: both types are 128-bit NEON vectors; bitwise reinterpret is valid.
        let ret_val: float32x4_t = transmute(a);
        // Restore big-endian lane order across the four result lanes.
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    // SAFETY: both types are 128-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    // Big-endian: reverse the source lanes so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        // SAFETY: both types are 128-bit NEON vectors; bitwise reinterpret is valid.
        let ret_val: float64x2_t = transmute(a);
        // Restore big-endian lane order in the result vector.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    // SAFETY: both types are 128-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    // Big-endian: reverse the source lanes so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        // SAFETY: both types are 128-bit NEON vectors; bitwise reinterpret is valid.
        let ret_val: int64x2_t = transmute(a);
        // Restore big-endian lane order in the result vector.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    // SAFETY: both types are 128-bit NEON vectors, so a bitwise reinterpret is valid.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    // Big-endian: reverse the source lanes so the raw bytes match the
    // little-endian layout that the reinterpret is defined against.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        // SAFETY: both types are 128-bit NEON vectors; bitwise reinterpret is valid.
        let ret_val: uint64x2_t = transmute(a);
        // Restore big-endian lane order in the result vector.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
    // Binding to the LLVM intrinsic that lowers to the FRINT32X instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f32"
        )]
        fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `frintts` feature required by the intrinsic is enabled via
    // #[target_feature] on this function.
    unsafe { _vrnd32x_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
    // Binding to the LLVM intrinsic that lowers to the FRINT32X instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v4f32"
        )]
        fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `frintts` feature required by the intrinsic is enabled via
    // #[target_feature] on this function.
    unsafe { _vrnd32xq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
    // Binding to the LLVM intrinsic that lowers to the FRINT32X instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f64"
        )]
        fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `frintts` feature required by the intrinsic is enabled via
    // #[target_feature] on this function.
    unsafe { _vrnd32xq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
    // The LLVM intrinsic is scalar (f64 -> f64), so the one-lane vector is
    // unpacked before the call and re-packed afterwards.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32x.f64"
        )]
        fn _vrnd32x_f64(a: f64) -> f64;
    }
    // SAFETY: the `frintts` feature is enabled via #[target_feature]; lane 0
    // always exists in a one-lane vector, and f64 and float64x1_t are the
    // same size for the transmute.
    unsafe { transmute(_vrnd32x_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
    // Binding to the LLVM intrinsic that lowers to the FRINT32Z instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f32"
        )]
        fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `frintts` feature required by the intrinsic is enabled via
    // #[target_feature] on this function.
    unsafe { _vrnd32z_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
    // Binding to the LLVM intrinsic that lowers to the FRINT32Z instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v4f32"
        )]
        fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `frintts` feature required by the intrinsic is enabled via
    // #[target_feature] on this function.
    unsafe { _vrnd32zq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
    // Binding to the LLVM intrinsic that lowers to the FRINT32Z instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f64"
        )]
        fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `frintts` feature required by the intrinsic is enabled via
    // #[target_feature] on this function.
    unsafe { _vrnd32zq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
    // The LLVM intrinsic is scalar (f64 -> f64), so the one-lane vector is
    // unpacked before the call and re-packed afterwards.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32z.f64"
        )]
        fn _vrnd32z_f64(a: f64) -> f64;
    }
    // SAFETY: the `frintts` feature is enabled via #[target_feature]; lane 0
    // always exists in a one-lane vector, and f64 and float64x1_t are the
    // same size for the transmute.
    unsafe { transmute(_vrnd32z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
    // Binding to the LLVM intrinsic that lowers to the FRINT64X instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f32"
        )]
        fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `frintts` feature required by the intrinsic is enabled via
    // #[target_feature] on this function.
    unsafe { _vrnd64x_f32(a) }
}
21722#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
21723#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"]
21724#[inline(always)]
21725#[target_feature(enable = "neon,frintts")]
21726#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
21727#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
21728pub fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
21729    unsafe extern "unadjusted" {
21730        #[cfg_attr(
21731            any(target_arch = "aarch64", target_arch = "arm64ec"),
21732            link_name = "llvm.aarch64.neon.frint64x.v4f32"
21733        )]
21734        fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t;
21735    }
21736    unsafe { _vrnd64xq_f32(a) }
21737}
21738#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
21739#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"]
21740#[inline(always)]
21741#[target_feature(enable = "neon,frintts")]
21742#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
21743#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
21744pub fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
21745    unsafe extern "unadjusted" {
21746        #[cfg_attr(
21747            any(target_arch = "aarch64", target_arch = "arm64ec"),
21748            link_name = "llvm.aarch64.neon.frint64x.v2f64"
21749        )]
21750        fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t;
21751    }
21752    unsafe { _vrnd64xq_f64(a) }
21753}
21754#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
21755#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"]
21756#[inline(always)]
21757#[target_feature(enable = "neon,frintts")]
21758#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
21759#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
21760pub fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
21761    unsafe extern "unadjusted" {
21762        #[cfg_attr(
21763            any(target_arch = "aarch64", target_arch = "arm64ec"),
21764            link_name = "llvm.aarch64.frint64x.f64"
21765        )]
21766        fn _vrnd64x_f64(a: f64) -> f64;
21767    }
21768    unsafe { transmute(_vrnd64x_f64(simd_extract!(a, 0))) }
21769}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
    // Declaration of the raw LLVM intrinsic this wrapper lowers to (FRINT64Z).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f32"
        )]
        fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the required CPU features are enabled via `#[target_feature]`.
    unsafe { _vrnd64z_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v4f32"
        )]
        fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64zq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f64"
        )]
        fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64zq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64z.f64"
        )]
        fn _vrnd64z_f64(a: f64) -> f64;
    }
    // Scalar LLVM intrinsic: extract lane 0, round, rebuild the 1-lane vector.
    unsafe { transmute(_vrnd64z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: elementwise truncation intrinsic on a valid NEON vector;
    // required CPU features are enabled via `#[target_feature]`.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: elementwise round-half-away-from-zero intrinsic on a valid
    // NEON vector; required CPU features enabled via `#[target_feature]`.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndah_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndah_f16(a: f16) -> f16 {
    // Scalar variant: delegates to the half-precision round helper.
    roundf16(a)
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndh_f16(a: f16) -> f16 {
    // Scalar variant: delegates to the half-precision truncate helper.
    truncf16(a)
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f16(a: float16x4_t) -> float16x4_t {
    // FRINTI has no portable SIMD intrinsic, so each vrndi* wrapper binds
    // the matching `llvm.nearbyint.*` intrinsic directly.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f16"
        )]
        fn _vrndi_f16(a: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the required CPU features are enabled via `#[target_feature]`.
    unsafe { _vrndi_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v8f16"
        )]
        fn _vrndiq_f16(a: float16x8_t) -> float16x8_t;
    }
    unsafe { _vrndiq_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f32"
        )]
        fn _vrndi_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrndi_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f32"
        )]
        fn _vrndiq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrndiq_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v1f64"
        )]
        fn _vrndi_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndi_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f64"
        )]
        fn _vrndiq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndiq_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndih_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndih_f16(a: f16) -> f16 {
    // Scalar FRINTI via the half-precision `llvm.nearbyint` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.f16"
        )]
        fn _vrndih_f16(a: f16) -> f16;
    }
    // SAFETY: the required CPU features are enabled via `#[target_feature]`.
    unsafe { _vrndih_f16(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: elementwise floor intrinsic on a valid NEON vector;
    // required CPU features are enabled via `#[target_feature]`.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmh_f16(a: f16) -> f16 {
    // Scalar variant: delegates to the half-precision floor helper.
    floorf16(a)
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndn_f64(a: float64x1_t) -> float64x1_t {
    // FRINTN maps to `llvm.roundeven` (round to nearest, ties to even).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v1f64"
        )]
        fn _vrndn_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the required CPU features are enabled via `#[target_feature]`.
    unsafe { _vrndn_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v2f64"
        )]
        fn _vrndnq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndnq_f64(a) }
}
22179#[doc = "Floating-point round to integral, toward minus infinity"]
22180#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnh_f16)"]
22181#[inline(always)]
22182#[target_feature(enable = "neon,fp16")]
22183#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
22184#[cfg(not(target_arch = "arm64ec"))]
22185#[cfg_attr(test, assert_instr(frintn))]
22186pub fn vrndnh_f16(a: f16) -> f16 {
22187    unsafe extern "unadjusted" {
22188        #[cfg_attr(
22189            any(target_arch = "aarch64", target_arch = "arm64ec"),
22190            link_name = "llvm.roundeven.f16"
22191        )]
22192        fn _vrndnh_f16(a: f16) -> f16;
22193    }
22194    unsafe { _vrndnh_f16(a) }
22195}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndns_f32(a: f32) -> f32 {
    // Scalar FRINTN via the single-precision `llvm.roundeven` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f32"
        )]
        fn _vrndns_f32(a: f32) -> f32;
    }
    // SAFETY: the required CPU features are enabled via `#[target_feature]`.
    unsafe { _vrndns_f32(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: elementwise ceiling intrinsic on a valid NEON vector;
    // required CPU features are enabled via `#[target_feature]`.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndph_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndph_f16(a: f16) -> f16 {
    // Scalar variant: delegates to the half-precision ceiling helper.
    ceilf16(a)
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: elementwise round-ties-even intrinsic on a valid NEON vector;
    // required CPU features are enabled via `#[target_feature]`.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_round_ties_even(a) }
}
22334#[doc = "Floating-point round to integral, using current rounding mode"]
22335#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxh_f16)"]
22336#[inline(always)]
22337#[target_feature(enable = "neon,fp16")]
22338#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
22339#[cfg(not(target_arch = "arm64ec"))]
22340#[cfg_attr(test, assert_instr(frintx))]
22341pub fn vrndxh_f16(a: f16) -> f16 {
22342    round_ties_even_f16(a)
22343}
#[doc = "Signed rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_s64(a: i64, b: i64) -> i64 {
    // Scalar SRSHL: a negative `b` produces a rounding right shift, which
    // vrshrd_n_s64 relies on.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.srshl.i64"
        )]
        fn _vrshld_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: the required CPU features are enabled via `#[target_feature]`.
    unsafe { _vrshld_s64(a, b) }
}
#[doc = "Unsigned rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.urshl.i64"
        )]
        fn _vrshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vrshld_u64(a, b) }
}
#[doc = "Signed rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
    // The shift amount is a compile-time constant in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // A rounding right shift by N is a rounding left shift by -N.
    vrshld_s64(a, -N as i64)
}
#[doc = "Unsigned rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
    // The shift amount is a compile-time constant in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // A rounding right shift by N is a rounding left shift by -N.
    vrshld_u64(a, -N as i64)
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // Narrow `b` with a rounding shift, then concatenate: `a` fills lanes
    // 0..8 (low half), the narrowed result fills lanes 8..16 (high half).
    // SAFETY: all shuffle indices are in-range constants for the
    // 16-lane concatenation of two 8-lane inputs.
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b`, then concatenate `a` (low half) with the result (high half).
    // SAFETY: shuffle indices are in-range constants for the 8-lane result.
    unsafe { simd_shuffle!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b`, then concatenate `a` (low half) with the result (high half).
    // SAFETY: shuffle indices are in-range constants for the 4-lane result.
    unsafe { simd_shuffle!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // Narrow `b` with a rounding shift, then concatenate: `a` fills the low
    // half, the narrowed result fills the high half.
    // SAFETY: all shuffle indices are in-range constants for the
    // 16-lane concatenation of two 8-lane inputs.
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b`, then concatenate `a` (low half) with the result (high half).
    // SAFETY: shuffle indices are in-range constants for the 8-lane result.
    unsafe { simd_shuffle!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b`, then concatenate `a` (low half) with the result (high half).
    // SAFETY: shuffle indices are in-range constants for the 4-lane result.
    unsafe { simd_shuffle!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
    // Thin wrapper over the FRSQRTE intrinsic for a 1-lane f64 vector;
    // the "unadjusted" ABI passes NEON vector types through unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v1f64"
        )]
        fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: no preconditions beyond the `neon` target feature gate above.
    unsafe { _vrsqrte_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
    // Thin wrapper over the FRSQRTE intrinsic for a 2-lane f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v2f64"
        )]
        fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: no preconditions beyond the `neon` target feature gate above.
    unsafe { _vrsqrteq_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrted_f64(a: f64) -> f64 {
    // Scalar (d-register) form of the FRSQRTE estimate.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f64"
        )]
        fn _vrsqrted_f64(a: f64) -> f64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature gate above.
    unsafe { _vrsqrted_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtes_f32(a: f32) -> f32 {
    // Scalar (s-register) form of the FRSQRTE estimate.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f32"
        )]
        fn _vrsqrtes_f32(a: f32) -> f32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature gate above.
    unsafe { _vrsqrtes_f32(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frsqrte))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrsqrteh_f16(a: f16) -> f16 {
    // Scalar (h-register) form of the FRSQRTE estimate; requires fp16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f16"
        )]
        fn _vrsqrteh_f16(a: f16) -> f16;
    }
    // SAFETY: no preconditions beyond the target features enforced above.
    unsafe { _vrsqrteh_f16(a) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Newton-Raphson step (FRSQRTS) for a 1-lane f64 vector, used to
    // refine the FRSQRTE estimate.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v1f64"
        )]
        fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: no preconditions beyond the `neon` target feature gate above.
    unsafe { _vrsqrts_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // FRSQRTS refinement step for a 2-lane f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v2f64"
        )]
        fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: no preconditions beyond the `neon` target feature gate above.
    unsafe { _vrsqrtsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
    // Scalar (d-register) FRSQRTS refinement step.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f64"
        )]
        fn _vrsqrtsd_f64(a: f64, b: f64) -> f64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature gate above.
    unsafe { _vrsqrtsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
    // Scalar (s-register) FRSQRTS refinement step.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f32"
        )]
        fn _vrsqrtss_f32(a: f32, b: f32) -> f32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature gate above.
    unsafe { _vrsqrtss_f32(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 {
    // Scalar (h-register) FRSQRTS refinement step; requires fp16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f16"
        )]
        fn _vrsqrtsh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: no preconditions beyond the target features enforced above.
    unsafe { _vrsqrtsh_f16(a, b) }
}
#[doc = "Signed rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    // Rounding-shift `b` right by N, then accumulate into `a` with
    // wrapping add (matching the instruction's modular arithmetic).
    let b: i64 = vrshrd_n_s64::<N>(b);
    a.wrapping_add(b)
}
#[doc = "Unsigned rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    // Rounding-shift `b` right by N, then accumulate into `a` with
    // wrapping add (matching the instruction's modular arithmetic).
    let b: u64 = vrshrd_n_u64::<N>(b);
    a.wrapping_add(b)
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Little-endian variant: narrow `b - c`, then concatenate `a` (low half)
    // with the narrowed result (high half).
    let x: int8x8_t = vrsubhn_s16(b, c);
    // SAFETY: shuffle indices are in-range constants for the 16-lane result.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    // Little-endian variant: narrow `b - c`, then concatenate with `a`.
    let x: int16x4_t = vrsubhn_s32(b, c);
    // SAFETY: shuffle indices are in-range constants for the 8-lane result.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    // Little-endian variant: narrow `b - c`, then concatenate with `a`.
    let x: int32x2_t = vrsubhn_s64(b, c);
    // SAFETY: shuffle indices are in-range constants for the 4-lane result.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    // Little-endian variant: narrow `b - c`, then concatenate with `a`.
    let x: uint8x8_t = vrsubhn_u16(b, c);
    // SAFETY: shuffle indices are in-range constants for the 16-lane result.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    // Little-endian variant: narrow `b - c`, then concatenate with `a`.
    let x: uint16x4_t = vrsubhn_u32(b, c);
    // SAFETY: shuffle indices are in-range constants for the 8-lane result.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    // Little-endian variant: narrow `b - c`, then concatenate with `a`.
    let x: uint32x2_t = vrsubhn_u64(b, c);
    // SAFETY: shuffle indices are in-range constants for the 4-lane result.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Big-endian variant: same semantics as the little-endian one, but the
    // codegen test only pins RSUBHN (lane reordering changes the shuffle).
    let x: int8x8_t = vrsubhn_s16(b, c);
    // SAFETY: shuffle indices are in-range constants for the 16-lane result.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    // Big-endian variant: narrow `b - c`, then concatenate with `a`.
    let x: int16x4_t = vrsubhn_s32(b, c);
    // SAFETY: shuffle indices are in-range constants for the 8-lane result.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    // Big-endian variant: narrow `b - c`, then concatenate with `a`.
    let x: int32x2_t = vrsubhn_s64(b, c);
    // SAFETY: shuffle indices are in-range constants for the 4-lane result.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    // Big-endian variant: narrow `b - c`, then concatenate with `a`.
    let x: uint8x8_t = vrsubhn_u16(b, c);
    // SAFETY: shuffle indices are in-range constants for the 16-lane result.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    // Big-endian variant: narrow `b - c`, then concatenate with `a`.
    let x: uint16x4_t = vrsubhn_u32(b, c);
    // SAFETY: shuffle indices are in-range constants for the 8-lane result.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    // Big-endian variant: narrow `b - c`, then concatenate with `a`.
    let x: uint32x2_t = vrsubhn_u64(b, c);
    // SAFETY: shuffle indices are in-range constants for the 4-lane result.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f16)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t {
    // FSCALE wrapper: per-lane exponent adjustment of `vn` by the integer
    // amounts in `vm`; requires the fp8 feature.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v4f16"
        )]
        fn _vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t;
    }
    // SAFETY: no preconditions beyond the target features enforced above.
    unsafe { _vscale_f16(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f16)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t {
    // FSCALE wrapper for the 8-lane f16 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v8f16"
        )]
        fn _vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t;
    }
    // SAFETY: no preconditions beyond the target features enforced above.
    unsafe { _vscaleq_f16(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f32)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t {
    // FSCALE wrapper for the 2-lane f32 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v2f32"
        )]
        fn _vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t;
    }
    // SAFETY: no preconditions beyond the target features enforced above.
    unsafe { _vscale_f32(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f32)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t {
    // FSCALE wrapper for the 4-lane f32 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v4f32"
        )]
        fn _vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t;
    }
    // SAFETY: no preconditions beyond the target features enforced above.
    unsafe { _vscaleq_f32(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f64)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t {
    // FSCALE wrapper for the 2-lane f64 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v2f64"
        )]
        fn _vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t;
    }
    // SAFETY: no preconditions beyond the target features enforced above.
    unsafe { _vscaleq_f64(vn, vm) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
    // The 1-lane vector admits only LANE == 0, checked at compile time.
    static_assert!(LANE == 0);
    // SAFETY: LANE is validated above, so the insert index is in bounds.
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
    // LANE must fit in 1 bit (0 or 1) for the 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is validated above, so the insert index is in bounds.
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "SHA512 hash update part 2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h2))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Thin wrapper over the SHA512H2 crypto intrinsic; requires sha3.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h2"
        )]
        fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: no preconditions beyond the target features enforced above.
    unsafe { _vsha512h2q_u64(a, b, c) }
}
#[doc = "SHA512 hash update part 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Thin wrapper over the SHA512H crypto intrinsic; requires sha3.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h"
        )]
        fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: no preconditions beyond the target features enforced above.
    unsafe { _vsha512hq_u64(a, b, c) }
}
#[doc = "SHA512 schedule update 0"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su0))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Thin wrapper over the SHA512SU0 crypto intrinsic; requires sha3.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su0"
        )]
        fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: no preconditions beyond the target features enforced above.
    unsafe { _vsha512su0q_u64(a, b) }
}
#[doc = "SHA512 schedule update 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Thin wrapper over the SHA512SU1 crypto intrinsic; requires sha3.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su1"
        )]
        fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: no preconditions beyond the target features enforced above.
    unsafe { _vsha512su1q_u64(a, b, c) }
}
#[doc = "Signed Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_s64(a: i64, b: i64) -> i64 {
    // Scalar form implemented via the one-lane vector intrinsic.
    // SAFETY: i64 and the 64-bit one-lane NEON vector type are the same
    // size, so the round-trip transmutes are valid.
    unsafe { transmute(vshl_s64(transmute(a), transmute(b))) }
}
#[doc = "Unsigned Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_u64(a: u64, b: i64) -> u64 {
    // Scalar form implemented via the one-lane vector intrinsic.
    // SAFETY: u64/i64 and the 64-bit one-lane NEON vector types are the
    // same size, so the round-trip transmutes are valid.
    unsafe { transmute(vshl_u64(transmute(a), transmute(b))) }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
    // Unlike the narrowing shifts, N = 0 is permitted here.
    static_assert!(N >= 0 && N <= 8);
    // SAFETY: shuffle indices 8..16 select the high half of the 16-lane input.
    unsafe {
        let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        // Widen-shift the extracted high half.
        vshll_n_s8::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
    // Unlike the narrowing shifts, N = 0 is permitted here.
    static_assert!(N >= 0 && N <= 16);
    // SAFETY: shuffle indices 4..8 select the high half of the 8-lane input.
    unsafe {
        let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        // Widen-shift the extracted high half.
        vshll_n_s16::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
    // Unlike the narrowing shifts, N = 0 is permitted here.
    static_assert!(N >= 0 && N <= 32);
    // SAFETY: shuffle indices 2..4 select the high half of the 4-lane input.
    unsafe {
        let b: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        // Widen-shift the extracted high half.
        vshll_n_s32::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
    // Unlike the narrowing shifts, N = 0 is permitted here.
    static_assert!(N >= 0 && N <= 8);
    // SAFETY: shuffle indices 8..16 select the high half of the 16-lane input.
    unsafe {
        let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        // Widen-shift the extracted high half.
        vshll_n_u8::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
    // Shift amount is limited to the source element width (16 bits).
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        // Extract the high half of `a` (lanes 4..=7), then widen-shift it
        // via the 64-bit-vector variant; together this matches USHLL2.
        let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_u16::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
    // Shift amount is limited to the source element width (32 bits).
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        // Extract the high half of `a` (lanes 2..=3), then widen-shift it
        // via the 64-bit-vector variant; together this matches USHLL2.
        let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_u32::<N>(b)
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Right-shift amount for 16-bit source lanes is restricted to 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` with the 64-bit-vector variant, then concatenate `a`
        // (low half) with the narrowed result (high half); matches SHRN2.
        simd_shuffle!(
            a,
            vshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Right-shift amount for 32-bit source lanes is restricted to 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b`, then concatenate `a` (low half) with the result (high half).
    unsafe { simd_shuffle!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Right-shift amount for 64-bit source lanes is restricted to 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b`, then concatenate `a` (low half) with the result (high half).
    unsafe { simd_shuffle!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Right-shift amount for 16-bit source lanes is restricted to 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` with the 64-bit-vector variant, then concatenate `a`
        // (low half) with the narrowed result (high half); matches SHRN2.
        simd_shuffle!(
            a,
            vshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Right-shift amount for 32-bit source lanes is restricted to 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b`, then concatenate `a` (low half) with the result (high half).
    unsafe { simd_shuffle!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Right-shift amount for 64-bit source lanes is restricted to 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b`, then concatenate `a` (low half) with the result (high half).
    unsafe { simd_shuffle!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Insert-shift amount for 8-bit lanes must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    unsafe extern "unadjusted" {
        // Binding to the LLVM intrinsic implementing SLI for 8x8-bit lanes.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i8"
        )]
        fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    // The const generic N is forwarded as a runtime argument to the intrinsic.
    unsafe { _vsli_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Insert-shift amount for 8-bit lanes must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    unsafe extern "unadjusted" {
        // Binding to the LLVM intrinsic implementing SLI for 16x8-bit lanes.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v16i8"
        )]
        fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    // The const generic N is forwarded as a runtime argument to the intrinsic.
    unsafe { _vsliq_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // Insert-shift amount for 16-bit lanes must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    unsafe extern "unadjusted" {
        // Binding to the LLVM intrinsic implementing SLI for 4x16-bit lanes.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i16"
        )]
        fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    // The const generic N is forwarded as a runtime argument to the intrinsic.
    unsafe { _vsli_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Insert-shift amount for 16-bit lanes must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    unsafe extern "unadjusted" {
        // Binding to the LLVM intrinsic implementing SLI for 8x16-bit lanes.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i16"
        )]
        fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
    }
    // The const generic N is forwarded as a runtime argument to the intrinsic.
    unsafe { _vsliq_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Insert-shift amount for 32-bit lanes is restricted to 0..=31.
    static_assert!(N >= 0 && N <= 31);
    unsafe extern "unadjusted" {
        // Binding to the LLVM intrinsic implementing SLI for 2x32-bit lanes.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i32"
        )]
        fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
    }
    // The const generic N is forwarded as a runtime argument to the intrinsic.
    unsafe { _vsli_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Insert-shift amount for 32-bit lanes is restricted to 0..=31.
    static_assert!(N >= 0 && N <= 31);
    unsafe extern "unadjusted" {
        // Binding to the LLVM intrinsic implementing SLI for 4x32-bit lanes.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i32"
        )]
        fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
    }
    // The const generic N is forwarded as a runtime argument to the intrinsic.
    unsafe { _vsliq_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    // Insert-shift amount for 64-bit lanes is restricted to 0..=63.
    static_assert!(N >= 0 && N <= 63);
    unsafe extern "unadjusted" {
        // Binding to the LLVM intrinsic implementing SLI for a single 64-bit lane.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v1i64"
        )]
        fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
    }
    // The const generic N is forwarded as a runtime argument to the intrinsic.
    unsafe { _vsli_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Insert-shift amount for 64-bit lanes is restricted to 0..=63.
    static_assert!(N >= 0 && N <= 63);
    unsafe extern "unadjusted" {
        // Binding to the LLVM intrinsic implementing SLI for 2x64-bit lanes.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i64"
        )]
        fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
    }
    // The const generic N is forwarded as a runtime argument to the intrinsic.
    unsafe { _vsliq_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // Insert-shift amount for 8-bit lanes must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // SLI operates on raw bit patterns, so reuse the signed implementation
    // by transmuting between same-sized vector types.
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Insert-shift amount for 8-bit lanes must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // SLI operates on raw bit patterns, so reuse the signed implementation
    // by transmuting between same-sized vector types.
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // Insert-shift amount for 16-bit lanes must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    // SLI operates on raw bit patterns, so reuse the signed implementation
    // by transmuting between same-sized vector types.
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Insert-shift amount for 16-bit lanes must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    // SLI operates on raw bit patterns, so reuse the signed implementation
    // by transmuting between same-sized vector types.
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Insert-shift amount for 32-bit lanes is restricted to 0..=31.
    static_assert!(N >= 0 && N <= 31);
    // SLI operates on raw bit patterns, so reuse the signed implementation
    // by transmuting between same-sized vector types.
    unsafe { transmute(vsli_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Insert-shift amount for 32-bit lanes is restricted to 0..=31.
    static_assert!(N >= 0 && N <= 31);
    // SLI operates on raw bit patterns, so reuse the signed implementation
    // by transmuting between same-sized vector types.
    unsafe { transmute(vsliq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Insert-shift amount for 64-bit lanes is restricted to 0..=63.
    static_assert!(N >= 0 && N <= 63);
    // SLI operates on raw bit patterns, so reuse the signed implementation
    // by transmuting between same-sized vector types.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Insert-shift amount for 64-bit lanes is restricted to 0..=63.
    static_assert!(N >= 0 && N <= 63);
    // SLI operates on raw bit patterns, so reuse the signed implementation
    // by transmuting between same-sized vector types.
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // Insert-shift amount for 8-bit lanes must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // SLI operates on raw bit patterns, so reuse the signed implementation
    // by transmuting between same-sized vector types.
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // Insert-shift amount for 8-bit lanes must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // SLI operates on raw bit patterns, so reuse the signed implementation
    // by transmuting between same-sized vector types.
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // Insert-shift amount for 16-bit lanes must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    // SLI operates on raw bit patterns, so reuse the signed implementation
    // by transmuting between same-sized vector types.
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // Insert-shift amount for 16-bit lanes must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    // SLI operates on raw bit patterns, so reuse the signed implementation
    // by transmuting between same-sized vector types.
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    // Insert-shift amount for 64-bit lanes is restricted to 0..=63.
    // NOTE: poly64 types require the `aes` feature in addition to `neon`.
    static_assert!(N >= 0 && N <= 63);
    // SLI operates on raw bit patterns, so reuse the signed implementation
    // by transmuting between same-sized vector types.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Insert-shift amount for 64-bit lanes is restricted to 0..=63.
    // NOTE: poly64 types require the `aes` feature in addition to `neon`.
    static_assert!(N >= 0 && N <= 63);
    // SLI operates on raw bit patterns, so reuse the signed implementation
    // by transmuting between same-sized vector types.
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    // Insert-shift amount for a 64-bit scalar is restricted to 0..=63.
    static_assert!(N >= 0 && N <= 63);
    // Scalar form: view each i64 as a one-lane vector and reuse vsli_n_s64.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    // Insert-shift amount for a 64-bit scalar is restricted to 0..=63.
    static_assert!(N >= 0 && N <= 63);
    // Scalar form: view each u64 as a one-lane vector and reuse vsli_n_u64.
    unsafe { transmute(vsli_n_u64::<N>(transmute(a), transmute(b))) }
}
#[doc = "SM3PARTW1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        // Binding to the LLVM crypto intrinsic for the SM3PARTW1 instruction.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw1"
        )]
        fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // Thin forwarding call; requires the `sm4` target feature enabled above.
    unsafe { _vsm3partw1q_u32(a, b, c) }
}
#[doc = "SM3PARTW2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw2))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        // Binding to the LLVM crypto intrinsic for the SM3PARTW2 instruction.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw2"
        )]
        fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // Thin forwarding call; requires the `sm4` target feature enabled above.
    unsafe { _vsm3partw2q_u32(a, b, c) }
}
#[doc = "SM3SS1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3ss1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        // Binding to the LLVM crypto intrinsic for the SM3SS1 instruction.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3ss1"
        )]
        fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // Thin forwarding call; requires the `sm4` target feature enabled above.
    unsafe { _vsm3ss1q_u32(a, b, c) }
}
#[doc = "SM3TT1A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // The lane selector immediate must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        // Binding to the LLVM crypto intrinsic for the SM3TT1A instruction.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1a"
        )]
        fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // The const immediate is widened to i64 as expected by the intrinsic.
    unsafe { _vsm3tt1aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT1B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // The lane selector immediate must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        // Binding to the LLVM crypto intrinsic for the SM3TT1B instruction.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1b"
        )]
        fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // The const immediate is widened to i64 as expected by the intrinsic.
    unsafe { _vsm3tt1bq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // The lane selector immediate must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        // Binding to the LLVM crypto intrinsic for the SM3TT2A instruction.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2a"
        )]
        fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // The const immediate is widened to i64 as expected by the intrinsic.
    unsafe { _vsm3tt2aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // The lane selector immediate must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        // Binding to the LLVM crypto intrinsic for the SM3TT2B instruction.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2b"
        )]
        fn _vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // The const immediate is widened to i64 as expected by the intrinsic.
    unsafe { _vsm3tt2bq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM4 key"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4ekey))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        // Binding to the LLVM crypto intrinsic for the SM4EKEY instruction.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4ekey"
        )]
        fn _vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // Thin forwarding call; requires the `sm4` target feature enabled above.
    unsafe { _vsm4ekeyq_u32(a, b) }
}
#[doc = "SM4 encode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4e))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        // Binding to the LLVM crypto intrinsic for the SM4E instruction.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4e"
        )]
        fn _vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // Thin forwarding call; requires the `sm4` target feature enabled above.
    unsafe { _vsm4eq_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
    unsafe extern "unadjusted" {
        // Binding to the LLVM intrinsic implementing USQADD on 8x8-bit lanes.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i8"
        )]
        fn _vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
    }
    // Thin forwarding call; requires the `neon` target feature enabled above.
    unsafe { _vsqadd_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        // Binding to the LLVM intrinsic implementing USQADD on 16x8-bit lanes.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v16i8"
        )]
        fn _vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
    }
    // Thin forwarding call; requires the `neon` target feature enabled above.
    unsafe { _vsqaddq_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        // Binding to the LLVM intrinsic implementing USQADD on 4x16-bit lanes.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v4i16"
        )]
        fn _vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
    }
    // Thin forwarding call; requires the `neon` target feature enabled above.
    unsafe { _vsqadd_u16(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        // Binding to the LLVM intrinsic implementing USQADD on 8x16-bit lanes.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i16"
        )]
        fn _vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
    }
    // Thin forwarding call; requires the `neon` target feature enabled above.
    unsafe { _vsqaddq_u16(a, b) }
}
23676#[doc = "Unsigned saturating Accumulate of Signed value."]
23677#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"]
23678#[inline(always)]
23679#[target_feature(enable = "neon")]
23680#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23681#[cfg_attr(test, assert_instr(usqadd))]
23682pub fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
23683    unsafe extern "unadjusted" {
23684        #[cfg_attr(
23685            any(target_arch = "aarch64", target_arch = "arm64ec"),
23686            link_name = "llvm.aarch64.neon.usqadd.v2i32"
23687        )]
23688        fn _vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
23689    }
23690    unsafe { _vsqadd_u32(a, b) }
23691}
23692#[doc = "Unsigned saturating Accumulate of Signed value."]
23693#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"]
23694#[inline(always)]
23695#[target_feature(enable = "neon")]
23696#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23697#[cfg_attr(test, assert_instr(usqadd))]
23698pub fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
23699    unsafe extern "unadjusted" {
23700        #[cfg_attr(
23701            any(target_arch = "aarch64", target_arch = "arm64ec"),
23702            link_name = "llvm.aarch64.neon.usqadd.v4i32"
23703        )]
23704        fn _vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
23705    }
23706    unsafe { _vsqaddq_u32(a, b) }
23707}
23708#[doc = "Unsigned saturating Accumulate of Signed value."]
23709#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"]
23710#[inline(always)]
23711#[target_feature(enable = "neon")]
23712#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23713#[cfg_attr(test, assert_instr(usqadd))]
23714pub fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
23715    unsafe extern "unadjusted" {
23716        #[cfg_attr(
23717            any(target_arch = "aarch64", target_arch = "arm64ec"),
23718            link_name = "llvm.aarch64.neon.usqadd.v1i64"
23719        )]
23720        fn _vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
23721    }
23722    unsafe { _vsqadd_u64(a, b) }
23723}
23724#[doc = "Unsigned saturating Accumulate of Signed value."]
23725#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"]
23726#[inline(always)]
23727#[target_feature(enable = "neon")]
23728#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23729#[cfg_attr(test, assert_instr(usqadd))]
23730pub fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
23731    unsafe extern "unadjusted" {
23732        #[cfg_attr(
23733            any(target_arch = "aarch64", target_arch = "arm64ec"),
23734            link_name = "llvm.aarch64.neon.usqadd.v2i64"
23735        )]
23736        fn _vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
23737    }
23738    unsafe { _vsqaddq_u64(a, b) }
23739}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddb_u8(a: u8, b: i8) -> u8 {
    // Scalar form implemented via the vector intrinsic: broadcast both
    // scalars to 8-lane vectors, do the saturating add, take lane 0.
    // SAFETY: lane index 0 is always in bounds for an 8-lane vector.
    unsafe { simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddh_u16(a: u16, b: i16) -> u16 {
    // Same broadcast-then-extract strategy as vsqaddb_u8, for 16-bit lanes.
    // SAFETY: lane index 0 is always in bounds for a 4-lane vector.
    unsafe { simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddd_u64(a: u64, b: i64) -> u64 {
    // Scalar 64-bit form binds to the scalar (`i64`) LLVM intrinsic rather
    // than going through a vector, unlike the 8/16-bit scalar variants.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i64"
        )]
        fn _vsqaddd_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: value-in/value-out intrinsic — no pointer arguments and no
    // memory side effects.
    unsafe { _vsqaddd_u64(a, b) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqadds_u32(a: u32, b: i32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i32"
        )]
        fn _vsqadds_u32(a: u32, b: i32) -> u32;
    }
    // SAFETY: as above — value-only scalar intrinsic.
    unsafe { _vsqadds_u32(a, b) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: `simd_fsqrt` is a pure lane-wise operation on a float vector;
    // it takes and returns values only, with no memory access.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrtq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: as above — pure lane-wise float operation.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: as above — pure lane-wise float operation.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: as above — pure lane-wise float operation.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: as above — pure lane-wise float operation.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: as above — pure lane-wise float operation.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Floating-point square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrth_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fsqrt))]
pub fn vsqrth_f16(a: f16) -> f16 {
    // NOTE(review): the generated doc string said "round to integral, using
    // current rounding mode", which contradicts the body (`sqrtf16`), the
    // `fsqrt` assert_instr, and the linked vsqrth_f16 Arm page — corrected
    // above; the spec in crates/stdarch-gen-arm should be fixed too.
    sqrtf16(a)
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // SRI's immediate shift must be 1..=element width (8 bits here);
    // enforced at compile time.
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: the macro performs bit-level shift/insert arithmetic on plain
    // integer lanes of `a` and `b` — no memory access. (See
    // `super::shift_right_and_insert!` for the expansion.)
    unsafe { super::shift_right_and_insert!(u8, 8, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: as above — value-only lane arithmetic.
    unsafe { super::shift_right_and_insert!(u8, 16, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: as above — value-only lane arithmetic.
    unsafe { super::shift_right_and_insert!(u16, 4, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: as above — value-only lane arithmetic.
    unsafe { super::shift_right_and_insert!(u16, 8, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: as above — value-only lane arithmetic.
    unsafe { super::shift_right_and_insert!(u32, 2, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: as above — value-only lane arithmetic.
    unsafe { super::shift_right_and_insert!(u32, 4, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: as above — value-only lane arithmetic.
    unsafe { super::shift_right_and_insert!(u64, 1, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: as above — value-only lane arithmetic.
    unsafe { super::shift_right_and_insert!(u64, 2, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert!(N >= 1 && N <= 8);
    // SRI is a pure bit operation, so the unsigned variant reuses the signed
    // one. SAFETY: transmute between vector types of identical size and
    // layout (uint8x8_t <-> int8x8_t).
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: same-size vector transmute (uint8x16_t <-> int8x16_t).
    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size vector transmute (uint16x4_t <-> int16x4_t).
    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size vector transmute (uint16x8_t <-> int16x8_t).
    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: same-size vector transmute (uint32x2_t <-> int32x2_t).
    unsafe { transmute(vsri_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: same-size vector transmute (uint32x4_t <-> int32x4_t).
    unsafe { transmute(vsriq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: same-size vector transmute (uint64x1_t <-> int64x1_t).
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: same-size vector transmute (uint64x2_t <-> int64x2_t).
    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert!(N >= 1 && N <= 8);
    // Polynomial variants reuse the signed implementation — SRI operates on
    // raw bits. SAFETY: transmute between vector types of identical size and
    // layout (poly8x8_t <-> int8x8_t).
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: same-size vector transmute (poly8x16_t <-> int8x16_t).
    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size vector transmute (poly16x4_t <-> int16x4_t).
    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size vector transmute (poly16x8_t <-> int16x8_t).
    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    static_assert!(N >= 1 && N <= 64);
    // Note: poly64 types additionally require the `aes` feature (see
    // target_feature above).
    // SAFETY: same-size vector transmute (poly64x1_t <-> int64x1_t).
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: same-size vector transmute (poly64x2_t <-> int64x2_t).
    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(bfxil, N = 2))]
pub fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    // Scalar form delegates to the 1-lane vector variant.
    // SAFETY: i64 and int64x1_t have the same size; transmute round-trips the
    // scalar through the single-lane vector type.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(bfxil, N = 2))]
pub fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: as above — same-size scalar <-> 1-lane vector transmute.
    unsafe { transmute(vsri_n_u64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) {
    // Caller contract (unsafe fn): `ptr` must be valid for a write of the
    // full vector. The store is unaligned (`write_unaligned`), so no
    // alignment beyond 1 byte is required — hence the clippy allow above.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) {
    // Caller contract: `ptr` valid for an unaligned write of the full vector.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) {
    // Caller contract: `ptr` valid for an unaligned write of the full vector.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) {
    // Caller contract: `ptr` valid for an unaligned write of the full vector.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) {
    // Caller contract: `ptr` valid for an unaligned write of the full vector.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) {
    // Caller contract: `ptr` valid for an unaligned write of the full vector.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) {
    // Caller contract (unsafe fn): `ptr` must be valid for a write of the
    // full vector; no alignment requirement (`write_unaligned`).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) {
    // Caller contract: `ptr` valid for an unaligned write of the full vector.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) {
    // Caller contract: `ptr` valid for an unaligned write of the full vector.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) {
    // Caller contract: `ptr` valid for an unaligned write of the full vector.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) {
    // Caller contract: `ptr` valid for an unaligned write of the full vector.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) {
    // Caller contract: `ptr` valid for an unaligned write of the full vector.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) {
    // Caller contract: `ptr` valid for an unaligned write of the full vector.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) {
    // Caller contract: `ptr` valid for an unaligned write of the full vector.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) {
    // Plain unaligned vector store; lowers to the `str` checked above.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) {
    // Plain unaligned vector store; lowers to the `str` checked above.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) {
    // Plain unaligned vector store; lowers to the `str` checked above.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) {
    // Plain unaligned vector store; lowers to the `str` checked above.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) {
    // Plain unaligned vector store; lowers to the `str` checked above.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) {
    // Plain unaligned vector store; lowers to the `str` checked above.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) {
    // Plain unaligned vector store; lowers to the `str` checked above.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) {
    // Plain unaligned vector store; lowers to the `str` checked above.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) {
    // Plain unaligned vector store; lowers to the `str` checked above.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) {
    // Plain unaligned vector store; lowers to the `str` checked above.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) {
    // Plain unaligned vector store; lowers to the `str` checked above.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
    // Plain unaligned vector store; lowers to the `str` checked above.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// `aes` is required in addition to `neon` because the poly64 types are
// only available with the crypto extensions.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
    // Plain unaligned vector store; lowers to the `str` checked above.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// `aes` gates the poly64 vector types, matching vst1_p64.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) {
    // Plain unaligned vector store; lowers to the `str` checked above.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) {
    // Forwarded to the LLVM `st1x2` intrinsic; the "unadjusted" ABI takes
    // the register operands first and the destination pointer last.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v1f64.p0"
        )]
        fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64);
    }
    _vst1_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) {
    // Forwarded to the LLVM `st1x2` intrinsic (registers first, pointer last).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v2f64.p0"
        )]
        fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64);
    }
    _vst1q_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) {
    // Forwarded to the LLVM `st1x3` intrinsic (registers first, pointer last).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v1f64.p0"
        )]
        fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64);
    }
    _vst1_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) {
    // Forwarded to the LLVM `st1x3` intrinsic (registers first, pointer last).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v2f64.p0"
        )]
        fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64);
    }
    _vst1q_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) {
    // Forwarded to the LLVM `st1x4` intrinsic (registers first, pointer last).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v1f64.p0"
        )]
        fn _vst1_f64_x4(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            ptr: *mut f64,
        );
    }
    _vst1_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) {
    // Forwarded to the LLVM `st1x4` intrinsic (registers first, pointer last).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v2f64.p0"
        )]
        fn _vst1q_f64_x4(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            ptr: *mut f64,
        );
    }
    _vst1q_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
    // Only lane 0 exists in a 1-element vector; the bound is enforced at
    // compile time, then the lane is extracted and written as a scalar.
    static_assert!(LANE == 0);
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
    // LANE must fit in 1 bit (0..=1), checked at compile time; the lane is
    // extracted and written as a scalar.
    static_assert_uimm_bits!(LANE, 1);
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(stp))]
pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) {
    // With 1-element vectors there is nothing to interleave: the whole
    // two-register struct is written with one unaligned store (`stp`).
    // NOTE(review): uses `core::ptr` where the vst1 stores above use
    // `crate::ptr` — identical in core, but the generator spec could unify.
    core::ptr::write_unaligned(a.cast(), b)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
    // LANE is validated at compile time, then passed to the LLVM st2lane
    // intrinsic as a runtime i64; the pointer goes last (unadjusted ABI).
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1f64.p0"
        )]
        fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8);
    }
    _vst2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
    // LANE is validated at compile time, then forwarded to the LLVM
    // st2lane intrinsic as a runtime i64 (pointer last, unadjusted ABI).
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1i64.p0"
        )]
        fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8);
    }
    _vst2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
    // poly64 has the same bit pattern as i64: forward to the signed variant.
    static_assert!(LANE == 0);
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
    // u64 has the same bit pattern as i64: forward to the signed variant.
    static_assert!(LANE == 0);
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
    // Shared macro: interleaved store of 2 registers, 2 lanes of f64 each;
    // codegen is checked to be `st2` by assert_instr above.
    crate::core_arch::macros::interleaving_store!(f64, 2, 2, a, b)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) {
    // Shared macro: interleaved store of 2 registers, 2 lanes of i64 each
    // (checked to lower to `st2`).
    crate::core_arch::macros::interleaving_store!(i64, 2, 2, a, b)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
    // LANE must fit in 1 bit; forwarded to the LLVM st2lane intrinsic
    // (lane index as runtime i64, pointer last).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2f64.p0"
        )]
        fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
    // LANE must fit in 4 bits (0..=15 for 16-lane vectors); forwarded to
    // the LLVM st2lane intrinsic.
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v16i8.p0"
        )]
        fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
    // LANE must fit in 1 bit; forwarded to the LLVM st2lane intrinsic.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2i64.p0"
        )]
        fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
    // poly64 has the same bit pattern as i64: forward to the signed variant.
    static_assert_uimm_bits!(LANE, 1);
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
    // u8 has the same bit pattern as i8: forward to the signed variant.
    static_assert_uimm_bits!(LANE, 4);
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
    // u64 has the same bit pattern as i64: forward to the signed variant.
    static_assert_uimm_bits!(LANE, 1);
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
    // poly8 has the same bit pattern as i8: forward to the signed variant.
    static_assert_uimm_bits!(LANE, 4);
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) {
    // poly64 has the same bit pattern as i64: forward to the signed variant.
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) {
    // u64 has the same bit pattern as i64: forward to the signed variant.
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) {
    // With 1-element vectors there is nothing to interleave: the whole
    // three-register struct is written with one unaligned store (no
    // dedicated instruction; checked as `nop`).
    // NOTE(review): uses `core::ptr` where the vst1 stores above use
    // `crate::ptr` — identical in core, but the generator spec could unify.
    core::ptr::write_unaligned(a.cast(), b)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
    // LANE is validated at compile time, then forwarded to the LLVM
    // st3lane intrinsic as a runtime i64 (pointer last, unadjusted ABI).
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1f64.p0"
        )]
        fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8);
    }
    _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
    // LANE is validated at compile time, then forwarded to the LLVM
    // st3lane intrinsic as a runtime i64.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1i64.p0"
        )]
        fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8);
    }
    _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
    // poly64 has the same bit pattern as i64: forward to the signed variant.
    static_assert!(LANE == 0);
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
    // u64 has the same bit pattern as i64: forward to the signed variant.
    static_assert!(LANE == 0);
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
    // Shared macro: interleaved store of 3 registers, 2 lanes of f64 each
    // (checked to lower to `st3`).
    crate::core_arch::macros::interleaving_store!(f64, 2, 3, a, b)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) {
    // Shared macro: interleaved store of 3 registers, 2 lanes of i64 each
    // (checked to lower to `st3`).
    crate::core_arch::macros::interleaving_store!(i64, 2, 3, a, b)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
    // LANE must fit in 1 bit; forwarded to the LLVM st3lane intrinsic
    // (lane index as runtime i64, pointer last).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2f64.p0"
        )]
        fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8);
    }
    _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
    // int8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // Direct binding to the LLVM aarch64 st3lane intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v16i8.p0"
        )]
        fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8);
    }
    // The intrinsic takes the destination as an untyped byte pointer.
    _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // Direct binding to the LLVM aarch64 st3lane intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2i64.p0"
        )]
        fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8);
    }
    // The intrinsic takes the destination as an untyped byte pointer.
    _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
    // poly64x2_t has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // p64 and i64 vectors have identical layout; reinterpret and delegate to
    // the signed implementation.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
    // uint8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // u8 and i8 vectors have identical layout; reinterpret and delegate to
    // the signed implementation.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
    // uint64x2_t has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 vectors have identical layout; reinterpret and delegate to
    // the signed implementation.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
    // poly8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // p8 and i8 vectors have identical layout; reinterpret and delegate to
    // the signed implementation.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) {
    // p64 and i64 vectors have identical layout; reinterpret and delegate to
    // the signed implementation.
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) {
    // u64 and i64 vectors have identical layout; reinterpret and delegate to
    // the signed implementation.
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) {
    // With one lane per vector, "interleaving" four 1-element structures is
    // just a contiguous copy of the four f64 values; no special instruction
    // is needed (tests assert a plain `nop`, i.e. ordinary stores).
    core::ptr::write_unaligned(a.cast(), b)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
    // float64x1_t has a single lane, so LANE must be 0.
    static_assert!(LANE == 0);
    // Direct binding to the LLVM aarch64 st4lane intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1f64.p0"
        )]
        fn _vst4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // The intrinsic takes the destination as an untyped byte pointer.
    _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
    // int64x1_t has a single lane, so LANE must be 0.
    static_assert!(LANE == 0);
    // Direct binding to the LLVM aarch64 st4lane intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1i64.p0"
        )]
        fn _vst4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // The intrinsic takes the destination as an untyped byte pointer.
    _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
    // poly64x1_t has a single lane, so LANE must be 0.
    static_assert!(LANE == 0);
    // p64 and i64 vectors have identical layout; reinterpret and delegate to
    // the signed implementation.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
    // uint64x1_t has a single lane, so LANE must be 0.
    static_assert!(LANE == 0);
    // u64 and i64 vectors have identical layout; reinterpret and delegate to
    // the signed implementation.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
    // Interleave the elements of the four 2-lane f64 vectors and store them
    // contiguously at `a` (ST4 semantics), via the shared crate macro.
    crate::core_arch::macros::interleaving_store!(f64, 2, 4, a, b)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) {
    // Interleave the elements of the four 2-lane i64 vectors and store them
    // contiguously at `a` (ST4 semantics), via the shared crate macro.
    crate::core_arch::macros::interleaving_store!(i64, 2, 4, a, b)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // Direct binding to the LLVM aarch64 st4lane intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2f64.p0"
        )]
        fn _vst4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // The intrinsic takes the destination as an untyped byte pointer.
    _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
    // int8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // Direct binding to the LLVM aarch64 st4lane intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v16i8.p0"
        )]
        fn _vst4q_lane_s8(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // The intrinsic takes the destination as an untyped byte pointer.
    _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // Direct binding to the LLVM aarch64 st4lane intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2i64.p0"
        )]
        fn _vst4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // The intrinsic takes the destination as an untyped byte pointer.
    _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
    // poly64x2_t has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // p64 and i64 vectors have identical layout; reinterpret and delegate to
    // the signed implementation.
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
    // uint8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // u8 and i8 vectors have identical layout; reinterpret and delegate to
    // the signed implementation.
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
    // uint64x2_t has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 vectors have identical layout; reinterpret and delegate to
    // the signed implementation.
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
    // poly8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // p8 and i8 vectors have identical layout; reinterpret and delegate to
    // the signed implementation.
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) {
    // p64 and i64 vectors have identical layout; reinterpret and delegate to
    // the signed implementation.
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) {
    // u64 and i64 vectors have identical layout; reinterpret and delegate to
    // the signed implementation.
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1_lane_f64<const LANE: i32>(ptr: *mut f64, val: float64x1_t) {
    // float64x1_t has a single lane, so LANE must be 0.
    static_assert!(LANE == 0);
    // The f64 bits are stored verbatim: reinterpret as an i64 vector and
    // reuse the signed release-store implementation.
    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1q_lane_f64<const LANE: i32>(ptr: *mut f64, val: float64x2_t) {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // The f64 bits are stored verbatim: reinterpret as an i64 vector and
    // reuse the signed release-store implementation.
    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1_lane_u64<const LANE: i32>(ptr: *mut u64, val: uint64x1_t) {
    // uint64x1_t has a single lane, so LANE must be 0.
    static_assert!(LANE == 0);
    // u64 and i64 have identical layout: reinterpret and reuse the signed
    // release-store implementation.
    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1q_lane_u64<const LANE: i32>(ptr: *mut u64, val: uint64x2_t) {
    // uint64x2_t has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 have identical layout: reinterpret and reuse the signed
    // release-store implementation.
    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1_lane_p64<const LANE: i32>(ptr: *mut p64, val: poly64x1_t) {
    // poly64x1_t has a single lane, so LANE must be 0.
    static_assert!(LANE == 0);
    // p64 and i64 have identical layout: reinterpret and reuse the signed
    // release-store implementation.
    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1q_lane_p64<const LANE: i32>(ptr: *mut p64, val: poly64x2_t) {
    // poly64x2_t has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // p64 and i64 have identical layout: reinterpret and reuse the signed
    // release-store implementation.
    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
25410#[doc = "Store-Release a single-element structure from one lane of one register."]
25411#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_s64)"]
25412#[inline(always)]
25413#[target_feature(enable = "neon,rcpc3")]
25414#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
25415#[rustc_legacy_const_generics(2)]
25416#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
25417#[cfg(target_has_atomic = "64")]
25418pub fn vstl1_lane_s64<const LANE: i32>(ptr: *mut i64, val: int64x1_t) {
25419    static_assert!(LANE == 0);
25420    let atomic_dst = ptr as *mut crate::sync::atomic::AtomicI64;
25421    unsafe {
25422        let lane: i64 = simd_extract!(val, LANE as u32);
25423        (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release)
25424    }
25425}
25426#[doc = "Store-Release a single-element structure from one lane of one register."]
25427#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_s64)"]
25428#[inline(always)]
25429#[target_feature(enable = "neon,rcpc3")]
25430#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
25431#[rustc_legacy_const_generics(2)]
25432#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
25433#[cfg(target_has_atomic = "64")]
25434pub fn vstl1q_lane_s64<const LANE: i32>(ptr: *mut i64, val: int64x2_t) {
25435    static_assert_uimm_bits!(LANE, 1);
25436    let atomic_dst = ptr as *mut crate::sync::atomic::AtomicI64;
25437    unsafe {
25438        let lane: i64 = simd_extract!(val, LANE as u32);
25439        (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release)
25440    }
25441}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lane-wise floating-point subtraction: a - b.
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lane-wise floating-point subtraction: a - b.
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sub))]
pub fn vsubd_s64(a: i64, b: i64) -> i64 {
    // Scalar subtract with two's-complement wraparound, matching the
    // non-trapping SUB instruction.
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sub))]
pub fn vsubd_u64(a: u64, b: u64) -> u64 {
    // Scalar subtract with modular wraparound, matching the non-trapping
    // SUB instruction.
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsubh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision subtraction; tests assert it lowers to FSUB.
    a - b
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Select the high halves (lanes 8..=15) of both inputs...
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        // ...sign-extend each lane to 16 bits...
        let d: int16x8_t = simd_cast(c);
        let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int16x8_t = simd_cast(e);
        // ...and subtract in the widened type.
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Select the high halves (lanes 4..=7) of both inputs...
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        // ...sign-extend each lane to 32 bits...
        let d: int32x4_t = simd_cast(c);
        let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: int32x4_t = simd_cast(e);
        // ...and subtract in the widened type.
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Select the high halves (lanes 2..=3) of both inputs...
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        // ...sign-extend each lane to 64 bits...
        let d: int64x2_t = simd_cast(c);
        let e: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: int64x2_t = simd_cast(e);
        // ...and subtract in the widened type.
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Select the high halves (lanes 8..=15) of both inputs...
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        // ...zero-extend each lane to 16 bits...
        let d: uint16x8_t = simd_cast(c);
        let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint16x8_t = simd_cast(e);
        // ...and subtract in the widened type.
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Select the high halves (lanes 4..=7) of both inputs...
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        // ...zero-extend each lane to 32 bits...
        let d: uint32x4_t = simd_cast(c);
        let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: uint32x4_t = simd_cast(e);
        // ...and subtract in the widened type.
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Select the high halves (lanes 2..=3) of both inputs...
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        // ...zero-extend each lane to 64 bits...
        let d: uint64x2_t = simd_cast(c);
        let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: uint64x2_t = simd_cast(e);
        // ...and subtract in the widened type.
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Take the high half (lanes 8..=15) of `b`, sign-extend it to 16
        // bits, and subtract it from the already-wide `a`.
        let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Take the high half (lanes 4..=7) of `b`, sign-extend it to 32
        // bits, and subtract it from the already-wide `a`.
        let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Widen the upper half (lanes 2..=3) of `b` from i32 to i64, then
        // subtract it from the already-wide `a` (SSUBW2 pattern).
        let c: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Widen the upper half (lanes 8..=15) of `b` from u8 to u16, then
        // subtract it from the already-wide `a` (USUBW2 pattern).
        let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Widen the upper half (lanes 4..=7) of `b` from u16 to u32, then
        // subtract it from the already-wide `a` (USUBW2 pattern).
        let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Widen the upper half (lanes 2..=3) of `b` from u32 to u64, then
        // subtract it from the already-wide `a` (USUBW2 pattern).
        let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Pad the 8-byte table `a` to 16 bytes with a zeroed upper half so it
    // can be fed to the 128-bit vqtbl1_s8; indices `b` are reinterpreted
    // as the unsigned index vector vqtbl1_s8 expects.
    vqtbl1_s8(vcombine_s8(a, unsafe { crate::mem::zeroed() }), unsafe {
        {
            transmute(b)
        }
    })
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // Pad the 8-byte table `a` to 16 bytes with zeros and delegate to the
    // 128-bit vqtbl1_u8; `b` is already the right index type.
    vqtbl1_u8(vcombine_u8(a, unsafe { crate::mem::zeroed() }), b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t {
    // Pad the 8-byte table `a` to 16 bytes with zeros and delegate to the
    // 128-bit vqtbl1_p8; `b` is already the right index type.
    vqtbl1_p8(vcombine_p8(a, unsafe { crate::mem::zeroed() }), b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t {
    // The two 8-byte tables form exactly one 16-byte table, so this maps
    // directly onto a single 128-bit table look-up (vqtbl1).
    unsafe { vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    // The two 8-byte tables form exactly one 16-byte table, so this maps
    // directly onto a single 128-bit table look-up (vqtbl1).
    unsafe { transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    // The two 8-byte tables form exactly one 16-byte table, so this maps
    // directly onto a single 128-bit table look-up (vqtbl1).
    unsafe { transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t {
    // Pack the three 8-byte tables into two 16-byte registers, zero-padding
    // the last 8 bytes, then perform a two-register look-up (vqtbl2).
    let x = int8x16x2_t(
        vcombine_s8(a.0, a.1),
        vcombine_s8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    // Pack the three 8-byte tables into two 16-byte registers, zero-padding
    // the last 8 bytes, then perform a two-register look-up (vqtbl2).
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    // Pack the three 8-byte tables into two 16-byte registers, zero-padding
    // the last 8 bytes, then perform a two-register look-up (vqtbl2).
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t {
    // Four 8-byte tables pack exactly into two 16-byte registers, so this
    // maps directly onto a two-register look-up (vqtbl2) with no padding.
    let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    // Four 8-byte tables pack exactly into two 16-byte registers, so this
    // maps directly onto a two-register look-up (vqtbl2) with no padding.
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    // Four 8-byte tables pack exactly into two 16-byte registers, so this
    // maps directly onto a two-register look-up (vqtbl2) with no padding.
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
    unsafe {
        // The 8-byte table is zero-padded to 16 bytes for vqtbx1, which
        // would treat indices 8..=15 as in range; the simd_select masks
        // those lanes (c >= 8) back to the fallback value `a`.
        simd_select(
            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_s8(b, crate::mem::zeroed())),
                transmute(c),
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        // The 8-byte table is zero-padded to 16 bytes for vqtbx1, which
        // would treat indices 8..=15 as in range; the simd_select masks
        // those lanes (c >= 8) back to the fallback value `a`.
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_u8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        // The 8-byte table is zero-padded to 16 bytes for vqtbx1, which
        // would treat indices 8..=15 as in range; the simd_select masks
        // those lanes (c >= 8) back to the fallback value `a`.
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_p8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t {
    // The two 8-byte tables form exactly one 16-byte table, so a single
    // vqtbx1 implements the full extended look-up with no masking needed.
    unsafe { vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    // The two 8-byte tables form exactly one 16-byte table, so a single
    // vqtbx1 implements the full extended look-up with no masking needed.
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    // The two 8-byte tables form exactly one 16-byte table, so a single
    // vqtbx1 implements the full extended look-up with no masking needed.
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t {
    // Pack the three 8-byte tables into two 16-byte registers, zero-padding
    // the last 8 bytes for vqtbx2.
    let x = int8x16x2_t(
        vcombine_s8(b.0, b.1),
        vcombine_s8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        // vqtbx2 treats indices 24..=31 (the zero padding) as in range, so
        // the simd_select masks lanes with c >= 24 back to the fallback `a`.
        transmute(simd_select(
            simd_lt::<int8x8_t, int8x8_t>(transmute(c), transmute(i8x8::splat(24))),
            transmute(vqtbx2(
                transmute(a),
                transmute(x.0),
                transmute(x.1),
                transmute(c),
            )),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    // Pack the three 8-byte tables into two 16-byte registers, zero-padding
    // the last 8 bytes for vqtbx2.
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        // vqtbx2 treats indices 24..=31 (the zero padding) as in range, so
        // the simd_select masks lanes with c >= 24 back to the fallback `a`.
        transmute(simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    // Pack the three 8-byte tables into two 16-byte registers, zero-padding
    // the last 8 bytes for vqtbx2.
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        // vqtbx2 treats indices 24..=31 (the zero padding) as in range, so
        // the simd_select masks lanes with c >= 24 back to the fallback `a`.
        transmute(simd_select(
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t {
    // Four 8-byte tables pack exactly into two 16-byte registers, so a
    // single vqtbx2 implements the extended look-up — no padding, and no
    // select mask is needed (unlike the vtbx1/vtbx3 variants above).
    unsafe {
        vqtbx2(
            transmute(a),
            transmute(vcombine_s8(b.0, b.1)),
            transmute(vcombine_s8(b.2, b.3)),
            transmute(c),
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    // Four 8-byte tables pack exactly into two 16-byte registers, so a
    // single vqtbx2 implements the extended look-up with no padding or
    // select mask needed.
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    // Four 8-byte tables pack exactly into two 16-byte registers, so a
    // single vqtbx2 implements the extended look-up with no padding or
    // select mask needed.
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Interleave the even-numbered lanes: [a0, b0, a2, b2] (TRN1).
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Interleave the even-numbered lanes: [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Result is [a0, b0]; with only two lanes the transpose coincides with
    // a zip, hence the assert_instr expects ZIP1 rather than TRN1.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Result is [a0, b0]; two-lane transpose coincides with ZIP1.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Result is [a0, b0]; two-lane transpose coincides with ZIP1.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Result is [a0, b0]; two-lane transpose coincides with ZIP1.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Result is [a0, b0]; two-lane transpose coincides with ZIP1.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Result is [a0, b0]; two-lane transpose coincides with ZIP1.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Result is [a0, b0]; two-lane transpose coincides with ZIP1.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Interleave the even-numbered lanes: [a0, b0, a2, b2] (TRN1).
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Interleave the even-numbered lanes: [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Interleave the even-numbered lanes of the two inputs (TRN1):
    // [a0, b0, a2, b2, ..., a14, b14].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // Interleave the even-numbered lanes: [a0, b0, a2, b2] (TRN1).
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Interleave the even-numbered lanes: [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Interleave the even-numbered lanes: [a0, b0, a2, b2] (TRN1).
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // Interleave the even-numbered lanes: [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Interleave the even-numbered lanes of the two inputs (TRN1):
    // [a0, b0, a2, b2, ..., a14, b14].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // Interleave the even-numbered lanes: [a0, b0, a2, b2] (TRN1).
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Interleave the even-numbered lanes: [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Interleave the even-numbered lanes: [a0, b0, a2, b2] (TRN1).
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // Interleave the even-numbered lanes: [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // Interleave the even-numbered lanes of the two inputs (TRN1):
    // [a0, b0, a2, b2, ..., a14, b14].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // Interleave the even-numbered lanes: [a0, b0, a2, b2] (TRN1).
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // Interleave the even-numbered lanes: [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Interleave the odd-numbered lanes: [a1, b1, a3, b3] (TRN2).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Interleave the odd-numbered lanes: [a1, b1, a3, b3, a5, b5, a7, b7].
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Result is [a1, b1]; with only two lanes the transpose coincides with
    // a zip, hence the assert_instr expects ZIP2 rather than TRN2.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Result is [a1, b1]; two-lane transpose coincides with ZIP2.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Result is [a1, b1]; two-lane transpose coincides with ZIP2.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Result is [a1, b1]; two-lane transpose coincides with ZIP2.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Result is [a[1], b[1]]: the upper lane of each input. For 2-lane
    // vectors the TRN2 and ZIP2 permutations coincide, hence the zip2 assert.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Result is [a[1], b[1]]: the upper lane of each input. For 2-lane
    // vectors the TRN2 and ZIP2 permutations coincide, hence the zip2 assert.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Result is [a[1], b[1]]: the upper lane of each input. For 2-lane
    // vectors the TRN2 and ZIP2 permutations coincide, hence the zip2 assert.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // TRN2: odd lanes of a interleaved with odd lanes of b, i.e.
    // [a[1], b[1], a[3], b[3]] (shuffle indices >= 4 select from b).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // TRN2: odd lanes of a interleaved with odd lanes of b
    // (shuffle indices >= 8 select from b).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // TRN2: odd lanes of a interleaved with odd lanes of b
    // (shuffle indices >= 16 select from b).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // TRN2: odd lanes of a interleaved with odd lanes of b, i.e.
    // [a[1], b[1], a[3], b[3]] (shuffle indices >= 4 select from b).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // TRN2: odd lanes of a interleaved with odd lanes of b
    // (shuffle indices >= 8 select from b).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // TRN2: odd lanes of a interleaved with odd lanes of b, i.e.
    // [a[1], b[1], a[3], b[3]] (shuffle indices >= 4 select from b).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // TRN2: odd lanes of a interleaved with odd lanes of b
    // (shuffle indices >= 8 select from b).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // TRN2: odd lanes of a interleaved with odd lanes of b
    // (shuffle indices >= 16 select from b).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // TRN2: odd lanes of a interleaved with odd lanes of b, i.e.
    // [a[1], b[1], a[3], b[3]] (shuffle indices >= 4 select from b).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // TRN2: odd lanes of a interleaved with odd lanes of b
    // (shuffle indices >= 8 select from b).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // TRN2: odd lanes of a interleaved with odd lanes of b, i.e.
    // [a[1], b[1], a[3], b[3]] (shuffle indices >= 4 select from b).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // TRN2: odd lanes of a interleaved with odd lanes of b
    // (shuffle indices >= 8 select from b).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // TRN2: odd lanes of a interleaved with odd lanes of b
    // (shuffle indices >= 16 select from b).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // TRN2: odd lanes of a interleaved with odd lanes of b, i.e.
    // [a[1], b[1], a[3], b[3]] (shuffle indices >= 4 select from b).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // TRN2: odd lanes of a interleaved with odd lanes of b
    // (shuffle indices >= 8 select from b).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe {
        // AND the inputs, then compare against zero: a result lane is
        // all-ones when (a & b) has any bit set, all-zeros otherwise (CMTST).
        let c: int64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe {
        // AND the inputs, then compare each lane against zero: a result lane
        // is all-ones when (a & b) has any bit set, all-zeros otherwise (CMTST).
        let c: int64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    unsafe {
        // AND the inputs, then compare against zero: a result lane is
        // all-ones when (a & b) has any bit set, all-zeros otherwise (CMTST).
        let c: poly64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    unsafe {
        // AND the inputs, then compare each lane against zero: a result lane
        // is all-ones when (a & b) has any bit set, all-zeros otherwise (CMTST).
        let c: poly64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe {
        // AND the inputs, then compare against zero: a result lane is
        // all-ones when (a & b) has any bit set, all-zeros otherwise (CMTST).
        let c: uint64x1_t = simd_and(a, b);
        let d: u64x1 = u64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe {
        // AND the inputs, then compare each lane against zero: a result lane
        // is all-ones when (a & b) has any bit set, all-zeros otherwise (CMTST).
        let c: uint64x2_t = simd_and(a, b);
        let d: u64x2 = u64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_s64(a: i64, b: i64) -> u64 {
    // Scalar form: reinterpret each i64 as a 1-lane vector, run the vector
    // bit test, and reinterpret the single result lane back to a u64.
    unsafe { transmute(vtst_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_u64(a: u64, b: u64) -> u64 {
    // Scalar form: reinterpret each u64 as a 1-lane vector, run the vector
    // bit test, and reinterpret the single result lane back to a u64.
    unsafe { transmute(vtst_u64(transmute(a), transmute(b))) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t {
    // Thin wrapper over the LLVM suqadd intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i8"
        )]
        fn _vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: value-in/value-out intrinsic call; no pointers involved.
    unsafe { _vuqadd_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Thin wrapper over the LLVM suqadd intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v16i8"
        )]
        fn _vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: value-in/value-out intrinsic call; no pointers involved.
    unsafe { _vuqaddq_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t {
    // Thin wrapper over the LLVM suqadd intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i16"
        )]
        fn _vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t;
    }
    // SAFETY: value-in/value-out intrinsic call; no pointers involved.
    unsafe { _vuqadd_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t {
    // Thin wrapper over the LLVM suqadd intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i16"
        )]
        fn _vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t;
    }
    // SAFETY: value-in/value-out intrinsic call; no pointers involved.
    unsafe { _vuqaddq_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t {
    // Thin wrapper over the LLVM suqadd intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i32"
        )]
        fn _vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t;
    }
    // SAFETY: value-in/value-out intrinsic call; no pointers involved.
    unsafe { _vuqadd_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t {
    // Thin wrapper over the LLVM suqadd intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i32"
        )]
        fn _vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t;
    }
    // SAFETY: value-in/value-out intrinsic call; no pointers involved.
    unsafe { _vuqaddq_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t {
    // Thin wrapper over the LLVM suqadd intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v1i64"
        )]
        fn _vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t;
    }
    // SAFETY: value-in/value-out intrinsic call; no pointers involved.
    unsafe { _vuqadd_s64(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t {
    // Thin wrapper over the LLVM suqadd intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i64"
        )]
        fn _vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t;
    }
    // SAFETY: value-in/value-out intrinsic call; no pointers involved.
    unsafe { _vuqaddq_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddb_s8(a: i8, b: u8) -> i8 {
    // Scalar form: splat both scalars into 8-lane vectors, perform the
    // vector saturating accumulate, then extract lane 0 as the result.
    unsafe { simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddh_s16(a: i16, b: u16) -> i16 {
    // Scalar form: splat both scalars into 4-lane vectors, perform the
    // vector saturating accumulate, then extract lane 0 as the result.
    unsafe { simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddd_s64(a: i64, b: u64) -> i64 {
    // Thin wrapper over the scalar (i64) LLVM suqadd intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i64"
        )]
        fn _vuqaddd_s64(a: i64, b: u64) -> i64;
    }
    // SAFETY: value-in/value-out intrinsic call; no pointers involved.
    unsafe { _vuqaddd_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqadds_s32(a: i32, b: u32) -> i32 {
    // Thin wrapper over the scalar (i32) LLVM suqadd intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i32"
        )]
        fn _vuqadds_s32(a: i32, b: u32) -> i32;
    }
    // SAFETY: value-in/value-out intrinsic call; no pointers involved.
    unsafe { _vuqadds_s32(a, b) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // UZP1: even-numbered lanes of the concatenation a:b
    // (shuffle indices >= 4 select from b).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // UZP1: even-numbered lanes of the concatenation a:b
    // (shuffle indices >= 8 select from b).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Result is [a[0], b[0]]: the low lane of each input. For 2-lane
    // vectors the UZP1 and ZIP1 permutations coincide, hence the zip1 assert.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Result is [a[0], b[0]]: the low lane of each input. For 2-lane
    // vectors the UZP1 and ZIP1 permutations coincide, hence the zip1 assert.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Result is [a[0], b[0]]: the low lane of each input. For 2-lane
    // vectors the UZP1 and ZIP1 permutations coincide, hence the zip1 assert.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Result is [a[0], b[0]]: the low lane of each input. For 2-lane
    // vectors the UZP1 and ZIP1 permutations coincide, hence the zip1 assert.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Result is [a[0], b[0]]: the low lane of each input. For 2-lane
    // vectors the UZP1 and ZIP1 permutations coincide, hence the zip1 assert.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Result is [a[0], b[0]]: the low lane of each input. For 2-lane
    // vectors the UZP1 and ZIP1 permutations coincide, hence the zip1 assert.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Result is [a[0], b[0]]: the low lane of each input. For 2-lane
    // vectors the UZP1 and ZIP1 permutations coincide, hence the zip1 assert.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // UZP1: even-numbered lanes of the concatenation a:b
    // (shuffle indices >= 4 select from b).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // UZP1: even-numbered lanes of the concatenation a:b
    // (shuffle indices >= 8 select from b).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // UZP1: even-numbered lanes of the concatenation a:b
    // (shuffle indices >= 16 select from b).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // UZP1: even-numbered lanes of the concatenation a:b
    // (shuffle indices >= 4 select from b).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // UZP1: even-numbered lanes of the concatenation a:b
    // (shuffle indices >= 8 select from b).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // UZP1: even-numbered lanes of the concatenation a:b
    // (shuffle indices >= 4 select from b).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // UZP1: even-numbered lanes of the concatenation a:b
    // (shuffle indices >= 8 select from b).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // UZP1: even-numbered lanes of the concatenation a:b
    // (shuffle indices >= 16 select from b).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // UZP1: even-numbered lanes of the concatenation a:b
    // (shuffle indices >= 4 select from b).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // UZP1: even-numbered lanes of the concatenation a:b
    // (shuffle indices >= 8 select from b).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // UZP1: even-numbered lanes of the concatenation a:b
    // (shuffle indices >= 4 select from b).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // UZP1: even-numbered lanes of the concatenation a:b
    // (shuffle indices >= 8 select from b).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
26920#[doc = "Unzip vectors"]
26921#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"]
26922#[inline(always)]
26923#[target_feature(enable = "neon")]
26924#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26925#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
26926pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
26927    unsafe {
26928        simd_shuffle!(
26929            a,
26930            b,
26931            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
26932        )
26933    }
26934}
26935#[doc = "Unzip vectors"]
26936#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"]
26937#[inline(always)]
26938#[target_feature(enable = "neon")]
26939#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26940#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
26941pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
26942    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
26943}
26944#[doc = "Unzip vectors"]
26945#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"]
26946#[inline(always)]
26947#[target_feature(enable = "neon")]
26948#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26949#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
26950pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
26951    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
26952}
26953#[doc = "Unzip vectors"]
26954#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"]
26955#[inline(always)]
26956#[target_feature(enable = "neon,fp16")]
26957#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
26958#[cfg(not(target_arch = "arm64ec"))]
26959#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
26960pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
26961    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
26962}
26963#[doc = "Unzip vectors"]
26964#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"]
26965#[inline(always)]
26966#[target_feature(enable = "neon,fp16")]
26967#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
26968#[cfg(not(target_arch = "arm64ec"))]
26969#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
26970pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
26971    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
26972}
26973#[doc = "Unzip vectors"]
26974#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"]
26975#[inline(always)]
26976#[target_feature(enable = "neon")]
26977#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26978#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
26979pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
26980    unsafe { simd_shuffle!(a, b, [1, 3]) }
26981}
26982#[doc = "Unzip vectors"]
26983#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"]
26984#[inline(always)]
26985#[target_feature(enable = "neon")]
26986#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26987#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
26988pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
26989    unsafe { simd_shuffle!(a, b, [1, 3]) }
26990}
26991#[doc = "Unzip vectors"]
26992#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"]
26993#[inline(always)]
26994#[target_feature(enable = "neon")]
26995#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26996#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
26997pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
26998    unsafe { simd_shuffle!(a, b, [1, 3]) }
26999}
27000#[doc = "Unzip vectors"]
27001#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"]
27002#[inline(always)]
27003#[target_feature(enable = "neon")]
27004#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27005#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27006pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
27007    unsafe { simd_shuffle!(a, b, [1, 3]) }
27008}
27009#[doc = "Unzip vectors"]
27010#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"]
27011#[inline(always)]
27012#[target_feature(enable = "neon")]
27013#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27014#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27015pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
27016    unsafe { simd_shuffle!(a, b, [1, 3]) }
27017}
27018#[doc = "Unzip vectors"]
27019#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"]
27020#[inline(always)]
27021#[target_feature(enable = "neon")]
27022#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27023#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27024pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
27025    unsafe { simd_shuffle!(a, b, [1, 3]) }
27026}
27027#[doc = "Unzip vectors"]
27028#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"]
27029#[inline(always)]
27030#[target_feature(enable = "neon")]
27031#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27032#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27033pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
27034    unsafe { simd_shuffle!(a, b, [1, 3]) }
27035}
27036#[doc = "Unzip vectors"]
27037#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"]
27038#[inline(always)]
27039#[target_feature(enable = "neon")]
27040#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27041#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
27042pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
27043    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
27044}
27045#[doc = "Unzip vectors"]
27046#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"]
27047#[inline(always)]
27048#[target_feature(enable = "neon")]
27049#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27050#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
27051pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
27052    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
27053}
27054#[doc = "Unzip vectors"]
27055#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"]
27056#[inline(always)]
27057#[target_feature(enable = "neon")]
27058#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27059#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
27060pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
27061    unsafe {
27062        simd_shuffle!(
27063            a,
27064            b,
27065            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
27066        )
27067    }
27068}
27069#[doc = "Unzip vectors"]
27070#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"]
27071#[inline(always)]
27072#[target_feature(enable = "neon")]
27073#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27074#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
27075pub fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
27076    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
27077}
27078#[doc = "Unzip vectors"]
27079#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"]
27080#[inline(always)]
27081#[target_feature(enable = "neon")]
27082#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27083#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
27084pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
27085    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
27086}
27087#[doc = "Unzip vectors"]
27088#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"]
27089#[inline(always)]
27090#[target_feature(enable = "neon")]
27091#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27092#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
27093pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
27094    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
27095}
27096#[doc = "Unzip vectors"]
27097#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"]
27098#[inline(always)]
27099#[target_feature(enable = "neon")]
27100#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27101#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
27102pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
27103    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
27104}
27105#[doc = "Unzip vectors"]
27106#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"]
27107#[inline(always)]
27108#[target_feature(enable = "neon")]
27109#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27110#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
27111pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
27112    unsafe {
27113        simd_shuffle!(
27114            a,
27115            b,
27116            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
27117        )
27118    }
27119}
27120#[doc = "Unzip vectors"]
27121#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"]
27122#[inline(always)]
27123#[target_feature(enable = "neon")]
27124#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27125#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
27126pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
27127    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
27128}
27129#[doc = "Unzip vectors"]
27130#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"]
27131#[inline(always)]
27132#[target_feature(enable = "neon")]
27133#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27134#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
27135pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
27136    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
27137}
27138#[doc = "Unzip vectors"]
27139#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"]
27140#[inline(always)]
27141#[target_feature(enable = "neon")]
27142#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27143#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
27144pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
27145    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
27146}
27147#[doc = "Unzip vectors"]
27148#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"]
27149#[inline(always)]
27150#[target_feature(enable = "neon")]
27151#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27152#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
27153pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
27154    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
27155}
27156#[doc = "Unzip vectors"]
27157#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"]
27158#[inline(always)]
27159#[target_feature(enable = "neon")]
27160#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27161#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
27162pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
27163    unsafe {
27164        simd_shuffle!(
27165            a,
27166            b,
27167            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
27168        )
27169    }
27170}
27171#[doc = "Unzip vectors"]
27172#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"]
27173#[inline(always)]
27174#[target_feature(enable = "neon")]
27175#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27176#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
27177pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
27178    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
27179}
27180#[doc = "Unzip vectors"]
27181#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"]
27182#[inline(always)]
27183#[target_feature(enable = "neon")]
27184#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27185#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
27186pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
27187    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
27188}
27189#[doc = "Exclusive OR and rotate"]
27190#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"]
27191#[inline(always)]
27192#[target_feature(enable = "neon,sha3")]
27193#[cfg_attr(test, assert_instr(xar, IMM6 = 0))]
27194#[rustc_legacy_const_generics(2)]
27195#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
27196pub fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
27197    static_assert_uimm_bits!(IMM6, 6);
27198    unsafe extern "unadjusted" {
27199        #[cfg_attr(
27200            any(target_arch = "aarch64", target_arch = "arm64ec"),
27201            link_name = "llvm.aarch64.crypto.xar"
27202        )]
27203        fn _vxarq_u64(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t;
27204    }
27205    unsafe { _vxarq_u64(a, b, IMM6 as i64) }
27206}
27207#[doc = "Zip vectors"]
27208#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"]
27209#[inline(always)]
27210#[target_feature(enable = "neon,fp16")]
27211#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
27212#[cfg(not(target_arch = "arm64ec"))]
27213#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27214pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
27215    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
27216}
27217#[doc = "Zip vectors"]
27218#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"]
27219#[inline(always)]
27220#[target_feature(enable = "neon,fp16")]
27221#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
27222#[cfg(not(target_arch = "arm64ec"))]
27223#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27224pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
27225    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
27226}
27227#[doc = "Zip vectors"]
27228#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"]
27229#[inline(always)]
27230#[target_feature(enable = "neon")]
27231#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27232#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27233pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
27234    unsafe { simd_shuffle!(a, b, [0, 2]) }
27235}
27236#[doc = "Zip vectors"]
27237#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"]
27238#[inline(always)]
27239#[target_feature(enable = "neon")]
27240#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27241#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27242pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
27243    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
27244}
27245#[doc = "Zip vectors"]
27246#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"]
27247#[inline(always)]
27248#[target_feature(enable = "neon")]
27249#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27250#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27251pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
27252    unsafe { simd_shuffle!(a, b, [0, 2]) }
27253}
27254#[doc = "Zip vectors"]
27255#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"]
27256#[inline(always)]
27257#[target_feature(enable = "neon")]
27258#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27259#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27260pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
27261    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
27262}
27263#[doc = "Zip vectors"]
27264#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"]
27265#[inline(always)]
27266#[target_feature(enable = "neon")]
27267#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27268#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27269pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
27270    unsafe {
27271        simd_shuffle!(
27272            a,
27273            b,
27274            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
27275        )
27276    }
27277}
27278#[doc = "Zip vectors"]
27279#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"]
27280#[inline(always)]
27281#[target_feature(enable = "neon")]
27282#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27283#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27284pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
27285    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
27286}
27287#[doc = "Zip vectors"]
27288#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"]
27289#[inline(always)]
27290#[target_feature(enable = "neon")]
27291#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27292#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27293pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
27294    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
27295}
27296#[doc = "Zip vectors"]
27297#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"]
27298#[inline(always)]
27299#[target_feature(enable = "neon")]
27300#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27301#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27302pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
27303    unsafe { simd_shuffle!(a, b, [0, 2]) }
27304}
27305#[doc = "Zip vectors"]
27306#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"]
27307#[inline(always)]
27308#[target_feature(enable = "neon")]
27309#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27310#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27311pub fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
27312    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
27313}
27314#[doc = "Zip vectors"]
27315#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"]
27316#[inline(always)]
27317#[target_feature(enable = "neon")]
27318#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27319#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27320pub fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
27321    unsafe { simd_shuffle!(a, b, [0, 2]) }
27322}
27323#[doc = "Zip vectors"]
27324#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"]
27325#[inline(always)]
27326#[target_feature(enable = "neon")]
27327#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27328#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27329pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
27330    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
27331}
27332#[doc = "Zip vectors"]
27333#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"]
27334#[inline(always)]
27335#[target_feature(enable = "neon")]
27336#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27337#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27338pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
27339    unsafe {
27340        simd_shuffle!(
27341            a,
27342            b,
27343            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
27344        )
27345    }
27346}
27347#[doc = "Zip vectors"]
27348#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"]
27349#[inline(always)]
27350#[target_feature(enable = "neon")]
27351#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27352#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27353pub fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
27354    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
27355}
27356#[doc = "Zip vectors"]
27357#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"]
27358#[inline(always)]
27359#[target_feature(enable = "neon")]
27360#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27361#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27362pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
27363    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
27364}
27365#[doc = "Zip vectors"]
27366#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"]
27367#[inline(always)]
27368#[target_feature(enable = "neon")]
27369#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27370#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27371pub fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
27372    unsafe { simd_shuffle!(a, b, [0, 2]) }
27373}
27374#[doc = "Zip vectors"]
27375#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"]
27376#[inline(always)]
27377#[target_feature(enable = "neon")]
27378#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27379#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27380pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
27381    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
27382}
27383#[doc = "Zip vectors"]
27384#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"]
27385#[inline(always)]
27386#[target_feature(enable = "neon")]
27387#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27388#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27389pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
27390    unsafe { simd_shuffle!(a, b, [0, 2]) }
27391}
27392#[doc = "Zip vectors"]
27393#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"]
27394#[inline(always)]
27395#[target_feature(enable = "neon")]
27396#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27397#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27398pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
27399    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
27400}
27401#[doc = "Zip vectors"]
27402#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"]
27403#[inline(always)]
27404#[target_feature(enable = "neon")]
27405#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27406#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27407pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
27408    unsafe {
27409        simd_shuffle!(
27410            a,
27411            b,
27412            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
27413        )
27414    }
27415}
27416#[doc = "Zip vectors"]
27417#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"]
27418#[inline(always)]
27419#[target_feature(enable = "neon")]
27420#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27421#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27422pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
27423    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
27424}
27425#[doc = "Zip vectors"]
27426#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"]
27427#[inline(always)]
27428#[target_feature(enable = "neon")]
27429#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27430#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27431pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
27432    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
27433}
27434#[doc = "Zip vectors"]
27435#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"]
27436#[inline(always)]
27437#[target_feature(enable = "neon")]
27438#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27439#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27440pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
27441    unsafe { simd_shuffle!(a, b, [0, 2]) }
27442}
27443#[doc = "Zip vectors"]
27444#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"]
27445#[inline(always)]
27446#[target_feature(enable = "neon,fp16")]
27447#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
27448#[cfg(not(target_arch = "arm64ec"))]
27449#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27450pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
27451    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
27452}
27453#[doc = "Zip vectors"]
27454#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"]
27455#[inline(always)]
27456#[target_feature(enable = "neon,fp16")]
27457#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
27458#[cfg(not(target_arch = "arm64ec"))]
27459#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27460pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
27461    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
27462}
27463#[doc = "Zip vectors"]
27464#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"]
27465#[inline(always)]
27466#[target_feature(enable = "neon")]
27467#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27468#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27469pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
27470    unsafe { simd_shuffle!(a, b, [1, 3]) }
27471}
27472#[doc = "Zip vectors"]
27473#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"]
27474#[inline(always)]
27475#[target_feature(enable = "neon")]
27476#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27477#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27478pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
27479    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
27480}
27481#[doc = "Zip vectors"]
27482#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"]
27483#[inline(always)]
27484#[target_feature(enable = "neon")]
27485#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27486#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27487pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
27488    unsafe { simd_shuffle!(a, b, [1, 3]) }
27489}
27490#[doc = "Zip vectors"]
27491#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"]
27492#[inline(always)]
27493#[target_feature(enable = "neon")]
27494#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27495#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27496pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
27497    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
27498}
27499#[doc = "Zip vectors"]
27500#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"]
27501#[inline(always)]
27502#[target_feature(enable = "neon")]
27503#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27504#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27505pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
27506    unsafe {
27507        simd_shuffle!(
27508            a,
27509            b,
27510            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
27511        )
27512    }
27513}
27514#[doc = "Zip vectors"]
27515#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"]
27516#[inline(always)]
27517#[target_feature(enable = "neon")]
27518#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27519#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27520pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
27521    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
27522}
27523#[doc = "Zip vectors"]
27524#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"]
27525#[inline(always)]
27526#[target_feature(enable = "neon")]
27527#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27528#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27529pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
27530    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
27531}
27532#[doc = "Zip vectors"]
27533#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"]
27534#[inline(always)]
27535#[target_feature(enable = "neon")]
27536#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27537#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27538pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
27539    unsafe { simd_shuffle!(a, b, [1, 3]) }
27540}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // ZIP2: interleave the upper halves — [a[2], b[2], a[3], b[3]]
    // (indices 0..4 select from `a`, 4..8 from `b`).
    // SAFETY: all shuffle indices are in-bounds compile-time constants (< 8).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // ZIP2 on 2-lane vectors: take the upper lane of each input, i.e.
    // [a[1], b[1]] (indices 0..2 select from `a`, 2..4 from `b`).
    // SAFETY: all shuffle indices are in-bounds compile-time constants (< 4).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // ZIP2: interleave the upper halves of `a` and `b`. Lanes 0..8 index `a`,
    // 8..16 index `b`, so the result is [a[4], b[4], a[5], b[5], ..., a[7], b[7]].
    // SAFETY: all shuffle indices are in-bounds compile-time constants (< 16).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // ZIP2: interleave the upper halves of `a` and `b`. Lanes 0..16 index `a`,
    // 16..32 index `b`, so the result is [a[8], b[8], a[9], b[9], ..., a[15], b[15]].
    // SAFETY: all shuffle indices are in-bounds compile-time constants (< 32).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // ZIP2: interleave the upper halves — [a[2], b[2], a[3], b[3]]
    // (indices 0..4 select from `a`, 4..8 from `b`).
    // SAFETY: all shuffle indices are in-bounds compile-time constants (< 8).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // ZIP2: interleave the upper halves of `a` and `b`. Lanes 0..8 index `a`,
    // 8..16 index `b`, so the result is [a[4], b[4], a[5], b[5], ..., a[7], b[7]].
    // SAFETY: all shuffle indices are in-bounds compile-time constants (< 16).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // ZIP2 on 2-lane vectors: take the upper lane of each input, i.e.
    // [a[1], b[1]] (indices 0..2 select from `a`, 2..4 from `b`).
    // SAFETY: all shuffle indices are in-bounds compile-time constants (< 4).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // ZIP2: interleave the upper halves — [a[2], b[2], a[3], b[3]]
    // (indices 0..4 select from `a`, 4..8 from `b`).
    // SAFETY: all shuffle indices are in-bounds compile-time constants (< 8).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // ZIP2 on 2-lane vectors: take the upper lane of each input, i.e.
    // [a[1], b[1]] (indices 0..2 select from `a`, 2..4 from `b`).
    // SAFETY: all shuffle indices are in-bounds compile-time constants (< 4).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // ZIP2: interleave the upper halves of `a` and `b`. Lanes 0..8 index `a`,
    // 8..16 index `b`, so the result is [a[4], b[4], a[5], b[5], ..., a[7], b[7]].
    // SAFETY: all shuffle indices are in-bounds compile-time constants (< 16).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // ZIP2: interleave the upper halves of `a` and `b`. Lanes 0..16 index `a`,
    // 16..32 index `b`, so the result is [a[8], b[8], a[9], b[9], ..., a[15], b[15]].
    // SAFETY: all shuffle indices are in-bounds compile-time constants (< 32).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // ZIP2: interleave the upper halves — [a[2], b[2], a[3], b[3]]
    // (indices 0..4 select from `a`, 4..8 from `b`).
    // SAFETY: all shuffle indices are in-bounds compile-time constants (< 8).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // ZIP2: interleave the upper halves of `a` and `b`. Lanes 0..8 index `a`,
    // 8..16 index `b`, so the result is [a[4], b[4], a[5], b[5], ..., a[7], b[7]].
    // SAFETY: all shuffle indices are in-bounds compile-time constants (< 16).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // ZIP2 on 2-lane vectors: take the upper lane of each input, i.e.
    // [a[1], b[1]] (indices 0..2 select from `a`, 2..4 from `b`).
    // SAFETY: all shuffle indices are in-bounds compile-time constants (< 4).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}