#![allow(improper_ctypes)]

#[cfg(test)]
use stdarch_test::assert_instr;

use super::*;

#[doc = "CRC32-C single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"]
#[inline]
#[target_feature(enable = "crc")]
#[cfg(not(target_arch = "arm"))]
#[cfg_attr(test, assert_instr(crc32cx))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32cd(crc: u32, data: u64) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32cx"
        )]
        fn ___crc32cd(crc: u32, data: u64) -> u32;
    }
    unsafe { ___crc32cd(crc, data) }
}
#[doc = "CRC32 single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"]
#[inline]
#[target_feature(enable = "crc")]
#[cfg(not(target_arch = "arm"))]
#[cfg_attr(test, assert_instr(crc32x))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32d(crc: u32, data: u64) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32x"
        )]
        fn ___crc32d(crc: u32, data: u64) -> u32;
    }
    unsafe { ___crc32d(crc, data) }
}
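// Usage sketch (illustrative, not part of the generated API): on a CPU with
// the `crc` feature, these intrinsics fold one 64-bit word at a time into a
// running 32-bit accumulator. The seed and input words below are arbitrary
// example data.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "crc")]
fn crc32_usage_sketch(words: &[u64]) -> u32 {
    // Start from the customary all-ones seed and fold each quad word using
    // the CRC-32C (Castagnoli) polynomial; `__crc32d` would use CRC-32.
    let mut crc = 0xFFFF_FFFFu32;
    for &w in words {
        crc = __crc32cd(crc, w);
    }
    // Callers conventionally finish by inverting the accumulator.
    !crc
}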
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))]
pub fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int8x8_t = vabd_s8(d, e);
        let f: uint8x8_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))]
pub fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: int16x4_t = vabd_s16(d, e);
        let f: uint16x4_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))]
pub fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: int32x2_t = vabd_s32(d, e);
        let f: uint32x2_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))]
pub fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint8x8_t = vabd_u8(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))]
pub fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: uint16x4_t = vabd_u16(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))]
pub fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: uint32x2_t = vabd_u32(d, e);
        simd_add(a, simd_cast(f))
    }
}
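// Usage sketch for the `vabal_high_*` family (illustrative values): the high
// half of each 128-bit input is widened, and |b - c| is added to the
// accumulator lane-wise.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vabal_high_usage_sketch() -> int16x8_t {
    let acc = vdupq_n_s16(10); // running accumulator
    let b = vdupq_n_s8(-2);
    let c = vdupq_n_s8(3);
    // |(-2) - 3| = 5 in every high-half lane, so every result lane is 15.
    vabal_high_s8(acc, b, c)
}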
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v1f64"
        )]
        fn _vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vabd_f64(a, b) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v2f64"
        )]
        fn _vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vabdq_f64(a, b) }
}
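// Usage sketch: `vabd_f64`/`vabdq_f64` compute |a - b| lane-wise. Example
// values are illustrative.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vabd_f64_usage_sketch() -> float64x2_t {
    let a = vdupq_n_f64(1.5);
    let b = vdupq_n_f64(4.0);
    // Every lane becomes |1.5 - 4.0| = 2.5.
    vabdq_f64(a, b)
}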
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdd_f64(a: f64, b: f64) -> f64 {
    unsafe { simd_extract!(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabds_f32(a: f32, b: f32) -> f32 {
    unsafe { simd_extract!(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
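// Usage sketch: the scalar forms splat their operands into a vector, run the
// vector absolute difference, and extract lane 0, exactly as the bodies above
// show.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vabds_f32_usage_sketch() -> f32 {
    // |3.0 - (-1.25)| = 4.25
    vabds_f32(3.0, -1.25)
}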
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdh_f16(a: f16, b: f16) -> f16 {
    unsafe { simd_extract!(vabd_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl))]
pub fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: uint16x4_t = simd_cast(vabd_s16(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl))]
pub fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: uint32x2_t = simd_cast(vabd_s32(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl))]
pub fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: uint8x8_t = simd_cast(vabd_s8(c, d));
        simd_cast(e)
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_cast(vabd_u8(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_cast(vabd_u16(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_cast(vabd_u32(c, d))
    }
}
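// Usage sketch for the `vabdl_high_*` family: like `vabal_high_*` but with no
// accumulator; the widened |a - b| of the high halves is returned directly.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vabdl_high_usage_sketch() -> uint16x8_t {
    let a = vdupq_n_u8(250);
    let b = vdupq_n_u8(3);
    // Every lane becomes |250 - 3| = 247, widened to u16.
    vabdl_high_u8(a, b)
}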
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabs_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_fabs(a) }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabsq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_fabs(a) }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabs_s64(a: int64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.abs.v1i64"
        )]
        fn _vabs_s64(a: int64x1_t) -> int64x1_t;
    }
    unsafe { _vabs_s64(a) }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsd_s64(a: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.abs.i64"
        )]
        fn _vabsd_s64(a: i64) -> i64;
    }
    unsafe { _vabsd_s64(a) }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsq_s64(a: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.abs.v2i64"
        )]
        fn _vabsq_s64(a: int64x2_t) -> int64x2_t;
    }
    unsafe { _vabsq_s64(a) }
}
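// Usage sketch: the "(wrapping)" in the doc comments above matters for the
// most negative value, whose true absolute value is not representable.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vabsd_s64_usage_sketch() -> (i64, i64) {
    // i64::MIN wraps back to i64::MIN, matching `i64::wrapping_abs`.
    (vabsd_s64(i64::MIN), vabsd_s64(-7)) // (i64::MIN, 7)
}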
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_s64(a: i64, b: i64) -> i64 {
    a.wrapping_add(b)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_u64(a: u64, b: u64) -> u64 {
    a.wrapping_add(b)
}
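// Usage sketch: `vaddd_s64`/`vaddd_u64` are plain wrapping adds, which is why
// `assert_instr` expects `nop` — no dedicated instruction is needed.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vaddd_usage_sketch() -> u64 {
    // u64::MAX + 1 wraps to 0.
    vaddd_u64(u64::MAX, 1)
}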
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s16(a: int16x4_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v4i16"
        )]
        fn _vaddlv_s16(a: int16x4_t) -> i32;
    }
    unsafe { _vaddlv_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s16(a: int16x8_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i16"
        )]
        fn _vaddlvq_s16(a: int16x8_t) -> i32;
    }
    unsafe { _vaddlvq_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s32(a: int32x4_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v4i32"
        )]
        fn _vaddlvq_s32(a: int32x4_t) -> i64;
    }
    unsafe { _vaddlvq_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlp))]
pub fn vaddlv_s32(a: int32x2_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v2i32"
        )]
        fn _vaddlv_s32(a: int32x2_t) -> i64;
    }
    unsafe { _vaddlv_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s8(a: int8x8_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i8"
        )]
        fn _vaddlv_s8(a: int8x8_t) -> i32;
    }
    unsafe { _vaddlv_s8(a) as i16 }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s8(a: int8x16_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v16i8"
        )]
        fn _vaddlvq_s8(a: int8x16_t) -> i32;
    }
    unsafe { _vaddlvq_s8(a) as i16 }
}
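// Usage sketch: the "long" reductions widen before summing, so a full vector
// of i8 values can be reduced without overflowing the element type.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vaddlvq_s8_usage_sketch() -> i16 {
    // 16 lanes of 100 sum to 1600, which fits in i16 but not in i8.
    vaddlvq_s8(vdupq_n_s8(100))
}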
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u16(a: uint16x4_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16"
        )]
        fn _vaddlv_u16(a: uint16x4_t) -> u32;
    }
    unsafe { _vaddlv_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u16(a: uint16x8_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16"
        )]
        fn _vaddlvq_u16(a: uint16x8_t) -> u32;
    }
    unsafe { _vaddlvq_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u32(a: uint32x4_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32"
        )]
        fn _vaddlvq_u32(a: uint32x4_t) -> u64;
    }
    unsafe { _vaddlvq_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlp))]
pub fn vaddlv_u32(a: uint32x2_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32"
        )]
        fn _vaddlv_u32(a: uint32x2_t) -> u64;
    }
    unsafe { _vaddlv_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u8(a: uint8x8_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8"
        )]
        fn _vaddlv_u8(a: uint8x8_t) -> i32;
    }
    unsafe { _vaddlv_u8(a) as u16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u8(a: uint8x16_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8"
        )]
        fn _vaddlvq_u8(a: uint8x16_t) -> i32;
    }
    unsafe { _vaddlvq_u8(a) as u16 }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v2f32"
        )]
        fn _vaddv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vaddv_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v4f32"
        )]
        fn _vaddvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vaddvq_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f64.v2f64"
        )]
        fn _vaddvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vaddvq_f64(a) }
}
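// Usage sketch: the floating-point reductions sum all lanes into one scalar.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vaddvq_f64_usage_sketch() -> f64 {
    // 1.5 + 1.5 = 3.0
    vaddvq_f64(vdupq_n_f64(1.5))
}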
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_s32(a: int32x2_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddv.i32.v2i32"
        )]
        fn _vaddv_s32(a: int32x2_t) -> i32;
    }
    unsafe { _vaddv_s32(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s8(a: int8x8_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddv.i8.v8i8"
        )]
        fn _vaddv_s8(a: int8x8_t) -> i8;
    }
    unsafe { _vaddv_s8(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s8(a: int8x16_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddv.i8.v16i8"
        )]
        fn _vaddvq_s8(a: int8x16_t) -> i8;
    }
    unsafe { _vaddvq_s8(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s16(a: int16x4_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddv.i16.v4i16"
        )]
        fn _vaddv_s16(a: int16x4_t) -> i16;
    }
    unsafe { _vaddv_s16(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s16(a: int16x8_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddv.i16.v8i16"
        )]
        fn _vaddvq_s16(a: int16x8_t) -> i16;
    }
    unsafe { _vaddvq_s16(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s32(a: int32x4_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddv.i32.v4i32"
        )]
        fn _vaddvq_s32(a: int32x4_t) -> i32;
    }
    unsafe { _vaddvq_s32(a) }
}
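// Usage sketch: the plain reductions truncate to the element type, so they
// can wrap, unlike the widening `vaddlv*` forms above.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vaddvq_s16_usage_sketch() -> i16 {
    // 8 lanes of 5000 sum to 40000, which wraps modulo 2^16 to -25536.
    vaddvq_s16(vdupq_n_s16(5000))
}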
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_u32(a: uint32x2_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddv.i32.v2i32"
        )]
        fn _vaddv_u32(a: uint32x2_t) -> u32;
    }
    unsafe { _vaddv_u32(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u8(a: uint8x8_t) -> u8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddv.i8.v8i8"
        )]
        fn _vaddv_u8(a: uint8x8_t) -> u8;
    }
    unsafe { _vaddv_u8(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddv.i8.v16i8"
        )]
        fn _vaddvq_u8(a: uint8x16_t) -> u8;
    }
    unsafe { _vaddvq_u8(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u16(a: uint16x4_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddv.i16.v4i16"
        )]
        fn _vaddv_u16(a: uint16x4_t) -> u16;
    }
    unsafe { _vaddv_u16(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddv.i16.v8i16"
        )]
        fn _vaddvq_u16(a: uint16x8_t) -> u16;
    }
    unsafe { _vaddvq_u16(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddv.i32.v4i32"
        )]
        fn _vaddvq_u32(a: uint32x4_t) -> u32;
    }
    unsafe { _vaddvq_u32(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_s64(a: int64x2_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddv.i64.v2i64"
        )]
        fn _vaddvq_s64(a: int64x2_t) -> i64;
    }
    unsafe { _vaddvq_s64(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_u64(a: uint64x2_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddv.i64.v2i64"
        )]
        fn _vaddvq_u64(a: uint64x2_t) -> u64;
    }
    unsafe { _vaddvq_u64(a) }
}
#[doc = "Floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f32"
        )]
        fn _vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vamax_f32(a, b) }
}
#[doc = "Floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f32)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v4f32"
        )]
        fn _vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vamaxq_f32(a, b) }
}
#[doc = "Floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f64)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f64"
        )]
        fn _vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vamaxq_f64(a, b) }
}
#[doc = "Floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f32)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f32"
        )]
        fn _vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vamin_f32(a, b) }
}
#[doc = "Floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f32)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v4f32"
        )]
        fn _vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vaminq_f32(a, b) }
}
#[doc = "Floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f64)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f64"
        )]
        fn _vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vaminq_f64(a, b) }
}
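// Usage sketch (assumes a target with the unstable `faminmax` feature):
// FAMAX takes the maximum of the operands' absolute values, lane-wise.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,faminmax")]
fn vamaxq_f64_usage_sketch() -> float64x2_t {
    let a = vdupq_n_f64(-3.0);
    let b = vdupq_n_f64(2.0);
    // max(|-3.0|, |2.0|) = 3.0 in every lane.
    vamaxq_f64(a, b)
}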
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v16i8"
        )]
        fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    unsafe { _vbcaxq_s8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v8i16"
        )]
        fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vbcaxq_s16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v4i32"
        )]
        fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vbcaxq_s32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v2i64"
        )]
        fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    unsafe { _vbcaxq_s64(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v16i8"
        )]
        fn _vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _vbcaxq_u8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v8i16"
        )]
        fn _vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _vbcaxq_u16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v4i32"
        )]
        fn _vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vbcaxq_u32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v2i64"
        )]
        fn _vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vbcaxq_u64(a, b, c) }
}
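// Usage sketch: BCAX computes `a ^ (b & !c)` lane-wise — "bit clear" `b`
// with `c`, then exclusive-OR the result into `a`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,sha3")]
fn vbcaxq_u8_usage_sketch() -> uint8x16_t {
    let a = vdupq_n_u8(0b1010_1010);
    let b = vdupq_n_u8(0b1111_0000);
    let c = vdupq_n_u8(0b1100_0000);
    // b & !c = 0b0011_0000; a ^ 0b0011_0000 = 0b1001_1010 in every lane.
    vbcaxq_u8(a, b, c)
}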
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f16"
        )]
        fn _vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcadd_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v8f16"
        )]
        fn _vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcaddq_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32"
        )]
        fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcadd_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32"
        )]
        fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcaddq_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64"
        )]
        fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcaddq_rot270_f64(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f16"
        )]
        fn _vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcadd_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v8f16"
        )]
        fn _vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcaddq_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32"
        )]
        fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcadd_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32"
        )]
        fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcaddq_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64"
        )]
        fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcaddq_rot90_f64(a, b) }
}
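// Usage sketch: the complex-add intrinsics treat even/odd lane pairs as
// (real, imaginary) components. A rotation of 90 degrees multiplies the
// second operand by i before adding, so per complex element the result is
// (a_re - b_im, a_im + b_re); rot270 multiplies by -i, giving
// (a_re + b_im, a_im - b_re).
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,fcma")]
fn vcaddq_rot90_f64_usage_sketch(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // One float64x2_t holds a single complex number: lane 0 = re, lane 1 = im.
    vcaddq_rot90_f64(a, b)
}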
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v1i64.v1f64"
        )]
        fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcage_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v2i64.v2f64"
        )]
        fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcageq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaged_f64(a: f64, b: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i64.f64"
        )]
        fn _vcaged_f64(a: f64, b: f64) -> u64;
    }
    unsafe { _vcaged_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcages_f32(a: f32, b: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f32"
        )]
        fn _vcages_f32(a: f32, b: f32) -> u32;
    }
    unsafe { _vcages_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcageh_f16(a: f16, b: f16) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f16"
        )]
        fn _vcageh_f16(a: f16, b: f16) -> i32;
    }
    unsafe { _vcageh_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64"
        )]
        fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcagt_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64"
        )]
        fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcagtq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtd_f64(a: f64, b: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i64.f64"
        )]
        fn _vcagtd_f64(a: f64, b: f64) -> u64;
    }
    unsafe { _vcagtd_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagts_f32(a: f32, b: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f32"
        )]
        fn _vcagts_f32(a: f32, b: f32) -> u32;
    }
    unsafe { _vcagts_f32(a, b) }
}
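// Usage sketch: the absolute-compare intrinsics return all-ones on true and
// all-zeros on false, as a mask in the matching unsigned type.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vcagts_f32_usage_sketch() -> (u32, u32) {
    // |-3.0| > |2.0| is true (all ones); |1.0| > |2.0| is false (zero).
    (vcagts_f32(-3.0, 2.0), vcagts_f32(1.0, 2.0)) // (u32::MAX, 0)
}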
1356#[doc = "Floating-point absolute compare greater than"]
1357#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagth_f16)"]
1358#[inline]
1359#[cfg_attr(test, assert_instr(facgt))]
1360#[target_feature(enable = "neon,fp16")]
1361#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
1362pub fn vcagth_f16(a: f16, b: f16) -> u16 {
1363 unsafe extern "unadjusted" {
1364 #[cfg_attr(
1365 any(target_arch = "aarch64", target_arch = "arm64ec"),
1366 link_name = "llvm.aarch64.neon.facgt.i32.f16"
1367 )]
1368 fn _vcagth_f16(a: f16, b: f16) -> i32;
1369 }
1370 unsafe { _vcagth_f16(a, b) as u16 }
1371}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    vcage_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    vcageq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaled_f64(a: f64, b: f64) -> u64 {
    vcaged_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcales_f32(a: f32, b: f32) -> u32 {
    vcages_f32(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcaleh_f16(a: f16, b: f16) -> u16 {
    vcageh_f16(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    vcagt_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    vcagtq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltd_f64(a: f64, b: f64) -> u64 {
    vcagtd_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalts_f32(a: f32, b: f32) -> u32 {
    vcagts_f32(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcalth_f16(a: f16, b: f16) -> u16 {
    vcagth_f16(b, a)
}
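// Sketch (illustrative; hypothetical demo name): the "absolute less than"
// forms have no dedicated instruction; they are the greater-than forms with
// the operands swapped, which is why vcalt_* asserts facgt above.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn demo_vcalt_is_swapped_vcagt() {
    let a = vdup_n_f64(1.0);
    let b = vdup_n_f64(-2.0);
    let lt: u64 = unsafe { simd_extract!(vcalt_f64(a, b), 0) };
    let gt: u64 = unsafe { simd_extract!(vcagt_f64(b, a), 0) };
    assert_eq!(lt, gt); // both all ones, since |1.0| < |-2.0|
}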
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
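// Sketch (illustrative; hypothetical demo name): cmeq produces a per-lane
// mask, all ones on equality and all zeros otherwise, so the result can be
// fed straight into bitwise selects and masked arithmetic.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn demo_vceq_u64_mask() {
    let mask = vceq_u64(vdup_n_u64(7), vdup_n_u64(7));
    let lane: u64 = unsafe { simd_extract!(mask, 0) };
    assert_eq!(lane, u64::MAX);
}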
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqs_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vceq_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vceq_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vceq_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
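// Sketch (illustrative; hypothetical demo name): the scalar d/s/h variants
// keep the vector mask convention, returning 0 or an all-ones integer
// instead of a bool.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn demo_vceqd_s64() {
    assert_eq!(vceqd_s64(-1, -1), u64::MAX);
    assert_eq!(vceqd_s64(-1, 1), 0);
}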
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t {
    let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqzq_f16(a: float16x8_t) -> uint16x8_t {
    let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
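// Sketch (illustrative; hypothetical demo name): the floating-point vceqz*
// forms perform an IEEE 754 comparison against 0.0, so a negative zero also
// compares equal.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn demo_vceqz_f64_negative_zero() {
    let lane: u64 = unsafe { simd_extract!(vceqz_f64(vdup_n_f64(-0.0)), 0) };
    assert_eq!(lane, u64::MAX); // -0.0 == 0.0 under IEEE 754
}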
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
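// Sketch (illustrative; hypothetical demo name): for the integer and
// polynomial variants the zero compare is purely bitwise, lane by lane.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn demo_vceqz_s64() {
    let lane: u64 = unsafe { simd_extract!(vceqz_s64(vdup_n_s64(0)), 0) };
    assert_eq!(lane, u64::MAX);
}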
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
    let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
    let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
    let b: u16x4 = u16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
    let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
    let b: u32x2 = u32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
    let b: u32x4 = u32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
    let b: u64x1 = u64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
    let b: u64x2 = u64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
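// Sketch (illustrative; hypothetical demo name): a common use of these
// masks is a branch-free select; vbsl_u64 takes bits from its second
// operand where the mask is set and from the third elsewhere.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn demo_vceqz_u64_select() {
    let mask = vceqz_u64(vdup_n_u64(0)); // all ones: the input is zero
    let picked = vbsl_u64(mask, vdup_n_u64(1), vdup_n_u64(2));
    let lane: u64 = unsafe { simd_extract!(picked, 0) };
    assert_eq!(lane, 1);
}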
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_s64(a: i64) -> u64 {
    unsafe { transmute(vceqz_s64(transmute(a))) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_u64(a: u64) -> u64 {
    unsafe { transmute(vceqz_u64(transmute(a))) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqzh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vceqz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vceqz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vceqz_f64(vdup_n_f64(a)), 0) }
}
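// Sketch (illustrative; hypothetical demo name): the scalar zero compares
// above reuse the vector forms via dup/transmute, so they follow the same
// all-ones/all-zeros convention.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn demo_vceqzd_u64() {
    assert_eq!(vceqzd_u64(0), u64::MAX);
    assert_eq!(vceqzd_u64(5), 0);
}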
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcges_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcge_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcge_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcgeh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcge_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
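// Sketch (illustrative; hypothetical demo name): signedness only changes
// the instruction selected (cmge vs. cmhs); the Rust-level contract is the
// same mask either way, but the same bit pattern can order differently.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn demo_vcged_signed_vs_unsigned() {
    assert_eq!(vcged_s64(-1, 1), 0); // signed: -1 < 1
    assert_eq!(vcged_u64(u64::MAX, 1), u64::MAX); // same bits, unsigned: >= 1
}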
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vcgez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vcgez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_s64(a: i64) -> u64 {
    unsafe { transmute(vcgez_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcgezh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vcgez_f16(vdup_n_f16(a)), 0) }
}
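// Sketch (illustrative; hypothetical demo name): "greater than or equal to
// zero" is satisfied by zero itself, unlike the strict vcgtz* forms below.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn demo_vcgezd_s64() {
    assert_eq!(vcgezd_s64(0), u64::MAX);
    assert_eq!(vcgezd_s64(-1), 0);
}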
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgts_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcgt_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcgt_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcgth_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcgt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
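// Sketch (illustrative; hypothetical demo name): the strict greater-than
// forms reject equality.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn demo_vcgtd_s64() {
    assert_eq!(vcgtd_s64(2, 1), u64::MAX);
    assert_eq!(vcgtd_s64(1, 1), 0); // equal is not "greater than"
}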
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_s64(a: i64) -> u64 {
    unsafe { transmute(vcgtz_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcgtzh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vcgtz_f16(vdup_n_f16(a)), 0) }
}
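// Sketch (illustrative; hypothetical demo name): IEEE 754 comparisons
// involving NaN are false, so a NaN input fails the greater-than-zero test
// just like a negative one.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn demo_vcgtzd_f64_nan() {
    assert_eq!(vcgtzd_f64(1.0), u64::MAX);
    assert_eq!(vcgtzd_f64(f64::NAN), 0); // unordered compares are false
}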
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcles_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcle_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcle_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcleh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcle_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
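// Sketch (illustrative; hypothetical demo name): a <= b is evaluated as
// b >= a, which is why the vcle_* functions above assert cmge/cmhs/fcmge
// rather than a dedicated less-than-or-equal instruction.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn demo_vcled_f64() {
    assert_eq!(vcled_f64(1.0, 1.0), u64::MAX); // equality satisfies <=
    assert_eq!(vcled_f64(2.0, 1.0), 0);
}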
2493#[doc = "Floating-point compare less than or equal to zero"]
2494#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"]
2495#[inline]
2496#[target_feature(enable = "neon")]
2497#[cfg_attr(test, assert_instr(fcmle))]
2498#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2499pub fn vclez_f32(a: float32x2_t) -> uint32x2_t {
2500 let b: f32x2 = f32x2::new(0.0, 0.0);
2501 unsafe { simd_le(a, transmute(b)) }
2502}
2503#[doc = "Floating-point compare less than or equal to zero"]
2504#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"]
2505#[inline]
2506#[target_feature(enable = "neon")]
2507#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
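// Usage sketch for the `vclez*` family (illustrative comment, not generated
// code; written as user-crate code against `core::arch::aarch64`, assuming
// an aarch64 target with NEON). Each output lane is an all-ones mask when
// the corresponding input lane is <= 0, and zero otherwise:
//
//     use core::arch::aarch64::*;
//
//     #[target_feature(enable = "neon")]
//     fn lanes_le_zero(x: int32x4_t) -> uint32x4_t {
//         // For x = [-1, 0, 1, i32::MIN] the result lanes are
//         // [u32::MAX, u32::MAX, 0, u32::MAX].
//         vclezq_s32(x)
//     }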
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vclez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vclez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_s64(a: i64) -> u64 {
    unsafe { transmute(vclez_s64(transmute(a))) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vclezh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vclez_f16(vdup_n_f16(a)), 0) }
}
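// The `d`/`s`/`h`-suffixed forms above are scalar: they compare one value
// and return 0 or an all-ones integer. A sketch of the expected results
// (illustrative comment; user-crate code, assuming aarch64 + NEON):
//
//     use core::arch::aarch64::*;
//
//     #[target_feature(enable = "neon")]
//     fn scalar_examples() {
//         assert_eq!(vclezd_s64(-1), u64::MAX); // -1 <= 0
//         assert_eq!(vclezd_s64(1), 0);         //  1 >  0
//         assert_eq!(vclezs_f32(0.0), u32::MAX);
//         assert_eq!(vclezs_f32(f32::NAN), 0);  // comparisons with NaN fail
//     }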
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vclt_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vclt_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vclth_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vclt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclts_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
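// Sketch of the two-operand `vclt*` forms (illustrative comment; user-crate
// code, assuming aarch64 + NEON). A result lane is all ones exactly when
// the first operand's lane is strictly less than the second's:
//
//     use core::arch::aarch64::*;
//
//     #[target_feature(enable = "neon")]
//     fn strict_less_than(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
//         // a = [1, 5], b = [2, 5]  ->  [u64::MAX, 0]
//         vcltq_u64(a, b)
//     }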
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vcltz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vcltz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(asr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_s64(a: i64) -> u64 {
    unsafe { transmute(vcltz_s64(transmute(a))) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcltzh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vcltz_f16(vdup_n_f16(a)), 0) }
}
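// `vcltz*` mirrors `vclez*` with a strict comparison. For the scalar
// signed case the mask is just the sign bit smeared across the result,
// which is why `vcltzd_s64` can lower to an arithmetic shift (`asr`)
// rather than a compare. Illustrative comment (user-crate code, assuming
// aarch64 + NEON):
//
//     use core::arch::aarch64::*;
//
//     #[target_feature(enable = "neon")]
//     fn sign_mask(x: i64) -> u64 {
//         // vcltzd_s64(-7) == u64::MAX, vcltzd_s64(0) == 0,
//         // i.e. equivalent to ((x >> 63) as u64).
//         vcltzd_s64(x)
//     }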
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f16"
        )]
        fn _vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v8f16"
        )]
        fn _vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32"
        )]
        fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32"
        )]
        fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64"
        )]
        fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_f64(a, b, c) }
}
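// The `vcmla*` intrinsics treat each even/odd lane pair as one complex
// number (even = real, odd = imaginary) and accumulate a rotated partial
// product onto `a`. A single rotation only accumulates half of the complex
// product; the documented idiom is to pair the 0-degree and 90-degree
// rotations (the rot90 forms appear later in this file) to get a full
// complex multiply-accumulate. Illustrative comment (user-crate code,
// assuming an aarch64 target with the `fcma` extension):
//
//     use core::arch::aarch64::*;
//
//     #[target_feature(enable = "neon,fcma")]
//     fn complex_mla(acc: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
//         // After both steps: acc + b * c, with lanes read as [re, im].
//         vcmla_rot90_f32(vcmla_f32(acc, b, c), b, c)
//     }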
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
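// In the `_lane`/`_laneq` variants, `LANE` indexes a complex pair in `c`
// (not a scalar lane): the shuffle above broadcasts lanes
// `[2*LANE, 2*LANE + 1]` to every pair before the accumulate. Illustrative
// comment (user-crate code, assuming aarch64 + `fcma`):
//
//     use core::arch::aarch64::*;
//
//     #[target_feature(enable = "neon,fcma")]
//     fn mla_by_pair1(acc: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
//         // Multiplies the complex element of `b` by complex element 1
//         // of `c`, i.e. the pair held in lanes 2 and 3.
//         vcmla_laneq_f32::<1>(acc, b, c)
//     }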
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f16"
        )]
        fn _vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v8f16"
        )]
        fn _vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32"
        )]
        fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32"
        )]
        fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64"
        )]
        fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot180_f64(a, b, c) }
}
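// Rotating the intermediate product by 180 degrees negates the contribution
// that the 0-degree rotation would accumulate (and 270 degrees likewise
// negates 90), so pairing them yields a complex multiply-subtract.
// Illustrative comment (user-crate code, assuming aarch64 + `fcma`; the
// rot270 forms appear later in this file):
//
//     use core::arch::aarch64::*;
//
//     #[target_feature(enable = "neon,fcma")]
//     fn complex_mls(acc: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
//         // After both steps: acc - b * c, with lanes read as [re, im].
//         vcmla_rot270_f32(vcmla_rot180_f32(acc, b, c), b, c)
//     }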
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_rot180_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_rot180_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_rot180_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_rot180_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
3488#[doc = "Floating-point complex multiply accumulate"]
3489#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f16)"]
3490#[inline]
3491#[target_feature(enable = "neon,fcma")]
3492#[target_feature(enable = "neon,fp16")]
3493#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3494#[cfg_attr(test, assert_instr(fcmla))]
3495pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
3496 unsafe extern "unadjusted" {
3497 #[cfg_attr(
3498 any(target_arch = "aarch64", target_arch = "arm64ec"),
3499 link_name = "llvm.aarch64.neon.vcmla.rot270.v4f16"
3500 )]
3501 fn _vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
3502 }
3503 unsafe { _vcmla_rot270_f16(a, b, c) }
3504}
3505#[doc = "Floating-point complex multiply accumulate"]
3506#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"]
3507#[inline]
3508#[target_feature(enable = "neon,fcma")]
3509#[target_feature(enable = "neon,fp16")]
3510#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3511#[cfg_attr(test, assert_instr(fcmla))]
3512pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
3513 unsafe extern "unadjusted" {
3514 #[cfg_attr(
3515 any(target_arch = "aarch64", target_arch = "arm64ec"),
3516 link_name = "llvm.aarch64.neon.vcmla.rot270.v8f16"
3517 )]
3518 fn _vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
3519 }
3520 unsafe { _vcmlaq_rot270_f16(a, b, c) }
3521}
3522#[doc = "Floating-point complex multiply accumulate"]
3523#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"]
3524#[inline]
3525#[target_feature(enable = "neon,fcma")]
3526#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3527#[cfg_attr(test, assert_instr(fcmla))]
3528pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
3529 unsafe extern "unadjusted" {
3530 #[cfg_attr(
3531 any(target_arch = "aarch64", target_arch = "arm64ec"),
3532 link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32"
3533 )]
3534 fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
3535 }
3536 unsafe { _vcmla_rot270_f32(a, b, c) }
3537}
3538#[doc = "Floating-point complex multiply accumulate"]
3539#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"]
3540#[inline]
3541#[target_feature(enable = "neon,fcma")]
3542#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3543#[cfg_attr(test, assert_instr(fcmla))]
3544pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
3545 unsafe extern "unadjusted" {
3546 #[cfg_attr(
3547 any(target_arch = "aarch64", target_arch = "arm64ec"),
3548 link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32"
3549 )]
3550 fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
3551 }
3552 unsafe { _vcmlaq_rot270_f32(a, b, c) }
3553}
3554#[doc = "Floating-point complex multiply accumulate"]
3555#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"]
3556#[inline]
3557#[target_feature(enable = "neon,fcma")]
3558#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3559#[cfg_attr(test, assert_instr(fcmla))]
3560pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
3561 unsafe extern "unadjusted" {
3562 #[cfg_attr(
3563 any(target_arch = "aarch64", target_arch = "arm64ec"),
3564 link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64"
3565 )]
3566 fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
3567 }
3568 unsafe { _vcmlaq_rot270_f64(a, b, c) }
3569}
3570#[doc = "Floating-point complex multiply accumulate"]
3571#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f16)"]
3572#[inline]
3573#[target_feature(enable = "neon,fcma")]
3574#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3575#[rustc_legacy_const_generics(3)]
3576#[target_feature(enable = "neon,fp16")]
3577#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3578pub fn vcmla_rot270_lane_f16<const LANE: i32>(
3579 a: float16x4_t,
3580 b: float16x4_t,
3581 c: float16x4_t,
3582) -> float16x4_t {
3583 static_assert_uimm_bits!(LANE, 1);
3584 unsafe {
3585 let c: float16x4_t = simd_shuffle!(
3586 c,
3587 c,
3588 [
3589 2 * LANE as u32,
3590 2 * LANE as u32 + 1,
3591 2 * LANE as u32,
3592 2 * LANE as u32 + 1
3593 ]
3594 );
3595 vcmla_rot270_f16(a, b, c)
3596 }
3597}
3598#[doc = "Floating-point complex multiply accumulate"]
3599#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f16)"]
3600#[inline]
3601#[target_feature(enable = "neon,fcma")]
3602#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3603#[rustc_legacy_const_generics(3)]
3604#[target_feature(enable = "neon,fp16")]
3605#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3606pub fn vcmlaq_rot270_lane_f16<const LANE: i32>(
3607 a: float16x8_t,
3608 b: float16x8_t,
3609 c: float16x4_t,
3610) -> float16x8_t {
3611 static_assert_uimm_bits!(LANE, 1);
3612 unsafe {
3613 let c: float16x8_t = simd_shuffle!(
3614 c,
3615 c,
3616 [
3617 2 * LANE as u32,
3618 2 * LANE as u32 + 1,
3619 2 * LANE as u32,
3620 2 * LANE as u32 + 1,
3621 2 * LANE as u32,
3622 2 * LANE as u32 + 1,
3623 2 * LANE as u32,
3624 2 * LANE as u32 + 1
3625 ]
3626 );
3627 vcmlaq_rot270_f16(a, b, c)
3628 }
3629}
3630#[doc = "Floating-point complex multiply accumulate"]
3631#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"]
3632#[inline]
3633#[target_feature(enable = "neon,fcma")]
3634#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3635#[rustc_legacy_const_generics(3)]
3636#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3637pub fn vcmla_rot270_lane_f32<const LANE: i32>(
3638 a: float32x2_t,
3639 b: float32x2_t,
3640 c: float32x2_t,
3641) -> float32x2_t {
3642 static_assert!(LANE == 0);
3643 unsafe {
3644 let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3645 vcmla_rot270_f32(a, b, c)
3646 }
3647}
3648#[doc = "Floating-point complex multiply accumulate"]
3649#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"]
3650#[inline]
3651#[target_feature(enable = "neon,fcma")]
3652#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3653#[rustc_legacy_const_generics(3)]
3654#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3655pub fn vcmlaq_rot270_lane_f32<const LANE: i32>(
3656 a: float32x4_t,
3657 b: float32x4_t,
3658 c: float32x2_t,
3659) -> float32x4_t {
3660 static_assert!(LANE == 0);
3661 unsafe {
3662 let c: float32x4_t = simd_shuffle!(
3663 c,
3664 c,
3665 [
3666 2 * LANE as u32,
3667 2 * LANE as u32 + 1,
3668 2 * LANE as u32,
3669 2 * LANE as u32 + 1
3670 ]
3671 );
3672 vcmlaq_rot270_f32(a, b, c)
3673 }
3674}
3675#[doc = "Floating-point complex multiply accumulate"]
3676#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f16)"]
3677#[inline]
3678#[target_feature(enable = "neon,fcma")]
3679#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3680#[rustc_legacy_const_generics(3)]
3681#[target_feature(enable = "neon,fp16")]
3682#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3683pub fn vcmla_rot270_laneq_f16<const LANE: i32>(
3684 a: float16x4_t,
3685 b: float16x4_t,
3686 c: float16x8_t,
3687) -> float16x4_t {
3688 static_assert_uimm_bits!(LANE, 2);
3689 unsafe {
3690 let c: float16x4_t = simd_shuffle!(
3691 c,
3692 c,
3693 [
3694 2 * LANE as u32,
3695 2 * LANE as u32 + 1,
3696 2 * LANE as u32,
3697 2 * LANE as u32 + 1
3698 ]
3699 );
3700 vcmla_rot270_f16(a, b, c)
3701 }
3702}
3703#[doc = "Floating-point complex multiply accumulate"]
3704#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f16)"]
3705#[inline]
3706#[target_feature(enable = "neon,fcma")]
3707#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3708#[rustc_legacy_const_generics(3)]
3709#[target_feature(enable = "neon,fp16")]
3710#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3711pub fn vcmlaq_rot270_laneq_f16<const LANE: i32>(
3712 a: float16x8_t,
3713 b: float16x8_t,
3714 c: float16x8_t,
3715) -> float16x8_t {
3716 static_assert_uimm_bits!(LANE, 2);
3717 unsafe {
3718 let c: float16x8_t = simd_shuffle!(
3719 c,
3720 c,
3721 [
3722 2 * LANE as u32,
3723 2 * LANE as u32 + 1,
3724 2 * LANE as u32,
3725 2 * LANE as u32 + 1,
3726 2 * LANE as u32,
3727 2 * LANE as u32 + 1,
3728 2 * LANE as u32,
3729 2 * LANE as u32 + 1
3730 ]
3731 );
3732 vcmlaq_rot270_f16(a, b, c)
3733 }
3734}
3735#[doc = "Floating-point complex multiply accumulate"]
3736#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"]
3737#[inline]
3738#[target_feature(enable = "neon,fcma")]
3739#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3740#[rustc_legacy_const_generics(3)]
3741#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3742pub fn vcmla_rot270_laneq_f32<const LANE: i32>(
3743 a: float32x2_t,
3744 b: float32x2_t,
3745 c: float32x4_t,
3746) -> float32x2_t {
3747 static_assert_uimm_bits!(LANE, 1);
3748 unsafe {
3749 let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3750 vcmla_rot270_f32(a, b, c)
3751 }
3752}
3753#[doc = "Floating-point complex multiply accumulate"]
3754#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"]
3755#[inline]
3756#[target_feature(enable = "neon,fcma")]
3757#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3758#[rustc_legacy_const_generics(3)]
3759#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3760pub fn vcmlaq_rot270_laneq_f32<const LANE: i32>(
3761 a: float32x4_t,
3762 b: float32x4_t,
3763 c: float32x4_t,
3764) -> float32x4_t {
3765 static_assert_uimm_bits!(LANE, 1);
3766 unsafe {
3767 let c: float32x4_t = simd_shuffle!(
3768 c,
3769 c,
3770 [
3771 2 * LANE as u32,
3772 2 * LANE as u32 + 1,
3773 2 * LANE as u32,
3774 2 * LANE as u32 + 1
3775 ]
3776 );
3777 vcmlaq_rot270_f32(a, b, c)
3778 }
3779}
3780#[doc = "Floating-point complex multiply accumulate"]
3781#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f16)"]
3782#[inline]
3783#[target_feature(enable = "neon,fcma")]
3784#[target_feature(enable = "neon,fp16")]
3785#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3786#[cfg_attr(test, assert_instr(fcmla))]
3787pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
3788 unsafe extern "unadjusted" {
3789 #[cfg_attr(
3790 any(target_arch = "aarch64", target_arch = "arm64ec"),
3791 link_name = "llvm.aarch64.neon.vcmla.rot90.v4f16"
3792 )]
3793 fn _vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
3794 }
3795 unsafe { _vcmla_rot90_f16(a, b, c) }
3796}
3797#[doc = "Floating-point complex multiply accumulate"]
3798#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f16)"]
3799#[inline]
3800#[target_feature(enable = "neon,fcma")]
3801#[target_feature(enable = "neon,fp16")]
3802#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3803#[cfg_attr(test, assert_instr(fcmla))]
3804pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
3805 unsafe extern "unadjusted" {
3806 #[cfg_attr(
3807 any(target_arch = "aarch64", target_arch = "arm64ec"),
3808 link_name = "llvm.aarch64.neon.vcmla.rot90.v8f16"
3809 )]
3810 fn _vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
3811 }
3812 unsafe { _vcmlaq_rot90_f16(a, b, c) }
3813}
3814#[doc = "Floating-point complex multiply accumulate"]
3815#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"]
3816#[inline]
3817#[target_feature(enable = "neon,fcma")]
3818#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3819#[cfg_attr(test, assert_instr(fcmla))]
3820pub fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
3821 unsafe extern "unadjusted" {
3822 #[cfg_attr(
3823 any(target_arch = "aarch64", target_arch = "arm64ec"),
3824 link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32"
3825 )]
3826 fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
3827 }
3828 unsafe { _vcmla_rot90_f32(a, b, c) }
3829}
3830#[doc = "Floating-point complex multiply accumulate"]
3831#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"]
3832#[inline]
3833#[target_feature(enable = "neon,fcma")]
3834#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3835#[cfg_attr(test, assert_instr(fcmla))]
3836pub fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
3837 unsafe extern "unadjusted" {
3838 #[cfg_attr(
3839 any(target_arch = "aarch64", target_arch = "arm64ec"),
3840 link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32"
3841 )]
3842 fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
3843 }
3844 unsafe { _vcmlaq_rot90_f32(a, b, c) }
3845}
3846#[doc = "Floating-point complex multiply accumulate"]
3847#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"]
3848#[inline]
3849#[target_feature(enable = "neon,fcma")]
3850#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3851#[cfg_attr(test, assert_instr(fcmla))]
3852pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
3853 unsafe extern "unadjusted" {
3854 #[cfg_attr(
3855 any(target_arch = "aarch64", target_arch = "arm64ec"),
3856 link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64"
3857 )]
3858 fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
3859 }
3860 unsafe { _vcmlaq_rot90_f64(a, b, c) }
3861}
3862#[doc = "Floating-point complex multiply accumulate"]
3863#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f16)"]
3864#[inline]
3865#[target_feature(enable = "neon,fcma")]
3866#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3867#[rustc_legacy_const_generics(3)]
3868#[target_feature(enable = "neon,fp16")]
3869#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3870pub fn vcmla_rot90_lane_f16<const LANE: i32>(
3871 a: float16x4_t,
3872 b: float16x4_t,
3873 c: float16x4_t,
3874) -> float16x4_t {
3875 static_assert_uimm_bits!(LANE, 1);
3876 unsafe {
3877 let c: float16x4_t = simd_shuffle!(
3878 c,
3879 c,
3880 [
3881 2 * LANE as u32,
3882 2 * LANE as u32 + 1,
3883 2 * LANE as u32,
3884 2 * LANE as u32 + 1
3885 ]
3886 );
3887 vcmla_rot90_f16(a, b, c)
3888 }
3889}
3890#[doc = "Floating-point complex multiply accumulate"]
3891#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f16)"]
3892#[inline]
3893#[target_feature(enable = "neon,fcma")]
3894#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3895#[rustc_legacy_const_generics(3)]
3896#[target_feature(enable = "neon,fp16")]
3897#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3898pub fn vcmlaq_rot90_lane_f16<const LANE: i32>(
3899 a: float16x8_t,
3900 b: float16x8_t,
3901 c: float16x4_t,
3902) -> float16x8_t {
3903 static_assert_uimm_bits!(LANE, 1);
3904 unsafe {
3905 let c: float16x8_t = simd_shuffle!(
3906 c,
3907 c,
3908 [
3909 2 * LANE as u32,
3910 2 * LANE as u32 + 1,
3911 2 * LANE as u32,
3912 2 * LANE as u32 + 1,
3913 2 * LANE as u32,
3914 2 * LANE as u32 + 1,
3915 2 * LANE as u32,
3916 2 * LANE as u32 + 1
3917 ]
3918 );
3919 vcmlaq_rot90_f16(a, b, c)
3920 }
3921}
3922#[doc = "Floating-point complex multiply accumulate"]
3923#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"]
3924#[inline]
3925#[target_feature(enable = "neon,fcma")]
3926#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3927#[rustc_legacy_const_generics(3)]
3928#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3929pub fn vcmla_rot90_lane_f32<const LANE: i32>(
3930 a: float32x2_t,
3931 b: float32x2_t,
3932 c: float32x2_t,
3933) -> float32x2_t {
3934 static_assert!(LANE == 0);
3935 unsafe {
3936 let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3937 vcmla_rot90_f32(a, b, c)
3938 }
3939}
3940#[doc = "Floating-point complex multiply accumulate"]
3941#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"]
3942#[inline]
3943#[target_feature(enable = "neon,fcma")]
3944#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3945#[rustc_legacy_const_generics(3)]
3946#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x4_t =
            simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_rot90_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x4_t =
            simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_rot90_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1,
             2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x4_t =
            simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmlaq_rot90_f32(a, b, c)
    }
}
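// The `lane`/`laneq` rotate-90 variants above only select a complex pair of
// `c`, splat it across the vector, and defer to the plain rotate-90
// intrinsic. A minimal sketch of that equivalence follows; it is our own
// test, not part of the generated file (the module name is ours), and it
// assumes an aarch64 target compiled with the `fcma` feature enabled.
#[cfg(all(test, target_arch = "aarch64", target_feature = "fcma"))]
mod vcmlaq_rot90_laneq_f32_sketch {
    use super::*;

    #[test]
    fn lane_variant_splats_the_selected_pair() {
        unsafe {
            let a = vld1q_f32([0.0f32, 0.0, 0.0, 0.0].as_ptr());
            let b = vld1q_f32([1.0f32, 2.0, 3.0, 4.0].as_ptr());
            let c = vld1q_f32([5.0f32, 6.0, 7.0, 8.0].as_ptr());
            // Complex pair 1 of `c` is (7.0, 8.0); splat it by hand.
            let c_splat = vld1q_f32([7.0f32, 8.0, 7.0, 8.0].as_ptr());
            let lhs: [f32; 4] = core::mem::transmute(vcmlaq_rot90_laneq_f32::<1>(a, b, c));
            let rhs: [f32; 4] = core::mem::transmute(vcmlaq_rot90_f32(a, b, c_splat));
            assert_eq!(lhs, rhs);
        }
    }
}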
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x2_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
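// A minimal usage sketch for the `vcopy` family (our own example, not part
// of the generated file; the module name is ours): the result is `a` with
// lane LANE1 replaced by lane LANE2 of `b`, and every other lane of `a`
// untouched.
#[cfg(all(test, target_arch = "aarch64", target_feature = "neon"))]
mod vcopy_lane_f32_sketch {
    use super::*;

    #[test]
    fn replaces_exactly_one_lane() {
        unsafe {
            let a = vld1_f32([1.0f32, 2.0].as_ptr());
            let b = vld1_f32([3.0f32, 4.0].as_ptr());
            // Lane 1 of `a` becomes lane 0 of `b`: [1.0, 3.0].
            let r = vcopy_lane_f32::<1, 0>(a, b);
            assert_eq!(vget_lane_f32::<0>(r), 1.0);
            assert_eq!(vget_lane_f32::<1>(r), 3.0);
        }
    }
}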
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x4_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x2_t,
) -> uint32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x4_t,
) -> poly16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
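// The `laneq` forms first widen the narrower operand so both shuffle inputs
// have the same length; indices 4 and up then address `b`. A sketch of the
// observable behavior (our own test, not part of the generated file; the
// module name is ours):
#[cfg(all(test, target_arch = "aarch64", target_feature = "neon"))]
mod vcopy_laneq_f32_sketch {
    use super::*;

    #[test]
    fn picks_a_lane_from_the_wider_vector() {
        unsafe {
            let a = vld1_f32([1.0f32, 2.0].as_ptr());
            let b = vld1q_f32([10.0f32, 20.0, 30.0, 40.0].as_ptr());
            // Lane 0 of `a` becomes lane 3 of `b`: [40.0, 2.0].
            let r = vcopy_laneq_f32::<0, 3>(a, b);
            assert_eq!(vget_lane_f32::<0>(r), 40.0);
            assert_eq!(vget_lane_f32::<1>(r), 2.0);
        }
    }
}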
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x4_t,
    b: int16x8_t,
) -> int16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x2_t,
    b: int32x4_t,
) -> int32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x8_t,
    b: uint8x16_t,
) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x8_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x4_t,
) -> uint32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x8_t,
    b: poly8x16_t,
) -> poly8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x8_t,
) -> poly16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x2_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    let b: float32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x1_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    let b: float64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
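// For 64-bit element types the narrow source is a one-lane vector, so the
// static assert pins LANE2 to 0 and only the destination lane is a real
// choice. A sketch (our own test, not part of the generated file; the
// module name is ours):
#[cfg(all(test, target_arch = "aarch64", target_feature = "neon"))]
mod vcopyq_lane_f64_sketch {
    use super::*;

    #[test]
    fn only_the_destination_lane_varies() {
        unsafe {
            let a = vld1q_f64([1.0f64, 2.0].as_ptr());
            let b = vld1_f64([9.0f64].as_ptr());
            // Lane 1 of `a` becomes the single lane of `b`: [1.0, 9.0].
            let r = vcopyq_lane_f64::<1, 0>(a, b);
            assert_eq!(vgetq_lane_f64::<0>(r), 1.0);
            assert_eq!(vgetq_lane_f64::<1>(r), 9.0);
        }
    }
}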
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x1_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    let b: int64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x1_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x1_t,
) -> poly64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    let b: poly64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    let b: int8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
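// Same pattern at sixteen lanes: exactly one byte of `a` is replaced and
// the other fifteen pass through. A sketch (our own test, not part of the
// generated file; the module name is ours):
#[cfg(all(test, target_arch = "aarch64", target_feature = "neon"))]
mod vcopyq_lane_s8_sketch {
    use super::*;

    #[test]
    fn replaces_one_byte_of_sixteen() {
        unsafe {
            let a_bytes: [i8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
            let b_bytes: [i8; 8] = [100, 101, 102, 103, 104, 105, 106, 107];
            let a = vld1q_s8(a_bytes.as_ptr());
            let b = vld1_s8(b_bytes.as_ptr());
            // Byte 10 of `a` becomes byte 7 of `b`.
            let r: [i8; 16] = core::mem::transmute(vcopyq_lane_s8::<10, 7>(a, b));
            let mut expected = a_bytes;
            expected[10] = 107;
            assert_eq!(r, expected);
        }
    }
}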
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x4_t,
) -> int16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    let b: int16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x2_t,
) -> int32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    let b: int32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x16_t,
    b: uint8x8_t,
) -> uint8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x4_t,
) -> uint16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x2_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x16_t,
    b: poly8x8_t,
) -> poly8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    let b: poly8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x4_t,
) -> poly16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    let b: poly16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x2_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
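// When both operands are q-registers no widening shuffle is needed; the
// match indexes `b` directly at `2 + LANE2` (or `4 +`, `8 +`, `16 +` for
// narrower elements). A sketch (our own test, not part of the generated
// file; the module name is ours):
#[cfg(all(test, target_arch = "aarch64", target_feature = "neon"))]
mod vcopyq_laneq_f64_sketch {
    use super::*;

    #[test]
    fn copies_between_two_q_registers() {
        unsafe {
            let a = vld1q_f64([1.0f64, 2.0].as_ptr());
            let b = vld1q_f64([3.0f64, 4.0].as_ptr());
            // Lane 0 of `a` becomes lane 1 of `b`: [4.0, 2.0].
            let r = vcopyq_laneq_f64::<0, 1>(a, b);
            assert_eq!(vgetq_lane_f64::<0>(r), 4.0);
            assert_eq!(vgetq_lane_f64::<1>(r), 2.0);
        }
    }
}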
5896#[doc = "Insert vector element from another vector element"]
5897#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"]
5898#[inline]
5899#[target_feature(enable = "neon")]
5900#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
5901#[rustc_legacy_const_generics(1, 3)]
5902#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5903pub fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(
5904 a: int8x16_t,
5905 b: int8x16_t,
5906) -> int8x16_t {
5907 static_assert_uimm_bits!(LANE1, 4);
5908 static_assert_uimm_bits!(LANE2, 4);
5909 unsafe {
5910 match LANE1 & 0b1111 {
5911 0 => simd_shuffle!(
5912 a,
5913 b,
5914 [
5915 16 + LANE2 as u32,
5916 1,
5917 2,
5918 3,
5919 4,
5920 5,
5921 6,
5922 7,
5923 8,
5924 9,
5925 10,
5926 11,
5927 12,
5928 13,
5929 14,
5930 15
5931 ]
5932 ),
5933 1 => simd_shuffle!(
5934 a,
5935 b,
5936 [
5937 0,
5938 16 + LANE2 as u32,
5939 2,
5940 3,
5941 4,
5942 5,
5943 6,
5944 7,
5945 8,
5946 9,
5947 10,
5948 11,
5949 12,
5950 13,
5951 14,
5952 15
5953 ]
5954 ),
5955 2 => simd_shuffle!(
5956 a,
5957 b,
5958 [
5959 0,
5960 1,
5961 16 + LANE2 as u32,
5962 3,
5963 4,
5964 5,
5965 6,
5966 7,
5967 8,
5968 9,
5969 10,
5970 11,
5971 12,
5972 13,
5973 14,
5974 15
5975 ]
5976 ),
5977 3 => simd_shuffle!(
5978 a,
5979 b,
5980 [
5981 0,
5982 1,
5983 2,
5984 16 + LANE2 as u32,
5985 4,
5986 5,
5987 6,
5988 7,
5989 8,
5990 9,
5991 10,
5992 11,
5993 12,
5994 13,
5995 14,
5996 15
5997 ]
5998 ),
5999 4 => simd_shuffle!(
6000 a,
6001 b,
6002 [
6003 0,
6004 1,
6005 2,
6006 3,
6007 16 + LANE2 as u32,
6008 5,
6009 6,
6010 7,
6011 8,
6012 9,
6013 10,
6014 11,
6015 12,
6016 13,
6017 14,
6018 15
6019 ]
6020 ),
6021 5 => simd_shuffle!(
6022 a,
6023 b,
6024 [
6025 0,
6026 1,
6027 2,
6028 3,
6029 4,
6030 16 + LANE2 as u32,
6031 6,
6032 7,
6033 8,
6034 9,
6035 10,
6036 11,
6037 12,
6038 13,
6039 14,
6040 15
6041 ]
6042 ),
6043 6 => simd_shuffle!(
6044 a,
6045 b,
6046 [
6047 0,
6048 1,
6049 2,
6050 3,
6051 4,
6052 5,
6053 16 + LANE2 as u32,
6054 7,
6055 8,
6056 9,
6057 10,
6058 11,
6059 12,
6060 13,
6061 14,
6062 15
6063 ]
6064 ),
6065 7 => simd_shuffle!(
6066 a,
6067 b,
6068 [
6069 0,
6070 1,
6071 2,
6072 3,
6073 4,
6074 5,
6075 6,
6076 16 + LANE2 as u32,
6077 8,
6078 9,
6079 10,
6080 11,
6081 12,
6082 13,
6083 14,
6084 15
6085 ]
6086 ),
6087 8 => simd_shuffle!(
6088 a,
6089 b,
6090 [
6091 0,
6092 1,
6093 2,
6094 3,
6095 4,
6096 5,
6097 6,
6098 7,
6099 16 + LANE2 as u32,
6100 9,
6101 10,
6102 11,
6103 12,
6104 13,
6105 14,
6106 15
6107 ]
6108 ),
6109 9 => simd_shuffle!(
6110 a,
6111 b,
6112 [
6113 0,
6114 1,
6115 2,
6116 3,
6117 4,
6118 5,
6119 6,
6120 7,
6121 8,
6122 16 + LANE2 as u32,
6123 10,
6124 11,
6125 12,
6126 13,
6127 14,
6128 15
6129 ]
6130 ),
6131 10 => simd_shuffle!(
6132 a,
6133 b,
6134 [
6135 0,
6136 1,
6137 2,
6138 3,
6139 4,
6140 5,
6141 6,
6142 7,
6143 8,
6144 9,
6145 16 + LANE2 as u32,
6146 11,
6147 12,
6148 13,
6149 14,
6150 15
6151 ]
6152 ),
6153 11 => simd_shuffle!(
6154 a,
6155 b,
6156 [
6157 0,
6158 1,
6159 2,
6160 3,
6161 4,
6162 5,
6163 6,
6164 7,
6165 8,
6166 9,
6167 10,
6168 16 + LANE2 as u32,
6169 12,
6170 13,
6171 14,
6172 15
6173 ]
6174 ),
6175 12 => simd_shuffle!(
6176 a,
6177 b,
6178 [
6179 0,
6180 1,
6181 2,
6182 3,
6183 4,
6184 5,
6185 6,
6186 7,
6187 8,
6188 9,
6189 10,
6190 11,
6191 16 + LANE2 as u32,
6192 13,
6193 14,
6194 15
6195 ]
6196 ),
6197 13 => simd_shuffle!(
6198 a,
6199 b,
6200 [
6201 0,
6202 1,
6203 2,
6204 3,
6205 4,
6206 5,
6207 6,
6208 7,
6209 8,
6210 9,
6211 10,
6212 11,
6213 12,
6214 16 + LANE2 as u32,
6215 14,
6216 15
6217 ]
6218 ),
6219 14 => simd_shuffle!(
6220 a,
6221 b,
6222 [
6223 0,
6224 1,
6225 2,
6226 3,
6227 4,
6228 5,
6229 6,
6230 7,
6231 8,
6232 9,
6233 10,
6234 11,
6235 12,
6236 13,
6237 16 + LANE2 as u32,
6238 15
6239 ]
6240 ),
6241 15 => simd_shuffle!(
6242 a,
6243 b,
6244 [
6245 0,
6246 1,
6247 2,
6248 3,
6249 4,
6250 5,
6251 6,
6252 7,
6253 8,
6254 9,
6255 10,
6256 11,
6257 12,
6258 13,
6259 14,
6260 16 + LANE2 as u32
6261 ]
6262 ),
6263 _ => unreachable_unchecked(),
6264 }
6265 }
6266}
6267#[doc = "Insert vector element from another vector element"]
6268#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"]
6269#[inline]
6270#[target_feature(enable = "neon")]
6271#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6272#[rustc_legacy_const_generics(1, 3)]
6273#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6274pub fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(
6275 a: int16x8_t,
6276 b: int16x8_t,
6277) -> int16x8_t {
6278 static_assert_uimm_bits!(LANE1, 3);
6279 static_assert_uimm_bits!(LANE2, 3);
6280 unsafe {
6281 match LANE1 & 0b111 {
6282 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
6283 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
6284 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
6285 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
6286 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
6287 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
6288 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
6289 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
6290 _ => unreachable_unchecked(),
6291 }
6292 }
6293}
6294#[doc = "Insert vector element from another vector element"]
6295#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"]
6296#[inline]
6297#[target_feature(enable = "neon")]
6298#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6299#[rustc_legacy_const_generics(1, 3)]
6300#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6301pub fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(
6302 a: int32x4_t,
6303 b: int32x4_t,
6304) -> int32x4_t {
6305 static_assert_uimm_bits!(LANE1, 2);
6306 static_assert_uimm_bits!(LANE2, 2);
6307 unsafe {
6308 match LANE1 & 0b11 {
6309 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
6310 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
6311 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
6312 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
6313 _ => unreachable_unchecked(),
6314 }
6315 }
6316}
6317#[doc = "Insert vector element from another vector element"]
6318#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"]
6319#[inline]
6320#[target_feature(enable = "neon")]
6321#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6322#[rustc_legacy_const_generics(1, 3)]
6323#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6324pub fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(
6325 a: int64x2_t,
6326 b: int64x2_t,
6327) -> int64x2_t {
6328 static_assert_uimm_bits!(LANE1, 1);
6329 static_assert_uimm_bits!(LANE2, 1);
6330 unsafe {
6331 match LANE1 & 0b1 {
6332 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
6333 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
6334 _ => unreachable_unchecked(),
6335 }
6336 }
6337}
6338#[doc = "Insert vector element from another vector element"]
6339#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"]
6340#[inline]
6341#[target_feature(enable = "neon")]
6342#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6343#[rustc_legacy_const_generics(1, 3)]
6344#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6345pub fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(
6346 a: uint8x16_t,
6347 b: uint8x16_t,
6348) -> uint8x16_t {
6349 static_assert_uimm_bits!(LANE1, 4);
6350 static_assert_uimm_bits!(LANE2, 4);
6351 unsafe {
6352 match LANE1 & 0b1111 {
6353 0 => simd_shuffle!(
6354 a,
6355 b,
6356 [
6357 16 + LANE2 as u32,
6358 1,
6359 2,
6360 3,
6361 4,
6362 5,
6363 6,
6364 7,
6365 8,
6366 9,
6367 10,
6368 11,
6369 12,
6370 13,
6371 14,
6372 15
6373 ]
6374 ),
6375 1 => simd_shuffle!(
6376 a,
6377 b,
6378 [
6379 0,
6380 16 + LANE2 as u32,
6381 2,
6382 3,
6383 4,
6384 5,
6385 6,
6386 7,
6387 8,
6388 9,
6389 10,
6390 11,
6391 12,
6392 13,
6393 14,
6394 15
6395 ]
6396 ),
6397 2 => simd_shuffle!(
6398 a,
6399 b,
6400 [
6401 0,
6402 1,
6403 16 + LANE2 as u32,
6404 3,
6405 4,
6406 5,
6407 6,
6408 7,
6409 8,
6410 9,
6411 10,
6412 11,
6413 12,
6414 13,
6415 14,
6416 15
6417 ]
6418 ),
6419 3 => simd_shuffle!(
6420 a,
6421 b,
6422 [
6423 0,
6424 1,
6425 2,
6426 16 + LANE2 as u32,
6427 4,
6428 5,
6429 6,
6430 7,
6431 8,
6432 9,
6433 10,
6434 11,
6435 12,
6436 13,
6437 14,
6438 15
6439 ]
6440 ),
6441 4 => simd_shuffle!(
6442 a,
6443 b,
6444 [
6445 0,
6446 1,
6447 2,
6448 3,
6449 16 + LANE2 as u32,
6450 5,
6451 6,
6452 7,
6453 8,
6454 9,
6455 10,
6456 11,
6457 12,
6458 13,
6459 14,
6460 15
6461 ]
6462 ),
6463 5 => simd_shuffle!(
6464 a,
6465 b,
6466 [
6467 0,
6468 1,
6469 2,
6470 3,
6471 4,
6472 16 + LANE2 as u32,
6473 6,
6474 7,
6475 8,
6476 9,
6477 10,
6478 11,
6479 12,
6480 13,
6481 14,
6482 15
6483 ]
6484 ),
6485 6 => simd_shuffle!(
6486 a,
6487 b,
6488 [
6489 0,
6490 1,
6491 2,
6492 3,
6493 4,
6494 5,
6495 16 + LANE2 as u32,
6496 7,
6497 8,
6498 9,
6499 10,
6500 11,
6501 12,
6502 13,
6503 14,
6504 15
6505 ]
6506 ),
6507 7 => simd_shuffle!(
6508 a,
6509 b,
6510 [
6511 0,
6512 1,
6513 2,
6514 3,
6515 4,
6516 5,
6517 6,
6518 16 + LANE2 as u32,
6519 8,
6520 9,
6521 10,
6522 11,
6523 12,
6524 13,
6525 14,
6526 15
6527 ]
6528 ),
6529 8 => simd_shuffle!(
6530 a,
6531 b,
6532 [
6533 0,
6534 1,
6535 2,
6536 3,
6537 4,
6538 5,
6539 6,
6540 7,
6541 16 + LANE2 as u32,
6542 9,
6543 10,
6544 11,
6545 12,
6546 13,
6547 14,
6548 15
6549 ]
6550 ),
6551 9 => simd_shuffle!(
6552 a,
6553 b,
6554 [
6555 0,
6556 1,
6557 2,
6558 3,
6559 4,
6560 5,
6561 6,
6562 7,
6563 8,
6564 16 + LANE2 as u32,
6565 10,
6566 11,
6567 12,
6568 13,
6569 14,
6570 15
6571 ]
6572 ),
6573 10 => simd_shuffle!(
6574 a,
6575 b,
6576 [
6577 0,
6578 1,
6579 2,
6580 3,
6581 4,
6582 5,
6583 6,
6584 7,
6585 8,
6586 9,
6587 16 + LANE2 as u32,
6588 11,
6589 12,
6590 13,
6591 14,
6592 15
6593 ]
6594 ),
6595 11 => simd_shuffle!(
6596 a,
6597 b,
6598 [
6599 0,
6600 1,
6601 2,
6602 3,
6603 4,
6604 5,
6605 6,
6606 7,
6607 8,
6608 9,
6609 10,
6610 16 + LANE2 as u32,
6611 12,
6612 13,
6613 14,
6614 15
6615 ]
6616 ),
6617 12 => simd_shuffle!(
6618 a,
6619 b,
6620 [
6621 0,
6622 1,
6623 2,
6624 3,
6625 4,
6626 5,
6627 6,
6628 7,
6629 8,
6630 9,
6631 10,
6632 11,
6633 16 + LANE2 as u32,
6634 13,
6635 14,
6636 15
6637 ]
6638 ),
6639 13 => simd_shuffle!(
6640 a,
6641 b,
6642 [
6643 0,
6644 1,
6645 2,
6646 3,
6647 4,
6648 5,
6649 6,
6650 7,
6651 8,
6652 9,
6653 10,
6654 11,
6655 12,
6656 16 + LANE2 as u32,
6657 14,
6658 15
6659 ]
6660 ),
6661 14 => simd_shuffle!(
6662 a,
6663 b,
6664 [
6665 0,
6666 1,
6667 2,
6668 3,
6669 4,
6670 5,
6671 6,
6672 7,
6673 8,
6674 9,
6675 10,
6676 11,
6677 12,
6678 13,
6679 16 + LANE2 as u32,
6680 15
6681 ]
6682 ),
6683 15 => simd_shuffle!(
6684 a,
6685 b,
6686 [
6687 0,
6688 1,
6689 2,
6690 3,
6691 4,
6692 5,
6693 6,
6694 7,
6695 8,
6696 9,
6697 10,
6698 11,
6699 12,
6700 13,
6701 14,
6702 16 + LANE2 as u32
6703 ]
6704 ),
6705 _ => unreachable_unchecked(),
6706 }
6707 }
6708}
6709#[doc = "Insert vector element from another vector element"]
6710#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"]
6711#[inline]
6712#[target_feature(enable = "neon")]
6713#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6714#[rustc_legacy_const_generics(1, 3)]
6715#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6716pub fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(
6717 a: uint16x8_t,
6718 b: uint16x8_t,
6719) -> uint16x8_t {
6720 static_assert_uimm_bits!(LANE1, 3);
6721 static_assert_uimm_bits!(LANE2, 3);
6722 unsafe {
6723 match LANE1 & 0b111 {
6724 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
6725 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
6726 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
6727 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
6728 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
6729 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
6730 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
6731 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
6732 _ => unreachable_unchecked(),
6733 }
6734 }
6735}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x2_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x16_t,
    b: poly8x16_t,
) -> poly8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 4);
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(
                a,
                b,
                [
                    16 + LANE2 as u32,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            1 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    16 + LANE2 as u32,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            2 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    16 + LANE2 as u32,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            3 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    16 + LANE2 as u32,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            4 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    16 + LANE2 as u32,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            5 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    16 + LANE2 as u32,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            6 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    16 + LANE2 as u32,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            7 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    16 + LANE2 as u32,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            8 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    16 + LANE2 as u32,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            9 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    16 + LANE2 as u32,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            10 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    16 + LANE2 as u32,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            11 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    16 + LANE2 as u32,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            12 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    16 + LANE2 as u32,
                    13,
                    14,
                    15
                ]
            ),
            13 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    16 + LANE2 as u32,
                    14,
                    15
                ]
            ),
            14 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    16 + LANE2 as u32,
                    15
                ]
            ),
            15 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    16 + LANE2 as u32
                ]
            ),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x8_t,
) -> poly16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x2_t,
) -> poly64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Create a new vector from a 64-bit bit pattern"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcreate_f64(a: u64) -> float64x1_t {
    unsafe { transmute(a) }
}
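// Illustrative usage sketch (editor's addition): `vcreate_f64` reinterprets
// the 64-bit pattern rather than converting the value, so the IEEE 754
// encoding of 1.0 comes back out as 1.0. `vget_lane_f64` is the usual stdarch
// helper; assumes a neon-enabled context.
//
//     let v = vcreate_f64(0x3FF0_0000_0000_0000); // bit pattern of 1.0f64
//     assert_eq!(vget_lane_f64::<0>(v), 1.0);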
#[doc = "Floating-point convert to lower precision narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
    unsafe { simd_cast(a) }
}
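// Illustrative usage sketch (editor's addition): widening f32 -> f64 is
// exact, while narrowing f64 -> f32 rounds to the nearest representable
// value, matching an `as f32` cast lane by lane. `vdupq_n_f64` and
// `vget_lane_f32` are the usual stdarch helpers.
//
//     let wide = vdupq_n_f64(0.1); // 0.1 is not exactly representable
//     let narrow = vcvt_f32_f64(wide);
//     assert_eq!(vget_lane_f32::<0>(narrow), 0.1f64 as f32);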
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t {
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t {
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t {
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
    unsafe { simd_cast(a) }
}
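// Illustrative usage sketch (editor's addition): the signed (`scvtf`) and
// unsigned (`ucvtf`) variants interpret the same bits differently.
// `vdup_n_s64`, `vdup_n_u64`, and `vget_lane_f64` are the usual stdarch
// helpers.
//
//     let v = vdup_n_s64(-3);
//     assert_eq!(vget_lane_f64::<0>(vcvt_f64_s64(v)), -3.0);
//     // Reinterpreted as u64, the same bits are a huge positive value:
//     let u = vdup_n_u64(-3i64 as u64);
//     assert!(vget_lane_f64::<0>(vcvt_f64_u64(u)) > 1.8e19);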
#[doc = "Floating-point convert to lower precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f16_f32)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t {
    vcombine_f16(a, vcvt_f16_f32(b))
}
#[doc = "Floating-point convert to higher precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t {
    vcvt_f32_f16(vget_high_f16(a))
}
#[doc = "Floating-point convert to lower precision narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
    unsafe {
        let b: float32x2_t = simd_shuffle!(a, a, [2, 3]);
        simd_cast(b)
    }
}
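// Illustrative usage sketch (editor's addition): the `_high_` variants work
// on the upper half of a quad register, so widening [1.0, 2.0, 3.0, 4.0]
// converts lanes 2 and 3. `vld1q_f32` and `vgetq_lane_f64` are the usual
// stdarch helpers; assumes an unsafe, neon-enabled context and that `src`
// outlives the load.
//
//     let src = [1.0f32, 2.0, 3.0, 4.0];
//     let a = vld1q_f32(src.as_ptr());
//     let wide = vcvt_high_f64_f32(a);
//     assert_eq!(vgetq_lane_f64::<0>(wide), 3.0);
//     assert_eq!(vgetq_lane_f64::<1>(wide), 4.0);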
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t;
    }
    unsafe { _vcvt_n_f64_s64(a, N) }
}
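// Illustrative usage sketch (editor's addition): the `_n_` converts treat the
// integer as a fixed-point number with `N` fractional bits, i.e. the result
// is `a as f64 / 2^N`. `vdup_n_s64` and `vget_lane_f64` are the usual stdarch
// helpers.
//
//     let v = vdup_n_s64(10);
//     let r = vcvt_n_f64_s64::<2>(v); // 10 / 2^2
//     assert_eq!(vget_lane_f64::<0>(r), 2.5);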
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t;
    }
    unsafe { _vcvtq_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_u64(a: uint64x1_t, n: i32) -> float64x1_t;
    }
    unsafe { _vcvt_n_f64_u64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_u64(a: uint64x2_t, n: i32) -> float64x2_t;
    }
    unsafe { _vcvtq_n_f64_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64"
        )]
        fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t;
    }
    unsafe { _vcvt_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64"
        )]
        fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t;
    }
    unsafe { _vcvtq_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64"
        )]
        fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> uint64x1_t;
    }
    unsafe { _vcvt_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64"
        )]
        fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> uint64x2_t;
    }
    unsafe { _vcvtq_n_u64_f64(a, N) }
}
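// Illustrative usage sketch (editor's addition): in the float-to-fixed
// direction the value is scaled up by 2^N and then truncated toward zero, so
// with N = 3 the value 2.7 becomes trunc(2.7 * 8) = 21. `vdup_n_f64` and
// `vget_lane_u64` are the usual stdarch helpers.
//
//     let v = vdup_n_f64(2.7);
//     let r = vcvt_n_u64_f64::<3>(v);
//     assert_eq!(vget_lane_u64::<0>(r), 21);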
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v1i64.v1f64"
        )]
        fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvt_s64_f64(a) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v2i64.v2f64"
        )]
        fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v1i64.v1f64"
        )]
        fn _vcvt_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcvt_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v2i64.v2f64"
        )]
        fn _vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcvtq_u64_f64(a) }
}
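// Illustrative usage sketch (editor's addition): these lower to LLVM's
// saturating `fptosi.sat`/`fptoui.sat`, so out-of-range inputs clamp to the
// integer bounds instead of being undefined, and NaN maps to 0. `vdup_n_f64`
// and `vget_lane_u64` are the usual stdarch helpers.
//
//     assert_eq!(vget_lane_u64::<0>(vcvt_u64_f64(vdup_n_f64(-1.5))), 0);
//     assert_eq!(vget_lane_u64::<0>(vcvt_u64_f64(vdup_n_f64(f64::NAN))), 0);
//     assert_eq!(vget_lane_u64::<0>(vcvt_u64_f64(vdup_n_f64(1.0e300))), u64::MAX);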
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i16.v4f16"
        )]
        fn _vcvta_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    unsafe { _vcvta_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v8i16.v8f16"
        )]
        fn _vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    unsafe { _vcvtaq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32"
        )]
        fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    unsafe { _vcvta_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32"
        )]
        fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    unsafe { _vcvtaq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64"
        )]
        fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvta_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64"
        )]
        fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtaq_s64_f64(a) }
}
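// Illustrative usage sketch (editor's addition): `fcvtas` rounds to the
// nearest integer with ties going *away* from zero, unlike the default IEEE
// ties-to-even, so 2.5 becomes 3 and -0.5 becomes -1. `vdup_n_f32` and
// `vget_lane_s32` are the usual stdarch helpers.
//
//     assert_eq!(vget_lane_s32::<0>(vcvta_s32_f32(vdup_n_f32(2.5))), 3);
//     assert_eq!(vget_lane_s32::<0>(vcvta_s32_f32(vdup_n_f32(-0.5))), -1);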
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i16.v4f16"
        )]
        fn _vcvta_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    unsafe { _vcvta_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v8i16.v8f16"
        )]
        fn _vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    unsafe { _vcvtaq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32"
        )]
        fn _vcvta_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    unsafe { _vcvta_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32"
        )]
        fn _vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    unsafe { _vcvtaq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64"
        )]
        fn _vcvta_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcvta_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64"
        )]
        fn _vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcvtaq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_s16_f16(a: f16) -> i16 {
    vcvtah_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f16"
        )]
        fn _vcvtah_s32_f16(a: f16) -> i32;
    }
    unsafe { _vcvtah_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f16"
        )]
        fn _vcvtah_s64_f16(a: f16) -> i64;
    }
    unsafe { _vcvtah_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_u16_f16(a: f16) -> u16 {
    vcvtah_u32_f16(a) as u16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f16"
        )]
        fn _vcvtah_u32_f16(a: f16) -> u32;
    }
    unsafe { _vcvtah_u32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f16"
        )]
        fn _vcvtah_u64_f16(a: f16) -> u64;
    }
    unsafe { _vcvtah_u64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f32"
        )]
        fn _vcvtas_s32_f32(a: f32) -> i32;
    }
    unsafe { _vcvtas_s32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f64"
        )]
        fn _vcvtad_s64_f64(a: f64) -> i64;
    }
    unsafe { _vcvtad_s64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f32"
        )]
        fn _vcvtas_u32_f32(a: f32) -> u32;
    }
    unsafe { _vcvtas_u32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f64"
        )]
        fn _vcvtad_u64_f64(a: f64) -> u64;
    }
    unsafe { _vcvtad_u64_f64(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_s64(a: i64) -> f64 {
    a as f64
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_s32(a: i32) -> f32 {
    a as f32
}
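// Illustrative usage sketch (editor's addition): these scalar converts
// compile down to a plain `as` cast, which rounds to the nearest
// representable float when the integer cannot be held exactly; 2^24 + 1 is
// the first i32 that f32 cannot represent.
//
//     assert_eq!(vcvts_f32_s32(16_777_216), 16_777_216.0);
//     assert_eq!(vcvts_f32_s32(16_777_217), 16_777_216.0); // rounded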
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s16)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_s16(a: i16) -> f16 {
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s32)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_s32(a: i32) -> f16 {
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s64)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_s64(a: i64) -> f16 {
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u16)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_u16(a: u16) -> f16 {
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u32)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_u32(a: u32) -> f16 {
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u64)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_u64(a: u64) -> f16 {
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s16)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_s16<const N: i32>(a: i16) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    vcvth_n_f16_s32::<N>(a as i32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s32)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_s32<const N: i32>(a: i32) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i32"
        )]
        fn _vcvth_n_f16_s32(a: i32, n: i32) -> f16;
    }
    unsafe { _vcvth_n_f16_s32(a, N) }
}
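// Illustrative usage sketch (editor's addition): as with the 64-bit `_n_`
// variants, the input is read as a fixed-point value with `N` fractional
// bits, so 24 with four fractional bits is 24 / 2^4 = 1.5.
//
//     let r: f16 = vcvth_n_f16_s32::<4>(24);
//     assert_eq!(r, 1.5);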
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s64)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_s64<const N: i32>(a: i64) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i64"
        )]
        fn _vcvth_n_f16_s64(a: i64, n: i32) -> f16;
    }
    unsafe { _vcvth_n_f16_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u16)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_u16<const N: i32>(a: u16) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    vcvth_n_f16_u32::<N>(a as u32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u32)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_u32<const N: i32>(a: u32) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i32"
        )]
        fn _vcvth_n_f16_u32(a: u32, n: i32) -> f16;
    }
    unsafe { _vcvth_n_f16_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u64)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_u64<const N: i32>(a: u64) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i64"
        )]
        fn _vcvth_n_f16_u64(a: u64, n: i32) -> f16;
    }
    unsafe { _vcvth_n_f16_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_s16_f16<const N: i32>(a: f16) -> i16 {
    static_assert!(N >= 1 && N <= 16);
    vcvth_n_s32_f16::<N>(a) as i16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_s32_f16<const N: i32>(a: f16) -> i32 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f16"
        )]
        fn _vcvth_n_s32_f16(a: f16, n: i32) -> i32;
    }
    unsafe { _vcvth_n_s32_f16(a, N) }
}
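// Illustrative usage sketch (editor's addition): going back to fixed point
// scales by 2^N before truncating toward zero, so 2.5 with two fractional
// bits is trunc(2.5 * 4) = 10.
//
//     let x: f16 = 2.5;
//     assert_eq!(vcvth_n_s32_f16::<2>(x), 10);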
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_s64_f16<const N: i32>(a: f16) -> i64 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f16"
        )]
        fn _vcvth_n_s64_f16(a: f16, n: i32) -> i64;
    }
    unsafe { _vcvth_n_s64_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_u16_f16<const N: i32>(a: f16) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    vcvth_n_u32_f16::<N>(a) as u16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_u32_f16<const N: i32>(a: f16) -> u32 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f16"
        )]
        fn _vcvth_n_u32_f16(a: f16, n: i32) -> u32;
    }
    unsafe { _vcvth_n_u32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_u64_f16<const N: i32>(a: f16) -> u64 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f16"
        )]
        fn _vcvth_n_u64_f16(a: f16, n: i32) -> u64;
    }
    unsafe { _vcvth_n_u64_f16(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_s16_f16(a: f16) -> i16 {
    a as i16
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_s32_f16(a: f16) -> i32 {
    a as i32
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_s64_f16(a: f16) -> i64 {
    a as i64
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_u16_f16(a: f16) -> u16 {
    a as u16
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_u32_f16(a: f16) -> u32 {
    a as u32
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_u64_f16(a: f16) -> u64 {
    a as u64
}
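// Illustrative usage sketch (editor's addition): these scalar converts are
// plain `as` casts, which in Rust saturate on overflow and map NaN to 0, so a
// negative f16 converted to an unsigned type yields 0.
//
//     let x: f16 = -1.0;
//     assert_eq!(vcvth_u16_f16(x), 0);
//     assert_eq!(vcvth_s16_f16(x), -1);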
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i16.v4f16"
        )]
        fn _vcvtm_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    unsafe { _vcvtm_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v8i16.v8f16"
        )]
        fn _vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    unsafe { _vcvtmq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32"
        )]
        fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    unsafe { _vcvtm_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32"
        )]
        fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    unsafe { _vcvtmq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64"
        )]
        fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvtm_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64"
        )]
        fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtmq_s64_f64(a) }
}
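// Illustrative usage sketch (editor's addition): `fcvtms` rounds toward minus
// infinity (a floor), so -0.5 becomes -1 where a truncating convert would
// give 0. `vdup_n_f32` and `vget_lane_s32` are the usual stdarch helpers.
//
//     assert_eq!(vget_lane_s32::<0>(vcvtm_s32_f32(vdup_n_f32(-0.5))), -1);
//     assert_eq!(vget_lane_s32::<0>(vcvtm_s32_f32(vdup_n_f32(1.9))), 1);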
8257#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8258#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u16_f16)"]
8259#[inline]
8260#[cfg_attr(test, assert_instr(fcvtmu))]
8261#[target_feature(enable = "neon,fp16")]
8262#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8263pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t {
8264 unsafe extern "unadjusted" {
8265 #[cfg_attr(
8266 any(target_arch = "aarch64", target_arch = "arm64ec"),
8267 link_name = "llvm.aarch64.neon.fcvtmu.v4i16.v4f16"
8268 )]
8269 fn _vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t;
8270 }
8271 unsafe { _vcvtm_u16_f16(a) }
8272}
8273#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8274#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u16_f16)"]
8275#[inline]
8276#[cfg_attr(test, assert_instr(fcvtmu))]
8277#[target_feature(enable = "neon,fp16")]
8278#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8279pub fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t {
8280 unsafe extern "unadjusted" {
8281 #[cfg_attr(
8282 any(target_arch = "aarch64", target_arch = "arm64ec"),
8283 link_name = "llvm.aarch64.neon.fcvtmu.v8i16.v8f16"
8284 )]
8285 fn _vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t;
8286 }
8287 unsafe { _vcvtmq_u16_f16(a) }
8288}
8289#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8290#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"]
8291#[inline]
8292#[target_feature(enable = "neon")]
8293#[cfg_attr(test, assert_instr(fcvtmu))]
8294#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8295pub fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
8296 unsafe extern "unadjusted" {
8297 #[cfg_attr(
8298 any(target_arch = "aarch64", target_arch = "arm64ec"),
8299 link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32"
8300 )]
8301 fn _vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t;
8302 }
8303 unsafe { _vcvtm_u32_f32(a) }
8304}
8305#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8306#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"]
8307#[inline]
8308#[target_feature(enable = "neon")]
8309#[cfg_attr(test, assert_instr(fcvtmu))]
8310#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8311pub fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
8312 unsafe extern "unadjusted" {
8313 #[cfg_attr(
8314 any(target_arch = "aarch64", target_arch = "arm64ec"),
8315 link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32"
8316 )]
8317 fn _vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t;
8318 }
8319 unsafe { _vcvtmq_u32_f32(a) }
8320}
8321#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8322#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"]
8323#[inline]
8324#[target_feature(enable = "neon")]
8325#[cfg_attr(test, assert_instr(fcvtmu))]
8326#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8327pub fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
8328 unsafe extern "unadjusted" {
8329 #[cfg_attr(
8330 any(target_arch = "aarch64", target_arch = "arm64ec"),
8331 link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64"
8332 )]
8333 fn _vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t;
8334 }
8335 unsafe { _vcvtm_u64_f64(a) }
8336}
8337#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8338#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"]
8339#[inline]
8340#[target_feature(enable = "neon")]
8341#[cfg_attr(test, assert_instr(fcvtmu))]
8342#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8343pub fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
8344 unsafe extern "unadjusted" {
8345 #[cfg_attr(
8346 any(target_arch = "aarch64", target_arch = "arm64ec"),
8347 link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64"
8348 )]
8349 fn _vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t;
8350 }
8351 unsafe { _vcvtmq_u64_f64(a) }
8352}
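// Editor's sketch (not part of the generated bindings): the FCVTM family
// rounds toward minus infinity, i.e. it floors before converting, and
// out-of-range results saturate to the destination type, so negative inputs
// map to 0 in the unsigned variants. Illustrative values:
//
//     let r = vcvtmq_u64_f64(vdupq_n_f64(1.9)); // every lane becomes 1
//     let s = vcvtm_s64_f64(vdup_n_f64(-0.5));  // lane becomes -1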
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtmh_s16_f16(a: f16) -> i16 {
    vcvtmh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtmh_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f16"
        )]
        fn _vcvtmh_s32_f16(a: f16) -> i32;
    }
    unsafe { _vcvtmh_s32_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtmh_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f16"
        )]
        fn _vcvtmh_s64_f16(a: f16) -> i64;
    }
    unsafe { _vcvtmh_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtmh_u16_f16(a: f16) -> u16 {
    vcvtmh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtmh_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f16"
        )]
        fn _vcvtmh_u32_f16(a: f16) -> u32;
    }
    unsafe { _vcvtmh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtmh_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f16"
        )]
        fn _vcvtmh_u64_f16(a: f16) -> u64;
    }
    unsafe { _vcvtmh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f32"
        )]
        fn _vcvtms_s32_f32(a: f32) -> i32;
    }
    unsafe { _vcvtms_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f64"
        )]
        fn _vcvtmd_s64_f64(a: f64) -> i64;
    }
    unsafe { _vcvtmd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f32"
        )]
        fn _vcvtms_u32_f32(a: f32) -> u32;
    }
    unsafe { _vcvtms_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f64"
        )]
        fn _vcvtmd_u64_f64(a: f64) -> u64;
    }
    unsafe { _vcvtmd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i16.v4f16"
        )]
        fn _vcvtn_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    unsafe { _vcvtn_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v8i16.v8f16"
        )]
        fn _vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    unsafe { _vcvtnq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32"
        )]
        fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    unsafe { _vcvtn_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32"
        )]
        fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    unsafe { _vcvtnq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64"
        )]
        fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvtn_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64"
        )]
        fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtnq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i16.v4f16"
        )]
        fn _vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    unsafe { _vcvtn_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v8i16.v8f16"
        )]
        fn _vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    unsafe { _vcvtnq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32"
        )]
        fn _vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    unsafe { _vcvtn_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32"
        )]
        fn _vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    unsafe { _vcvtnq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64"
        )]
        fn _vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcvtn_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64"
        )]
        fn _vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcvtnq_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnh_s16_f16(a: f16) -> i16 {
    vcvtnh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnh_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f16"
        )]
        fn _vcvtnh_s32_f16(a: f16) -> i32;
    }
    unsafe { _vcvtnh_s32_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnh_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f16"
        )]
        fn _vcvtnh_s64_f16(a: f16) -> i64;
    }
    unsafe { _vcvtnh_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnh_u16_f16(a: f16) -> u16 {
    vcvtnh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnh_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f16"
        )]
        fn _vcvtnh_u32_f16(a: f16) -> u32;
    }
    unsafe { _vcvtnh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnh_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f16"
        )]
        fn _vcvtnh_u64_f16(a: f16) -> u64;
    }
    unsafe { _vcvtnh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f32"
        )]
        fn _vcvtns_s32_f32(a: f32) -> i32;
    }
    unsafe { _vcvtns_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f64"
        )]
        fn _vcvtnd_s64_f64(a: f64) -> i64;
    }
    unsafe { _vcvtnd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f32"
        )]
        fn _vcvtns_u32_f32(a: f32) -> u32;
    }
    unsafe { _vcvtns_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f64"
        )]
        fn _vcvtnd_u64_f64(a: f64) -> u64;
    }
    unsafe { _vcvtnd_u64_f64(a) }
}
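// Editor's sketch (illustrative, not part of the generated bindings): the
// FCVTN family rounds to nearest with ties to even, which is what separates
// it from the floor and ceiling families above and below:
//
//     let a = vcvtnd_s64_f64(2.5); // 2: the tie goes to the even neighbour
//     let b = vcvtnd_s64_f64(3.5); // 4: likewise
//     let c = vcvtnd_s64_f64(2.6); // 3: non-ties round to nearest as usual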
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i16.v4f16"
        )]
        fn _vcvtp_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    unsafe { _vcvtp_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v8i16.v8f16"
        )]
        fn _vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    unsafe { _vcvtpq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32"
        )]
        fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    unsafe { _vcvtp_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32"
        )]
        fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    unsafe { _vcvtpq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64"
        )]
        fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvtp_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64"
        )]
        fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtpq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i16.v4f16"
        )]
        fn _vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    unsafe { _vcvtp_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v8i16.v8f16"
        )]
        fn _vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    unsafe { _vcvtpq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32"
        )]
        fn _vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    unsafe { _vcvtp_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32"
        )]
        fn _vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    unsafe { _vcvtpq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64"
        )]
        fn _vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcvtp_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64"
        )]
        fn _vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcvtpq_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtph_s16_f16(a: f16) -> i16 {
    vcvtph_s32_f16(a) as i16
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtph_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f16"
        )]
        fn _vcvtph_s32_f16(a: f16) -> i32;
    }
    unsafe { _vcvtph_s32_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtph_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f16"
        )]
        fn _vcvtph_s64_f16(a: f16) -> i64;
    }
    unsafe { _vcvtph_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtph_u16_f16(a: f16) -> u16 {
    vcvtph_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtph_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f16"
        )]
        fn _vcvtph_u32_f16(a: f16) -> u32;
    }
    unsafe { _vcvtph_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtph_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f16"
        )]
        fn _vcvtph_u64_f16(a: f16) -> u64;
    }
    unsafe { _vcvtph_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f32"
        )]
        fn _vcvtps_s32_f32(a: f32) -> i32;
    }
    unsafe { _vcvtps_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f64"
        )]
        fn _vcvtpd_s64_f64(a: f64) -> i64;
    }
    unsafe { _vcvtpd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f32"
        )]
        fn _vcvtps_u32_f32(a: f32) -> u32;
    }
    unsafe { _vcvtps_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f64"
        )]
        fn _vcvtpd_u64_f64(a: f64) -> u64;
    }
    unsafe { _vcvtpd_u64_f64(a) }
}
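// Editor's sketch (illustrative values only): FCVTP rounds toward plus
// infinity, i.e. it takes the ceiling before converting:
//
//     let a = vcvtps_s32_f32(1.1);  // 2
//     let b = vcvtpd_s64_f64(-1.9); // -1: still rounds toward +inf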
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_u32(a: u32) -> f32 {
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_u64(a: u64) -> f64 {
    a as f64
}
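// Editor's note: the two scalar converts above are plain `as` casts; the
// assert_instr(ucvtf) test attributes check that each compiles to a single
// UCVTF. A hedged usage sketch:
//
//     assert_eq!(vcvts_f32_u32(3), 3.0);
//     assert_eq!(vcvtd_f64_u64(10), 10.0);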
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32"
        )]
        fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32;
    }
    unsafe { _vcvts_n_f32_s32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64;
    }
    unsafe { _vcvtd_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32"
        )]
        fn _vcvts_n_f32_u32(a: u32, n: i32) -> f32;
    }
    unsafe { _vcvts_n_f32_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_u64(a: u64, n: i32) -> f64;
    }
    unsafe { _vcvtd_n_f64_u64(a, N) }
}
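// Editor's sketch (assumed semantics, matching Arm's fixed-point converts):
// N is the number of fractional bits, so the result is a * 2^(-N):
//
//     let x = vcvts_n_f32_u32::<2>(6);   // 6 / 2^2 = 1.5
//     let y = vcvtd_n_f64_s64::<8>(256); // 256 / 2^8 = 1.0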
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32"
        )]
        fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32;
    }
    unsafe { _vcvts_n_s32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64"
        )]
        fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64;
    }
    unsafe { _vcvtd_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32"
        )]
        fn _vcvts_n_u32_f32(a: f32, n: i32) -> u32;
    }
    unsafe { _vcvts_n_u32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64"
        )]
        fn _vcvtd_n_u64_f64(a: f64, n: i32) -> u64;
    }
    unsafe { _vcvtd_n_u64_f64(a, N) }
}
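// Editor's sketch (illustrative): the float-to-fixed-point direction
// multiplies by 2^N and then truncates toward zero, saturating on overflow:
//
//     let x = vcvts_n_s32_f32::<2>(1.5);  // 1.5 * 2^2 = 6
//     let y = vcvtd_n_u64_f64::<4>(0.99); // 0.99 * 16 = 15.84 -> 15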
#[doc = "Floating-point convert to signed integer, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_s32_f32(a: f32) -> i32 {
    a as i32
}
#[doc = "Floating-point convert to signed integer, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_s64_f64(a: f64) -> i64 {
    a as i64
}
#[doc = "Floating-point convert to unsigned integer, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_u32_f32(a: f32) -> u32 {
    a as u32
}
#[doc = "Floating-point convert to unsigned integer, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_u64_f64(a: f64) -> u64 {
    a as u64
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64"
        )]
        fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t;
    }
    unsafe { _vcvtx_f32_f64(a) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtxd_f32_f64(a: f64) -> f32 {
    unsafe { simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) }
}
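// Editor's note: FCVTXN's round-to-odd mode avoids double rounding when a
// value is narrowed twice, e.g. f64 -> f32 -> f16. A hedged sketch of that
// idiom (`wide` is a hypothetical float64x2_t):
//
//     let narrow = vcvtx_f32_f64(wide); // f64 -> f32, round to odd
//     let half = vcvt_f16_f32(vcombine_f32(narrow, narrow)); // final rounding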
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vdivh_f16(a: f16, b: f16) -> f16 {
    a / b
}
#[doc = "Dot product arithmetic (indexed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub fn vdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x16_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t = transmute(c);
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vdot_s32(a, b, transmute(c))
    }
}
#[doc = "Dot product arithmetic (indexed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub fn vdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t = transmute(c);
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vdotq_s32(a, b, transmute(c))
    }
}
#[doc = "Dot product arithmetic (indexed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub fn vdot_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uint8x16_t) -> uint32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: uint32x4_t = transmute(c);
        let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vdot_u32(a, b, transmute(c))
    }
}
#[doc = "Dot product arithmetic (indexed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub fn vdotq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: uint32x4_t = transmute(c);
        let c: uint32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vdotq_u32(a, b, transmute(c))
    }
}
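// Editor's sketch of the dot-product semantics (names are illustrative): each
// 32-bit accumulator lane adds the sum of four adjacent 8-bit products, and
// LANE picks which 4-byte group of `c` is broadcast to all groups:
//
//     // r[i] = a[i] + b[4i]*g[0] + b[4i+1]*g[1] + b[4i+2]*g[2] + b[4i+3]*g[3]
//     // where g is the LANE-th 4-byte group of c
//     let r = vdotq_laneq_u32::<0>(acc, b, c);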
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
    static_assert_uimm_bits!(N, 1);
    unsafe { transmute::<f64, _>(simd_extract!(a, N as u32)) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
    static_assert_uimm_bits!(N, 1);
    unsafe { transmute::<u64, _>(simd_extract!(a, N as u32)) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
9642#[doc = "Extract an element from a vector"]
9643#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"]
9644#[inline]
9645#[target_feature(enable = "neon")]
9646#[cfg_attr(test, assert_instr(nop, N = 8))]
9647#[rustc_legacy_const_generics(1)]
9648#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9649pub fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
9650 static_assert_uimm_bits!(N, 4);
9651 unsafe { simd_extract!(a, N as u32) }
9652}
9653#[doc = "Extract an element from a vector"]
9654#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"]
9655#[inline]
9656#[target_feature(enable = "neon")]
9657#[cfg_attr(test, assert_instr(nop, N = 8))]
9658#[rustc_legacy_const_generics(1)]
9659#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9660pub fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
9661 static_assert_uimm_bits!(N, 4);
9662 unsafe { simd_extract!(a, N as u32) }
9663}
9664#[doc = "Extract an element from a vector"]
9665#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"]
9666#[inline]
9667#[target_feature(enable = "neon")]
9668#[cfg_attr(test, assert_instr(nop, N = 8))]
9669#[rustc_legacy_const_generics(1)]
9670#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9671pub fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
9672 static_assert_uimm_bits!(N, 4);
9673 unsafe { simd_extract!(a, N as u32) }
9674}
9675#[doc = "Set all vector lanes to the same value"]
9676#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"]
9677#[inline]
9678#[target_feature(enable = "neon")]
9679#[cfg_attr(test, assert_instr(nop, N = 0))]
9680#[rustc_legacy_const_generics(1)]
9681#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9682pub fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
9683 static_assert!(N == 0);
9684 unsafe { simd_extract!(a, N as u32) }
9685}
9686#[doc = "Set all vector lanes to the same value"]
9687#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"]
9688#[inline]
9689#[target_feature(enable = "neon")]
9690#[cfg_attr(test, assert_instr(nop, N = 0))]
9691#[rustc_legacy_const_generics(1)]
9692#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9693pub fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
9694 static_assert!(N == 0);
9695 unsafe { simd_extract!(a, N as u32) }
9696}
9697#[doc = "Set all vector lanes to the same value"]
9698#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"]
9699#[inline]
9700#[target_feature(enable = "neon")]
9701#[cfg_attr(test, assert_instr(nop, N = 0))]
9702#[rustc_legacy_const_generics(1)]
9703#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9704pub fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
9705 static_assert!(N == 0);
9706 unsafe { simd_extract!(a, N as u32) }
9707}
9708#[doc = "Set all vector lanes to the same value"]
9709#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_f16)"]
9710#[inline]
9711#[cfg_attr(test, assert_instr(nop, N = 2))]
9712#[rustc_legacy_const_generics(1)]
9713#[target_feature(enable = "neon,fp16")]
9714#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9715pub fn vduph_lane_f16<const N: i32>(a: float16x4_t) -> f16 {
9716 static_assert_uimm_bits!(N, 2);
9717 unsafe { simd_extract!(a, N as u32) }
9718}
9719#[doc = "Extract an element from a vector"]
9720#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_f16)"]
9721#[inline]
9722#[cfg_attr(test, assert_instr(nop, N = 4))]
9723#[rustc_legacy_const_generics(1)]
9724#[target_feature(enable = "neon,fp16")]
9725#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9726pub fn vduph_laneq_f16<const N: i32>(a: float16x8_t) -> f16 {
9727 static_assert_uimm_bits!(N, 3);
9728 unsafe { simd_extract!(a, N as u32) }
9729}
9730#[doc = "Set all vector lanes to the same value"]
9731#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"]
9732#[inline]
9733#[target_feature(enable = "neon")]
9734#[cfg_attr(test, assert_instr(dup, N = 0))]
9735#[rustc_legacy_const_generics(1)]
9736#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9737pub fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
9738 static_assert!(N == 0);
9739 unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
9740}
9741#[doc = "Set all vector lanes to the same value"]
9742#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"]
9743#[inline]
9744#[target_feature(enable = "neon")]
9745#[cfg_attr(test, assert_instr(dup, N = 0))]
9746#[rustc_legacy_const_generics(1)]
9747#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9748pub fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
9749 static_assert!(N == 0);
9750 unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
9751}
9752#[doc = "Set all vector lanes to the same value"]
9753#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"]
9754#[inline]
9755#[target_feature(enable = "neon")]
9756#[cfg_attr(test, assert_instr(dup, N = 1))]
9757#[rustc_legacy_const_generics(1)]
9758#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9759pub fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
9760 static_assert_uimm_bits!(N, 1);
9761 unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
9762}
9763#[doc = "Set all vector lanes to the same value"]
9764#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"]
9765#[inline]
9766#[target_feature(enable = "neon")]
9767#[cfg_attr(test, assert_instr(dup, N = 1))]
9768#[rustc_legacy_const_generics(1)]
9769#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9770pub fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
9771 static_assert_uimm_bits!(N, 1);
9772 unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
9773}
9774#[doc = "Set all vector lanes to the same value"]
9775#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"]
9776#[inline]
9777#[target_feature(enable = "neon")]
9778#[cfg_attr(test, assert_instr(nop, N = 1))]
9779#[rustc_legacy_const_generics(1)]
9780#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9781pub fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
9782 static_assert_uimm_bits!(N, 1);
9783 unsafe { simd_extract!(a, N as u32) }
9784}
9785#[doc = "Set all vector lanes to the same value"]
9786#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"]
9787#[inline]
9788#[target_feature(enable = "neon")]
9789#[cfg_attr(test, assert_instr(nop, N = 1))]
9790#[rustc_legacy_const_generics(1)]
9791#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9792pub fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
9793 static_assert_uimm_bits!(N, 1);
9794 unsafe { simd_extract!(a, N as u32) }
9795}
9796#[doc = "Set all vector lanes to the same value"]
9797#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"]
9798#[inline]
9799#[target_feature(enable = "neon")]
9800#[cfg_attr(test, assert_instr(nop, N = 1))]
9801#[rustc_legacy_const_generics(1)]
9802#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9803pub fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
9804 static_assert_uimm_bits!(N, 1);
9805 unsafe { simd_extract!(a, N as u32) }
9806}
9807#[doc = "Set all vector lanes to the same value"]
9808#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"]
9809#[inline]
9810#[target_feature(enable = "neon")]
9811#[cfg_attr(test, assert_instr(nop, N = 1))]
9812#[rustc_legacy_const_generics(1)]
9813#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9814pub fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
9815 static_assert_uimm_bits!(N, 1);
9816 unsafe { simd_extract!(a, N as u32) }
9817}
9818#[doc = "Set all vector lanes to the same value"]
9819#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"]
9820#[inline]
9821#[target_feature(enable = "neon")]
9822#[cfg_attr(test, assert_instr(nop, N = 1))]
9823#[rustc_legacy_const_generics(1)]
9824#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9825pub fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
9826 static_assert_uimm_bits!(N, 1);
9827 unsafe { simd_extract!(a, N as u32) }
9828}
9829#[doc = "Set all vector lanes to the same value"]
9830#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"]
9831#[inline]
9832#[target_feature(enable = "neon")]
9833#[cfg_attr(test, assert_instr(nop, N = 1))]
9834#[rustc_legacy_const_generics(1)]
9835#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9836pub fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
9837 static_assert_uimm_bits!(N, 1);
9838 unsafe { simd_extract!(a, N as u32) }
9839}
9840#[doc = "Set all vector lanes to the same value"]
9841#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"]
9842#[inline]
9843#[target_feature(enable = "neon")]
9844#[cfg_attr(test, assert_instr(nop, N = 2))]
9845#[rustc_legacy_const_generics(1)]
9846#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9847pub fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
9848 static_assert_uimm_bits!(N, 2);
9849 unsafe { simd_extract!(a, N as u32) }
9850}
9851#[doc = "Set all vector lanes to the same value"]
9852#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"]
9853#[inline]
9854#[target_feature(enable = "neon")]
9855#[cfg_attr(test, assert_instr(nop, N = 2))]
9856#[rustc_legacy_const_generics(1)]
9857#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9858pub fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
9859 static_assert_uimm_bits!(N, 2);
9860 unsafe { simd_extract!(a, N as u32) }
9861}
9862#[doc = "Set all vector lanes to the same value"]
9863#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"]
9864#[inline]
9865#[target_feature(enable = "neon")]
9866#[cfg_attr(test, assert_instr(nop, N = 2))]
9867#[rustc_legacy_const_generics(1)]
9868#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9869pub fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
9870 static_assert_uimm_bits!(N, 2);
9871 unsafe { simd_extract!(a, N as u32) }
9872}
9873#[doc = "Set all vector lanes to the same value"]
9874#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"]
9875#[inline]
9876#[target_feature(enable = "neon")]
9877#[cfg_attr(test, assert_instr(nop, N = 2))]
9878#[rustc_legacy_const_generics(1)]
9879#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9880pub fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
9881 static_assert_uimm_bits!(N, 2);
9882 unsafe { simd_extract!(a, N as u32) }
9883}
9884#[doc = "Set all vector lanes to the same value"]
9885#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"]
9886#[inline]
9887#[target_feature(enable = "neon")]
9888#[cfg_attr(test, assert_instr(nop, N = 2))]
9889#[rustc_legacy_const_generics(1)]
9890#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9891pub fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
9892 static_assert_uimm_bits!(N, 2);
9893 unsafe { simd_extract!(a, N as u32) }
9894}
9895#[doc = "Set all vector lanes to the same value"]
9896#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"]
9897#[inline]
9898#[target_feature(enable = "neon")]
9899#[cfg_attr(test, assert_instr(nop, N = 2))]
9900#[rustc_legacy_const_generics(1)]
9901#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9902pub fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
9903 static_assert_uimm_bits!(N, 2);
9904 unsafe { simd_extract!(a, N as u32) }
9905}
9906#[doc = "Three-way exclusive OR"]
9907#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"]
9908#[inline]
9909#[target_feature(enable = "neon,sha3")]
9910#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
9911#[cfg_attr(test, assert_instr(eor3))]
9912pub fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
9913 unsafe extern "unadjusted" {
9914 #[cfg_attr(
9915 any(target_arch = "aarch64", target_arch = "arm64ec"),
9916 link_name = "llvm.aarch64.crypto.eor3s.v16i8"
9917 )]
9918 fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
9919 }
9920 unsafe { _veor3q_s8(a, b, c) }
9921}
9922#[doc = "Three-way exclusive OR"]
9923#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"]
9924#[inline]
9925#[target_feature(enable = "neon,sha3")]
9926#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
9927#[cfg_attr(test, assert_instr(eor3))]
9928pub fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
9929 unsafe extern "unadjusted" {
9930 #[cfg_attr(
9931 any(target_arch = "aarch64", target_arch = "arm64ec"),
9932 link_name = "llvm.aarch64.crypto.eor3s.v8i16"
9933 )]
9934 fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
9935 }
9936 unsafe { _veor3q_s16(a, b, c) }
9937}
9938#[doc = "Three-way exclusive OR"]
9939#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"]
9940#[inline]
9941#[target_feature(enable = "neon,sha3")]
9942#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
9943#[cfg_attr(test, assert_instr(eor3))]
9944pub fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
9945 unsafe extern "unadjusted" {
9946 #[cfg_attr(
9947 any(target_arch = "aarch64", target_arch = "arm64ec"),
9948 link_name = "llvm.aarch64.crypto.eor3s.v4i32"
9949 )]
9950 fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
9951 }
9952 unsafe { _veor3q_s32(a, b, c) }
9953}
9954#[doc = "Three-way exclusive OR"]
9955#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"]
9956#[inline]
9957#[target_feature(enable = "neon,sha3")]
9958#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
9959#[cfg_attr(test, assert_instr(eor3))]
9960pub fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
9961 unsafe extern "unadjusted" {
9962 #[cfg_attr(
9963 any(target_arch = "aarch64", target_arch = "arm64ec"),
9964 link_name = "llvm.aarch64.crypto.eor3s.v2i64"
9965 )]
9966 fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
9967 }
9968 unsafe { _veor3q_s64(a, b, c) }
9969}
9970#[doc = "Three-way exclusive OR"]
9971#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"]
9972#[inline]
9973#[target_feature(enable = "neon,sha3")]
9974#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
9975#[cfg_attr(test, assert_instr(eor3))]
9976pub fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
9977 unsafe extern "unadjusted" {
9978 #[cfg_attr(
9979 any(target_arch = "aarch64", target_arch = "arm64ec"),
9980 link_name = "llvm.aarch64.crypto.eor3u.v16i8"
9981 )]
9982 fn _veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
9983 }
9984 unsafe { _veor3q_u8(a, b, c) }
9985}
9986#[doc = "Three-way exclusive OR"]
9987#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"]
9988#[inline]
9989#[target_feature(enable = "neon,sha3")]
9990#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
9991#[cfg_attr(test, assert_instr(eor3))]
9992pub fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
9993 unsafe extern "unadjusted" {
9994 #[cfg_attr(
9995 any(target_arch = "aarch64", target_arch = "arm64ec"),
9996 link_name = "llvm.aarch64.crypto.eor3u.v8i16"
9997 )]
9998 fn _veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
9999 }
10000 unsafe { _veor3q_u16(a, b, c) }
10001}
10002#[doc = "Three-way exclusive OR"]
10003#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"]
10004#[inline]
10005#[target_feature(enable = "neon,sha3")]
10006#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
10007#[cfg_attr(test, assert_instr(eor3))]
10008pub fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
10009 unsafe extern "unadjusted" {
10010 #[cfg_attr(
10011 any(target_arch = "aarch64", target_arch = "arm64ec"),
10012 link_name = "llvm.aarch64.crypto.eor3u.v4i32"
10013 )]
10014 fn _veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
10015 }
10016 unsafe { _veor3q_u32(a, b, c) }
10017}
10018#[doc = "Three-way exclusive OR"]
10019#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"]
10020#[inline]
10021#[target_feature(enable = "neon,sha3")]
10022#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
10023#[cfg_attr(test, assert_instr(eor3))]
10024pub fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
10025 unsafe extern "unadjusted" {
10026 #[cfg_attr(
10027 any(target_arch = "aarch64", target_arch = "arm64ec"),
10028 link_name = "llvm.aarch64.crypto.eor3u.v2i64"
10029 )]
10030 fn _veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
10031 }
10032 unsafe { _veor3q_u64(a, b, c) }
10033}
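// Illustrative sketch (assumes a target with the `sha3` feature): `veor3q_*`
// computes a ^ b ^ c per lane in a single EOR3 instruction, e.g. the theta
// parity step of a Keccak-style round:
//
//     let parity = veor3q_u64(a, b, c); // one EOR3 instead of two chained EORs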
10034#[doc = "Extract vector from pair of vectors"]
10035#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"]
10036#[inline]
10037#[target_feature(enable = "neon")]
10038#[cfg_attr(test, assert_instr(ext, N = 1))]
10039#[rustc_legacy_const_generics(2)]
10040#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10041pub fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
10042 static_assert_uimm_bits!(N, 1);
10043 unsafe {
10044 match N & 0b1 {
10045 0 => simd_shuffle!(a, b, [0, 1]),
10046 1 => simd_shuffle!(a, b, [1, 2]),
10047 _ => unreachable_unchecked(),
10048 }
10049 }
10050}
10051#[doc = "Extract vector from pair of vectors"]
10052#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"]
10053#[inline]
10054#[target_feature(enable = "neon")]
10055#[cfg_attr(test, assert_instr(ext, N = 1))]
10056#[rustc_legacy_const_generics(2)]
10057#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10058pub fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
10059 static_assert_uimm_bits!(N, 1);
10060 unsafe {
10061 match N & 0b1 {
10062 0 => simd_shuffle!(a, b, [0, 1]),
10063 1 => simd_shuffle!(a, b, [1, 2]),
10064 _ => unreachable_unchecked(),
10065 }
10066 }
10067}
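// Illustrative note: for the two-lane `vextq_*` forms above, N selects where
// the extracted window starts in the concatenation a:b, so
//
//     vextq_f64::<0>(a, b) == [a[0], a[1]]  // N = 0 returns a unchanged
//     vextq_f64::<1>(a, b) == [a[1], b[0]]  // N = 1 straddles the pair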
10068#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
10069#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"]
10070#[inline]
10071#[target_feature(enable = "neon")]
10072#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10073#[cfg_attr(test, assert_instr(fmadd))]
10074pub fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
10075 unsafe { simd_fma(b, c, a) }
10076}
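// Illustrative note: despite the simd_fma(b, c, a) argument order above, the
// intrinsic computes a + b * c per lane with a single rounding:
//
//     let r = vfma_f64(acc, x, y); // r[0] == acc[0] + x[0] * y[0], fused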
10077#[doc = "Floating-point fused multiply-add to accumulator"]
10078#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f16)"]
10079#[inline]
10080#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10081#[rustc_legacy_const_generics(3)]
10082#[target_feature(enable = "neon,fp16")]
10083#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10084pub fn vfma_lane_f16<const LANE: i32>(
10085 a: float16x4_t,
10086 b: float16x4_t,
10087 c: float16x4_t,
10088) -> float16x4_t {
10089 static_assert_uimm_bits!(LANE, 2);
10090 unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
10091}
10092#[doc = "Floating-point fused multiply-add to accumulator"]
10093#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f16)"]
10094#[inline]
10095#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10096#[rustc_legacy_const_generics(3)]
10097#[target_feature(enable = "neon,fp16")]
10098#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10099pub fn vfma_laneq_f16<const LANE: i32>(
10100 a: float16x4_t,
10101 b: float16x4_t,
10102 c: float16x8_t,
10103) -> float16x4_t {
10104 static_assert_uimm_bits!(LANE, 3);
10105 unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
10106}
10107#[doc = "Floating-point fused multiply-add to accumulator"]
10108#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f16)"]
10109#[inline]
10110#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10111#[rustc_legacy_const_generics(3)]
10112#[target_feature(enable = "neon,fp16")]
10113#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10114pub fn vfmaq_lane_f16<const LANE: i32>(
10115 a: float16x8_t,
10116 b: float16x8_t,
10117 c: float16x4_t,
10118) -> float16x8_t {
10119 static_assert_uimm_bits!(LANE, 2);
10120 unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
10121}
10122#[doc = "Floating-point fused multiply-add to accumulator"]
10123#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f16)"]
10124#[inline]
10125#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10126#[rustc_legacy_const_generics(3)]
10127#[target_feature(enable = "neon,fp16")]
10128#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10129pub fn vfmaq_laneq_f16<const LANE: i32>(
10130 a: float16x8_t,
10131 b: float16x8_t,
10132 c: float16x8_t,
10133) -> float16x8_t {
10134 static_assert_uimm_bits!(LANE, 3);
10135 unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
10136}
10137#[doc = "Floating-point fused multiply-add to accumulator"]
10138#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"]
10139#[inline]
10140#[target_feature(enable = "neon")]
10141#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10142#[rustc_legacy_const_generics(3)]
10143#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10144pub fn vfma_lane_f32<const LANE: i32>(
10145 a: float32x2_t,
10146 b: float32x2_t,
10147 c: float32x2_t,
10148) -> float32x2_t {
10149 static_assert_uimm_bits!(LANE, 1);
10150 unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
10151}
10152#[doc = "Floating-point fused multiply-add to accumulator"]
10153#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"]
10154#[inline]
10155#[target_feature(enable = "neon")]
10156#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10157#[rustc_legacy_const_generics(3)]
10158#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10159pub fn vfma_laneq_f32<const LANE: i32>(
10160 a: float32x2_t,
10161 b: float32x2_t,
10162 c: float32x4_t,
10163) -> float32x2_t {
10164 static_assert_uimm_bits!(LANE, 2);
10165 unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
10166}
10167#[doc = "Floating-point fused multiply-add to accumulator"]
10168#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"]
10169#[inline]
10170#[target_feature(enable = "neon")]
10171#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10172#[rustc_legacy_const_generics(3)]
10173#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10174pub fn vfmaq_lane_f32<const LANE: i32>(
10175 a: float32x4_t,
10176 b: float32x4_t,
10177 c: float32x2_t,
10178) -> float32x4_t {
10179 static_assert_uimm_bits!(LANE, 1);
10180 unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
10181}
10182#[doc = "Floating-point fused multiply-add to accumulator"]
10183#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"]
10184#[inline]
10185#[target_feature(enable = "neon")]
10186#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10187#[rustc_legacy_const_generics(3)]
10188#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10189pub fn vfmaq_laneq_f32<const LANE: i32>(
10190 a: float32x4_t,
10191 b: float32x4_t,
10192 c: float32x4_t,
10193) -> float32x4_t {
10194 static_assert_uimm_bits!(LANE, 2);
10195 unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
10196}
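// Illustrative sketch: the `_lane`/`_laneq` FMA forms broadcast one lane of
// `c` before multiplying, mapping to the by-element form of FMLA:
//
//     let r = vfmaq_laneq_f32::<3>(a, b, c); // r[i] == a[i] + b[i] * c[3]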
10197#[doc = "Floating-point fused multiply-add to accumulator"]
10198#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"]
10199#[inline]
10200#[target_feature(enable = "neon")]
10201#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10202#[rustc_legacy_const_generics(3)]
10203#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10204pub fn vfmaq_laneq_f64<const LANE: i32>(
10205 a: float64x2_t,
10206 b: float64x2_t,
10207 c: float64x2_t,
10208) -> float64x2_t {
10209 static_assert_uimm_bits!(LANE, 1);
10210 unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
10211}
10212#[doc = "Floating-point fused multiply-add to accumulator"]
10213#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"]
10214#[inline]
10215#[target_feature(enable = "neon")]
10216#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10217#[rustc_legacy_const_generics(3)]
10218#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10219pub fn vfma_lane_f64<const LANE: i32>(
10220 a: float64x1_t,
10221 b: float64x1_t,
10222 c: float64x1_t,
10223) -> float64x1_t {
10224 static_assert!(LANE == 0);
10225 unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
10226}
10227#[doc = "Floating-point fused multiply-add to accumulator"]
10228#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"]
10229#[inline]
10230#[target_feature(enable = "neon")]
10231#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10232#[rustc_legacy_const_generics(3)]
10233#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10234pub fn vfma_laneq_f64<const LANE: i32>(
10235 a: float64x1_t,
10236 b: float64x1_t,
10237 c: float64x2_t,
10238) -> float64x1_t {
10239 static_assert_uimm_bits!(LANE, 1);
10240 unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
10241}
10242#[doc = "Floating-point fused Multiply-Add to accumulator."]
10243#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f16)"]
10244#[inline]
10245#[target_feature(enable = "neon,fp16")]
10246#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10247#[cfg_attr(test, assert_instr(fmla))]
10248pub fn vfma_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
10249 vfma_f16(a, b, vdup_n_f16(c))
10250}
10251#[doc = "Floating-point fused Multiply-Add to accumulator."]
10252#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f16)"]
10253#[inline]
10254#[target_feature(enable = "neon,fp16")]
10255#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10256#[cfg_attr(test, assert_instr(fmla))]
10257pub fn vfmaq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
10258 vfmaq_f16(a, b, vdupq_n_f16(c))
10259}
10260#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
10261#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"]
10262#[inline]
10263#[target_feature(enable = "neon")]
10264#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10265#[cfg_attr(test, assert_instr(fmadd))]
10266pub fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
10267 vfma_f64(a, b, vdup_n_f64(c))
10268}
10269#[doc = "Floating-point fused multiply-add to accumulator"]
10270#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"]
10271#[inline]
10272#[target_feature(enable = "neon")]
10273#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10274#[rustc_legacy_const_generics(3)]
10275#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10276pub fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
10277 static_assert!(LANE == 0);
10278 unsafe {
10279 let c: f64 = simd_extract!(c, LANE as u32);
10280 fmaf64(b, c, a)
10281 }
10282}
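// Illustrative note: the scalar-by-lane forms such as vfmad_lane_f64 extract
// the requested lane and then issue a plain FMADD:
//
//     let r = vfmad_lane_f64::<0>(a, b, v); // r == a + b * v[0]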
10283#[doc = "Floating-point fused multiply-add to accumulator"]
10284#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_f16)"]
10285#[inline]
10286#[cfg_attr(test, assert_instr(fmadd))]
10287#[target_feature(enable = "neon,fp16")]
10288#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10289pub fn vfmah_f16(a: f16, b: f16, c: f16) -> f16 {
10290 unsafe { fmaf16(b, c, a) }
10291}
10292#[doc = "Floating-point fused multiply-add to accumulator"]
10293#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_lane_f16)"]
10294#[inline]
10295#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10296#[rustc_legacy_const_generics(3)]
10297#[target_feature(enable = "neon,fp16")]
10298#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10299pub fn vfmah_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
10300 static_assert_uimm_bits!(LANE, 2);
10301 unsafe {
10302 let c: f16 = simd_extract!(v, LANE as u32);
10303 vfmah_f16(a, b, c)
10304 }
10305}
10306#[doc = "Floating-point fused multiply-add to accumulator"]
10307#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_laneq_f16)"]
10308#[inline]
10309#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10310#[rustc_legacy_const_generics(3)]
10311#[target_feature(enable = "neon,fp16")]
10312#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10313pub fn vfmah_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
10314 static_assert_uimm_bits!(LANE, 3);
10315 unsafe {
10316 let c: f16 = simd_extract!(v, LANE as u32);
10317 vfmah_f16(a, b, c)
10318 }
10319}
10320#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
10321#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"]
10322#[inline]
10323#[target_feature(enable = "neon")]
10324#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10325#[cfg_attr(test, assert_instr(fmla))]
10326pub fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
10327 unsafe { simd_fma(b, c, a) }
10328}
10329#[doc = "Floating-point fused multiply-add to accumulator"]
10330#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"]
10331#[inline]
10332#[target_feature(enable = "neon")]
10333#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10334#[rustc_legacy_const_generics(3)]
10335#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10336pub fn vfmaq_lane_f64<const LANE: i32>(
10337 a: float64x2_t,
10338 b: float64x2_t,
10339 c: float64x1_t,
10340) -> float64x2_t {
10341 static_assert!(LANE == 0);
10342 unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
10343}
10344#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
10345#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"]
10346#[inline]
10347#[target_feature(enable = "neon")]
10348#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10349#[cfg_attr(test, assert_instr(fmla))]
10350pub fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
10351 vfmaq_f64(a, b, vdupq_n_f64(c))
10352}
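// Illustrative note: the `_n` forms splat a scalar rather than a lane, so
//
//     vfmaq_n_f64(a, b, k) == vfmaq_f64(a, b, vdupq_n_f64(k))
//
// i.e. r[i] == a[i] + b[i] * k for both lanes.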
10353#[doc = "Floating-point fused multiply-add to accumulator"]
10354#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"]
10355#[inline]
10356#[target_feature(enable = "neon")]
10357#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10358#[rustc_legacy_const_generics(3)]
10359#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10360pub fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
10361 static_assert_uimm_bits!(LANE, 1);
10362 unsafe {
10363 let c: f32 = simd_extract!(c, LANE as u32);
10364 fmaf32(b, c, a)
10365 }
10366}
10367#[doc = "Floating-point fused multiply-add to accumulator"]
10368#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"]
10369#[inline]
10370#[target_feature(enable = "neon")]
10371#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10372#[rustc_legacy_const_generics(3)]
10373#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10374pub fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
10375 static_assert_uimm_bits!(LANE, 2);
10376 unsafe {
10377 let c: f32 = simd_extract!(c, LANE as u32);
10378 fmaf32(b, c, a)
10379 }
10380}
10381#[doc = "Floating-point fused multiply-add to accumulator"]
10382#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"]
10383#[inline]
10384#[target_feature(enable = "neon")]
10385#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10386#[rustc_legacy_const_generics(3)]
10387#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10388pub fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
10389 static_assert_uimm_bits!(LANE, 1);
10390 unsafe {
10391 let c: f64 = simd_extract!(c, LANE as u32);
10392 fmaf64(b, c, a)
10393 }
10394}
10395#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
10396#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_high_f16)"]
10397#[inline]
10398#[target_feature(enable = "neon,fp16")]
10399#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10400#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10401#[cfg_attr(test, assert_instr(fmlal2))]
10402pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
10403 unsafe extern "unadjusted" {
10404 #[cfg_attr(
10405 any(target_arch = "aarch64", target_arch = "arm64ec"),
10406 link_name = "llvm.aarch64.neon.fmlal2.v2f32.v4f16"
10407 )]
10408 fn _vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
10409 }
10410 unsafe { _vfmlal_high_f16(r, a, b) }
10411}
10412#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
10413#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_high_f16)"]
10414#[inline]
10415#[target_feature(enable = "neon,fp16")]
10416#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10417#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10418#[cfg_attr(test, assert_instr(fmlal2))]
10419pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
10420 unsafe extern "unadjusted" {
10421 #[cfg_attr(
10422 any(target_arch = "aarch64", target_arch = "arm64ec"),
10423 link_name = "llvm.aarch64.neon.fmlal2.v4f32.v8f16"
10424 )]
10425 fn _vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
10426 }
10427 unsafe { _vfmlalq_high_f16(r, a, b) }
10428}
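// Illustrative note (assumes the `fp16` and `fhm` features): FMLAL2 widens
// the upper half of the f16 inputs before accumulating into f32, so for
// i in 0..4:
//
//     vfmlalq_high_f16(r, a, b)[i] == r[i] + f32(a[i + 4]) * f32(b[i + 4])
//
// The `_low` variants further below use lanes 0..4 instead.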
10429#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10430#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_high_f16)"]
10431#[inline]
10432#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
10433#[target_feature(enable = "neon,fp16")]
10434#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10435#[rustc_legacy_const_generics(3)]
10436#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10437pub fn vfmlal_lane_high_f16<const LANE: i32>(
10438 r: float32x2_t,
10439 a: float16x4_t,
10440 b: float16x4_t,
10441) -> float32x2_t {
10442 static_assert_uimm_bits!(LANE, 2);
10443 unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10444}
10445#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10446#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_high_f16)"]
10447#[inline]
10448#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
10449#[target_feature(enable = "neon,fp16")]
10450#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10451#[rustc_legacy_const_generics(3)]
10452#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10453pub fn vfmlal_laneq_high_f16<const LANE: i32>(
10454 r: float32x2_t,
10455 a: float16x4_t,
10456 b: float16x8_t,
10457) -> float32x2_t {
10458 static_assert_uimm_bits!(LANE, 3);
10459 unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10460}
10461#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10462#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_high_f16)"]
10463#[inline]
10464#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
10465#[target_feature(enable = "neon,fp16")]
10466#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10467#[rustc_legacy_const_generics(3)]
10468#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10469pub fn vfmlalq_lane_high_f16<const LANE: i32>(
10470 r: float32x4_t,
10471 a: float16x8_t,
10472 b: float16x4_t,
10473) -> float32x4_t {
10474 static_assert_uimm_bits!(LANE, 2);
10475 unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10476}
10477#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10478#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_high_f16)"]
10479#[inline]
10480#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
10481#[target_feature(enable = "neon,fp16")]
10482#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10483#[rustc_legacy_const_generics(3)]
10484#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10485pub fn vfmlalq_laneq_high_f16<const LANE: i32>(
10486 r: float32x4_t,
10487 a: float16x8_t,
10488 b: float16x8_t,
10489) -> float32x4_t {
10490 static_assert_uimm_bits!(LANE, 3);
10491 unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10492}
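// Illustrative note: the lane forms of FMLAL broadcast a single f16 lane of
// `b` before the widening accumulate, e.g. for the 64-bit variant:
//
//     vfmlal_lane_high_f16::<2>(r, a, b)[i] == r[i] + f32(a[i + 2]) * f32(b[2])
//
// for i in 0..2.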
10493#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10494#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_low_f16)"]
10495#[inline]
10496#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
10497#[target_feature(enable = "neon,fp16")]
10498#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10499#[rustc_legacy_const_generics(3)]
10500#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10501pub fn vfmlal_lane_low_f16<const LANE: i32>(
10502 r: float32x2_t,
10503 a: float16x4_t,
10504 b: float16x4_t,
10505) -> float32x2_t {
10506 static_assert_uimm_bits!(LANE, 2);
10507 unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10508}
10509#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10510#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_low_f16)"]
10511#[inline]
10512#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
10513#[target_feature(enable = "neon,fp16")]
10514#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10515#[rustc_legacy_const_generics(3)]
10516#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10517pub fn vfmlal_laneq_low_f16<const LANE: i32>(
10518 r: float32x2_t,
10519 a: float16x4_t,
10520 b: float16x8_t,
10521) -> float32x2_t {
10522 static_assert_uimm_bits!(LANE, 3);
10523 unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10524}
10525#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10526#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_low_f16)"]
10527#[inline]
10528#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
10529#[target_feature(enable = "neon,fp16")]
10530#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10531#[rustc_legacy_const_generics(3)]
10532#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10533pub fn vfmlalq_lane_low_f16<const LANE: i32>(
10534 r: float32x4_t,
10535 a: float16x8_t,
10536 b: float16x4_t,
10537) -> float32x4_t {
10538 static_assert_uimm_bits!(LANE, 2);
10539 unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10540}
10541#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10542#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_low_f16)"]
10543#[inline]
10544#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
10545#[target_feature(enable = "neon,fp16")]
10546#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10547#[rustc_legacy_const_generics(3)]
10548#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10549pub fn vfmlalq_laneq_low_f16<const LANE: i32>(
10550 r: float32x4_t,
10551 a: float16x8_t,
10552 b: float16x8_t,
10553) -> float32x4_t {
10554 static_assert_uimm_bits!(LANE, 3);
10555 unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10556}
10557#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
10558#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_low_f16)"]
10559#[inline]
10560#[target_feature(enable = "neon,fp16")]
10561#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10562#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10563#[cfg_attr(test, assert_instr(fmlal))]
10564pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
10565 unsafe extern "unadjusted" {
10566 #[cfg_attr(
10567 any(target_arch = "aarch64", target_arch = "arm64ec"),
10568 link_name = "llvm.aarch64.neon.fmlal.v2f32.v4f16"
10569 )]
10570 fn _vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
10571 }
10572 unsafe { _vfmlal_low_f16(r, a, b) }
10573}
10574#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
10575#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_low_f16)"]
10576#[inline]
10577#[target_feature(enable = "neon,fp16")]
10578#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10579#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10580#[cfg_attr(test, assert_instr(fmlal))]
10581pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
10582 unsafe extern "unadjusted" {
10583 #[cfg_attr(
10584 any(target_arch = "aarch64", target_arch = "arm64ec"),
10585 link_name = "llvm.aarch64.neon.fmlal.v4f32.v8f16"
10586 )]
10587 fn _vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
10588 }
10589 unsafe { _vfmlalq_low_f16(r, a, b) }
10590}
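// Illustrative note: the `_low` forms mirror `_high` on the bottom half, so
//
//     vfmlal_low_f16(r, a, b)[i] == r[i] + f32(a[i]) * f32(b[i]) for i in 0..2
//
// Using both halves accumulates all products of two f16 vectors into f32
// accumulators without an explicit conversion step.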
10591#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
10592#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_high_f16)"]
10593#[inline]
10594#[target_feature(enable = "neon,fp16")]
10595#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10596#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10597#[cfg_attr(test, assert_instr(fmlsl2))]
10598pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
10599 unsafe extern "unadjusted" {
10600 #[cfg_attr(
10601 any(target_arch = "aarch64", target_arch = "arm64ec"),
10602 link_name = "llvm.aarch64.neon.fmlsl2.v2f32.v4f16"
10603 )]
10604 fn _vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
10605 }
10606 unsafe { _vfmlsl_high_f16(r, a, b) }
10607}
10608#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
10609#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_high_f16)"]
10610#[inline]
10611#[target_feature(enable = "neon,fp16")]
10612#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10613#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10614#[cfg_attr(test, assert_instr(fmlsl2))]
10615pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
10616 unsafe extern "unadjusted" {
10617 #[cfg_attr(
10618 any(target_arch = "aarch64", target_arch = "arm64ec"),
10619 link_name = "llvm.aarch64.neon.fmlsl2.v4f32.v8f16"
10620 )]
10621 fn _vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
10622 }
10623 unsafe { _vfmlslq_high_f16(r, a, b) }
10624}
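// Illustrative note: FMLSL2 is the subtracting counterpart of FMLAL2, so for
// i in 0..4:
//
//     vfmlslq_high_f16(r, a, b)[i] == r[i] - f32(a[i + 4]) * f32(b[i + 4])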
10625#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10626#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_high_f16)"]
10627#[inline]
10628#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10629#[target_feature(enable = "neon,fp16")]
10630#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10631#[rustc_legacy_const_generics(3)]
10632#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10633pub fn vfmlsl_lane_high_f16<const LANE: i32>(
10634 r: float32x2_t,
10635 a: float16x4_t,
10636 b: float16x4_t,
10637) -> float32x2_t {
10638 static_assert_uimm_bits!(LANE, 2);
10639 unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10640}
10641#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10642#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_high_f16)"]
10643#[inline]
10644#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10645#[target_feature(enable = "neon,fp16")]
10646#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10647#[rustc_legacy_const_generics(3)]
10648#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10649pub fn vfmlsl_laneq_high_f16<const LANE: i32>(
10650 r: float32x2_t,
10651 a: float16x4_t,
10652 b: float16x8_t,
10653) -> float32x2_t {
10654 static_assert_uimm_bits!(LANE, 3);
10655 unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10656}
10657#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10658#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_high_f16)"]
10659#[inline]
10660#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10661#[target_feature(enable = "neon,fp16")]
10662#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10663#[rustc_legacy_const_generics(3)]
10664#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10665pub fn vfmlslq_lane_high_f16<const LANE: i32>(
10666 r: float32x4_t,
10667 a: float16x8_t,
10668 b: float16x4_t,
10669) -> float32x4_t {
10670 static_assert_uimm_bits!(LANE, 2);
10671 unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10672}
10673#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10674#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_high_f16)"]
10675#[inline]
10676#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10677#[target_feature(enable = "neon,fp16")]
10678#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10679#[rustc_legacy_const_generics(3)]
10680#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10681pub fn vfmlslq_laneq_high_f16<const LANE: i32>(
10682 r: float32x4_t,
10683 a: float16x8_t,
10684 b: float16x8_t,
10685) -> float32x4_t {
10686 static_assert_uimm_bits!(LANE, 3);
10687 unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10688}
10689#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10690#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_low_f16)"]
10691#[inline]
10692#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
10693#[target_feature(enable = "neon,fp16")]
10694#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10695#[rustc_legacy_const_generics(3)]
10696#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10697pub fn vfmlsl_lane_low_f16<const LANE: i32>(
10698 r: float32x2_t,
10699 a: float16x4_t,
10700 b: float16x4_t,
10701) -> float32x2_t {
10702 static_assert_uimm_bits!(LANE, 2);
10703 unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10704}
10705#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10706#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_low_f16)"]
10707#[inline]
10708#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
10709#[target_feature(enable = "neon,fp16")]
10710#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10711#[rustc_legacy_const_generics(3)]
10712#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10713pub fn vfmlsl_laneq_low_f16<const LANE: i32>(
10714 r: float32x2_t,
10715 a: float16x4_t,
10716 b: float16x8_t,
10717) -> float32x2_t {
10718 static_assert_uimm_bits!(LANE, 3);
10719 unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10720}
10721#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10722#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_low_f16)"]
10723#[inline]
10724#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
10725#[target_feature(enable = "neon,fp16")]
10726#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10727#[rustc_legacy_const_generics(3)]
10728#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10729pub fn vfmlslq_lane_low_f16<const LANE: i32>(
10730 r: float32x4_t,
10731 a: float16x8_t,
10732 b: float16x4_t,
10733) -> float32x4_t {
10734 static_assert_uimm_bits!(LANE, 2);
10735 unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10736}
10737#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10738#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_low_f16)"]
10739#[inline]
10740#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
10741#[target_feature(enable = "neon,fp16")]
10742#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10743#[rustc_legacy_const_generics(3)]
10744#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10745pub fn vfmlslq_laneq_low_f16<const LANE: i32>(
10746 r: float32x4_t,
10747 a: float16x8_t,
10748 b: float16x8_t,
10749) -> float32x4_t {
10750 static_assert_uimm_bits!(LANE, 3);
10751 unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10752}
10753#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
10754#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_low_f16)"]
10755#[inline]
10756#[target_feature(enable = "neon,fp16")]
10757#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10758#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10759#[cfg_attr(test, assert_instr(fmlsl))]
10760pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
10761 unsafe extern "unadjusted" {
10762 #[cfg_attr(
10763 any(target_arch = "aarch64", target_arch = "arm64ec"),
10764 link_name = "llvm.aarch64.neon.fmlsl.v2f32.v4f16"
10765 )]
10766 fn _vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
10767 }
10768 unsafe { _vfmlsl_low_f16(r, a, b) }
10769}
10770#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
10771#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_low_f16)"]
10772#[inline]
10773#[target_feature(enable = "neon,fp16")]
10774#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10775#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10776#[cfg_attr(test, assert_instr(fmlsl))]
10777pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
10778 unsafe extern "unadjusted" {
10779 #[cfg_attr(
10780 any(target_arch = "aarch64", target_arch = "arm64ec"),
10781 link_name = "llvm.aarch64.neon.fmlsl.v4f32.v8f16"
10782 )]
10783 fn _vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
10784 }
10785 unsafe { _vfmlslq_low_f16(r, a, b) }
10786}
10787#[doc = "Floating-point fused multiply-subtract from accumulator"]
10788#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"]
10789#[inline]
10790#[target_feature(enable = "neon")]
10791#[cfg_attr(test, assert_instr(fmsub))]
10792#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10793pub fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
10794 unsafe {
10795 let b: float64x1_t = simd_neg(b);
10796 vfma_f64(a, b, c)
10797 }
10798}
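// Illustrative note: vfms* negates the first product operand and reuses the
// FMA path, so vfms_f64(a, b, c) == a - b * c with a single rounding,
// matching the FMSUB/FMLS instructions the tests assert.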
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfms_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfms_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmsq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmsq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
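// Illustrative sketch: the `_lane_`/`_laneq_` variants broadcast one lane of
// `c` before the fused multiply-subtract, so `vfms_lane_f16::<N>(a, b, c)`
// computes `a - b * c[N]` in every lane. Assuming `fp16` support:
//
//     let a = vdup_n_f16(10.0);
//     let b = vdup_n_f16(2.0);
//     let c = vdup_n_f16(3.0);
//     let r = vfms_lane_f16::<0>(a, b, c); // each lane: 10.0 - 2.0 * 3.0 == 4.0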
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
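// Illustrative sketch: the f32 lane variants follow the same pattern, e.g.
// `vfmsq_laneq_f32::<3>(a, b, c)` computes `a - b * c[3]` in all four lanes:
//
//     let a = vdupq_n_f32(1.0);
//     let b = vdupq_n_f32(0.5);
//     let c = vdupq_n_f32(4.0);
//     let r = vfmsq_laneq_f32::<3>(a, b, c); // each lane: 1.0 - 0.5 * 4.0 == -1.0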
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfms_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    vfms_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfmsq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    vfmsq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-subtract to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    vfms_f64(a, b, vdup_n_f64(c))
}
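// Illustrative sketch: the `_n_` variants take the multiplier as a scalar and
// broadcast it, so `vfms_n_f64(a, b, 3.0)` is `vfms_f64(a, b, vdup_n_f64(3.0))`:
//
//     let a = vdup_n_f64(10.0);
//     let b = vdup_n_f64(2.0);
//     let r = vfms_n_f64(a, b, 3.0); // 10.0 - 2.0 * 3.0 == 4.0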
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmsub))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmsh_f16(a: f16, b: f16, c: f16) -> f16 {
    vfmah_f16(a, -b, c)
}
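// Illustrative sketch: the `h`-suffixed form is fully scalar, a single fused
// `a - b * c` on f16 values (an FMSUB on the half-precision registers).
// Assuming nightly f16 literal inference:
//
//     let r = vfmsh_f16(10.0, 2.0, 3.0); // 10.0 - 2.0 * 3.0 == 4.0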
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmsh_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmsh_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe {
        let b: float64x2_t = simd_neg(b);
        vfmaq_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    static_assert!(LANE == 0);
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-subtract to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    vfmsq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    vfmas_lane_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    vfmas_laneq_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    vfmad_lane_f64::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    vfmad_laneq_f64::<LANE>(a, -b, c)
}
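// Illustrative sketch: the scalar-by-lane forms (`vfmss_*` for f32, `vfmsd_*`
// for f64) reuse the fused-add counterpart with `b` negated and one lane of
// the vector operand as the multiplier, e.g.:
//
//     let c = vdupq_n_f64(3.0);
//     let r = vfmsd_laneq_f64::<1>(10.0, 2.0, c); // 10.0 - 2.0 * c[1] == 4.0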
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
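// Illustrative sketch: the `vld1*` loads above are plain unaligned reads, so
// the only safety requirement is that `ptr` is valid for reading the full
// vector width. For example, loading four `f32`s from an array:
//
//     let data: [f32; 4] = [1.0, 2.0, 3.0, 4.0];
//     // Safety: `data` provides 16 readable bytes; no alignment is required.
//     let v = unsafe { vld1q_f32(data.as_ptr()) };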
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0"
        )]
        fn _vld1_f64_x2(a: *const f64) -> float64x1x2_t;
    }
    _vld1_f64_x2(a)
}
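// Illustrative sketch: the `_x2`/`_x3`/`_x4` forms load that many consecutive
// (non-interleaved) vectors in one call, so `vld1_f64_x2` reads two adjacent
// `f64`s into a `float64x1x2_t`:
//
//     let data: [f64; 2] = [1.0, 2.0];
//     // Safety: `data` provides the 16 bytes the two registers need.
//     let pair = unsafe { vld1_f64_x2(data.as_ptr()) }; // pair.0 = [1.0], pair.1 = [2.0]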
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0"
        )]
        fn _vld1_f64_x3(a: *const f64) -> float64x1x3_t;
    }
    _vld1_f64_x3(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0"
        )]
        fn _vld1_f64_x4(a: *const f64) -> float64x1x4_t;
    }
    _vld1_f64_x4(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0"
        )]
        fn _vld1q_f64_x2(a: *const f64) -> float64x2x2_t;
    }
    _vld1q_f64_x2(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0"
        )]
        fn _vld1q_f64_x3(a: *const f64) -> float64x2x3_t;
    }
    _vld1q_f64_x3(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0"
        )]
        fn _vld1q_f64_x4(a: *const f64) -> float64x2x4_t;
    }
    _vld1q_f64_x4(a)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v1f64.p0"
        )]
        fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t;
    }
    _vld2_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2f64.p0"
        )]
        fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t;
    }
    _vld2q_dup_f64(a as _)
}
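// Illustrative sketch: the `_dup` (LD2R) forms read a single 2-element
// structure and replicate each element across all lanes of its register:
//
//     let data: [f64; 2] = [1.0, 2.0];
//     // Safety: only the first two `f64`s are read.
//     let r = unsafe { vld2q_dup_f64(data.as_ptr()) };
//     // r.0 == [1.0, 1.0], r.1 == [2.0, 2.0]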
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2i64.p0"
        )]
        fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t;
    }
    _vld2q_dup_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v1f64.p0"
        )]
        fn _vld2_f64(ptr: *const float64x1_t) -> float64x1x2_t;
    }
    _vld2_f64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0"
        )]
        fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t;
    }
    _vld2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
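// Illustrative sketch: the `_lane` forms read one 2-element structure from
// memory into lane `LANE` of the two vectors in `b`, leaving the other lanes
// untouched (for the 1-lane f64 vectors here, `LANE` must be 0). With
// hypothetical buffers `src` and `upd`, each holding at least two `f64`s:
//
//     let init = unsafe { vld2_f64(src.as_ptr()) };
//     let r = unsafe { vld2_lane_f64::<0>(upd.as_ptr(), init) }; // reload lane 0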
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> int64x1x2_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0"
        )]
        fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t;
    }
    _vld2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t {
    static_assert!(LANE == 0);
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t {
    static_assert!(LANE == 0);
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2f64.p0"
        )]
        fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t;
    }
    _vld2q_f64(a as _)
}
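// Illustrative sketch: plain `vld2*` loads de-interleave, so a buffer laid out
// as `[a0, b0, a1, b1]` splits into the registers `[a0, a1]` and `[b0, b1]`:
//
//     let data: [f64; 4] = [1.0, 10.0, 2.0, 20.0];
//     // Safety: `data` provides the 32 bytes the two q-registers need.
//     let r = unsafe { vld2q_f64(data.as_ptr()) };
//     // r.0 == [1.0, 2.0], r.1 == [10.0, 20.0]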
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2i64.p0"
        )]
        fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t;
    }
    _vld2q_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0"
        )]
        fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8)
            -> float64x2x2_t;
    }
    _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> int8x16x2_t {
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0"
        )]
        fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t;
    }
    _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) -> int64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0"
        )]
        fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t;
    }
    _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    let mut ret_val: uint64x2x2_t = transmute(vld2q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v1f64.p0"
        )]
        fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t;
    }
    _vld3_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2f64.p0"
        )]
        fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t;
    }
    _vld3q_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2i64.p0"
        )]
        fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t;
    }
    _vld3q_dup_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v1f64.p0"
        )]
        fn _vld3_f64(ptr: *const float64x1_t) -> float64x1x3_t;
    }
    _vld3_f64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0"
        )]
        fn _vld3_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x3_t;
    }
    _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
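// Illustrative sketch: `vld3_lane_f64` reads one 3-element structure into lane
// 0 of the three 1-lane vectors in `b` (with a hypothetical buffer `src`
// holding at least three `f64`s; the struct types are plain tuple structs):
//
//     let b = float64x1x3_t(vdup_n_f64(0.0), vdup_n_f64(0.0), vdup_n_f64(0.0));
//     let r = unsafe { vld3_lane_f64::<0>(src.as_ptr(), b) };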
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t {
    static_assert!(LANE == 0);
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0"
        )]
        fn _vld3_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x3_t;
    }
    _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t {
    static_assert!(LANE == 0);
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
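// The `p64`/`u64` wrappers above do not carry their own LLVM intrinsic: the
// bit pattern of a structure load is type-independent, so they `transmute`
// the pointer and vectors and delegate to the signed `s64` implementation,
// which holds the actual `ld3lane` link.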
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
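    // On big-endian targets lane order within each register is reversed
    // relative to the architectural numbering, so each of the three result
    // registers is flipped back with a [1, 0] shuffle.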
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2f64.p0"
        )]
        fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t;
    }
    _vld3q_f64(a as _)
}
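// A minimal de-interleaving sketch (illustrative only, assuming a
// little-endian aarch64 target): loading six contiguous f64 values as
// 3-element structures splits them field-by-field across the registers.
//
//     let src: [f64; 6] = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
//     // r.0 == {1.0, 4.0}, r.1 == {2.0, 5.0}, r.2 == {3.0, 6.0}
//     let r = unsafe { vld3q_f64(src.as_ptr()) };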
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2i64.p0"
        )]
        fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t;
    }
    _vld3q_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -> float64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0"
        )]
        fn _vld3q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x3_t;
    }
    _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> int8x16x3_t {
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0"
        )]
        fn _vld3q_lane_s8(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            n: i64,
            ptr: *const i8,
        ) -> int8x16x3_t;
    }
    _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
}
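// `int8x16_t` has 16 lanes, so a byte-lane index needs the full 4-bit range
// 0..=15; `static_assert_uimm_bits!(LANE, 4)` above enforces this at compile
// time via the const generic, matching the bound in the `u8`/`p8` wrappers
// further down that delegate here.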
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0"
        )]
        fn _vld3q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x3_t;
    }
    _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    let mut ret_val: uint64x2x3_t = transmute(vld3q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v1f64.p0"
        )]
        fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t;
    }
    _vld4_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2f64.p0"
        )]
        fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t;
    }
    _vld4q_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2i64.p0"
        )]
        fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t;
    }
    _vld4q_dup_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v1f64.p0"
        )]
        fn _vld4_f64(ptr: *const float64x1_t) -> float64x1x4_t;
    }
    _vld4_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) -> float64x1x4_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0"
        )]
        fn _vld4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x4_t;
    }
    _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> int64x1x4_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0"
        )]
        fn _vld4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x4_t;
    }
    _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t {
    static_assert!(LANE == 0);
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t {
    static_assert!(LANE == 0);
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    let mut ret_val: uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2f64.p0"
        )]
        fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t;
    }
    _vld4q_f64(a as _)
}
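// A minimal de-interleaving sketch for the 4-element form (illustrative only,
// assuming a little-endian aarch64 target):
//
//     let src: [f64; 8] = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
//     // r.0 == {1.0, 5.0}, r.1 == {2.0, 6.0},
//     // r.2 == {3.0, 7.0}, r.3 == {4.0, 8.0}
//     let r = unsafe { vld4q_f64(src.as_ptr()) };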
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2i64.p0"
        )]
        fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t;
    }
    _vld4q_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -> float64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0"
        )]
        fn _vld4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x4_t;
    }
    _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> int8x16x4_t {
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0"
        )]
        fn _vld4q_lane_s8(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            n: i64,
            ptr: *const i8,
        ) -> int8x16x4_t;
    }
    _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) -> int64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0"
        )]
        fn _vld4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x4_t;
    }
    _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    let mut ret_val: uint64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s8<const LANE: i32>(a: int8x8_t, b: uint8x8_t) -> int8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v8i8"
        )]
        fn _vluti2_lane_s8(a: int8x8_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2_lane_s8(a, b, LANE)
}
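// A sketch of the LUTI2 indexing scheme as we read Arm's description (the
// linked documentation is normative): the 64-bit index vector `b` packs
// 32 two-bit indices, and `LANE` selects which contiguous segment of them
// feeds the lookup -- two segments of 16 indices for byte tables
// (LANE <= 1), four segments of 8 indices for halfword tables (LANE <= 3),
// which matches the `static_assert!` bounds in the functions below.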
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v16i8"
        )]
        fn _vluti2q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s16<const LANE: i32>(a: int16x4_t, b: uint8x8_t) -> int16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v4i16"
        )]
        fn _vluti2_lane_s16(a: int16x4_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s16<const LANE: i32>(a: int16x8_t, b: uint8x8_t) -> int16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v8i16"
        )]
        fn _vluti2q_lane_s16(a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2q_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u8<const LANE: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint8x8_t) -> uint16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2q_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p8<const LANE: i32>(a: poly8x8_t, b: uint8x8_t) -> poly8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p16<const LANE: i32>(a: poly16x4_t, b: uint8x8_t) -> poly16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p16<const LANE: i32>(a: poly16x8_t, b: uint8x8_t) -> poly16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2q_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_f16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_f16_x2<const LANE: i32>(a: float16x8x2_t, b: uint8x8_t) -> float16x8_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x8_t) -> uint16x8_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x8_t) -> poly16x8_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x8_t) -> int16x8_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.lane.x2.v8i16"
        )]
        fn _vluti4q_lane_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti4q_lane_s16_x2(a.0, a.1, b, LANE)
}
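// The `_x2` forms take a two-register table (`a.0`, `a.1`): a 4-bit index
// can address 16 entries, which is exactly what the pair of `int16x8_t`
// registers provides to the lookup.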
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.lane.v8i8"
        )]
        fn _vluti4q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti4q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    static_assert!(LANE == 0);
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    static_assert!(LANE == 0);
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_f16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_f16_x2<const LANE: i32>(
    a: float16x8x2_t,
    b: uint8x16_t,
) -> float16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x16_t) -> uint16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x16_t) -> poly16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x16_t) -> int16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.x2.v8i16"
        )]
        fn _vluti4q_laneq_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x16_t, n: i32) -> int16x8_t;
    }
    _vluti4q_laneq_s16_x2(a.0, a.1, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s8<const LANE: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.v16i8"
        )]
        fn _vluti4q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    _vluti4q_laneq_s8(a, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u8<const LANE: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p8<const LANE: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v1f64"
        )]
        fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmax_f64(a, b) }
}
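// NaN handling differs between the two maximum families: `fmax` propagates
// NaN operands, while the `fmaxnm` intrinsics below follow the IEEE 754-2008
// maxNum rule and return the numeric operand when exactly one input is a
// quiet NaN.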
13193#[doc = "Maximum (vector)"]
13194#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"]
13195#[inline]
13196#[target_feature(enable = "neon")]
13197#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13198#[cfg_attr(test, assert_instr(fmax))]
13199pub fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
13200 unsafe extern "unadjusted" {
13201 #[cfg_attr(
13202 any(target_arch = "aarch64", target_arch = "arm64ec"),
13203 link_name = "llvm.aarch64.neon.fmax.v2f64"
13204 )]
13205 fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
13206 }
13207 unsafe { _vmaxq_f64(a, b) }
13208}
13209#[doc = "Maximum (vector)"]
13210#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxh_f16)"]
13211#[inline]
13212#[target_feature(enable = "neon,fp16")]
13213#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13214#[cfg_attr(test, assert_instr(fmax))]
13215pub fn vmaxh_f16(a: f16, b: f16) -> f16 {
13216 unsafe extern "unadjusted" {
13217 #[cfg_attr(
13218 any(target_arch = "aarch64", target_arch = "arm64ec"),
13219 link_name = "llvm.aarch64.neon.fmax.f16"
13220 )]
13221 fn _vmaxh_f16(a: f16, b: f16) -> f16;
13222 }
13223 unsafe { _vmaxh_f16(a, b) }
13224}
13225#[doc = "Floating-point Maximum Number (vector)"]
13226#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"]
13227#[inline]
13228#[target_feature(enable = "neon")]
13229#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13230#[cfg_attr(test, assert_instr(fmaxnm))]
13231pub fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
13232 unsafe extern "unadjusted" {
13233 #[cfg_attr(
13234 any(target_arch = "aarch64", target_arch = "arm64ec"),
13235 link_name = "llvm.aarch64.neon.fmaxnm.v1f64"
13236 )]
13237 fn _vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
13238 }
13239 unsafe { _vmaxnm_f64(a, b) }
13240}
13241#[doc = "Floating-point Maximum Number (vector)"]
13242#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"]
13243#[inline]
13244#[target_feature(enable = "neon")]
13245#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13246#[cfg_attr(test, assert_instr(fmaxnm))]
13247pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
13248 unsafe extern "unadjusted" {
13249 #[cfg_attr(
13250 any(target_arch = "aarch64", target_arch = "arm64ec"),
13251 link_name = "llvm.aarch64.neon.fmaxnm.v2f64"
13252 )]
13253 fn _vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
13254 }
13255 unsafe { _vmaxnmq_f64(a, b) }
13256}
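// --- Illustrative sketch (editor's addition, not generated bindings): the
// practical difference between `vmaxq_f64` (FMAX, which propagates a NaN
// operand) and `vmaxnmq_f64` (FMAXNM, IEEE 754 maxNum, which prefers the
// numeric operand over a quiet NaN). Assumes NEON is available to the caller.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn max_vs_maxnm_sketch() -> (f64, f64) {
    let x: float64x2_t = unsafe { core::mem::transmute([f64::NAN, 1.0]) };
    let y: float64x2_t = unsafe { core::mem::transmute([2.0f64, 2.0]) };
    let quiet: [f64; 2] = unsafe { core::mem::transmute(vmaxq_f64(x, y)) }; // lane 0 is NaN
    let number: [f64; 2] = unsafe { core::mem::transmute(vmaxnmq_f64(x, y)) }; // lane 0 is 2.0
    (quiet[0], number[0])
}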
#[doc = "Floating-point Maximum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnm.f16"
        )]
        fn _vmaxnmh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vmaxnmh_f16(a, b) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f16.v4f16"
        )]
        fn _vmaxnmv_f16(a: float16x4_t) -> f16;
    }
    unsafe { _vmaxnmv_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f16.v8f16"
        )]
        fn _vmaxnmvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vmaxnmvq_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
        )]
        fn _vmaxnmv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vmaxnmv_f32(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
        )]
        fn _vmaxnmvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vmaxnmvq_f64(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32"
        )]
        fn _vmaxnmvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vmaxnmvq_f32(a) }
}
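// --- Illustrative sketch (editor's addition, not generated bindings): the
// across-vector `v*nmv*` reductions collapse a whole vector to one scalar
// while treating a quiet NaN lane per maxNum, so it should not poison the
// result.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn maxnm_across_sketch() -> f32 {
    let v: float32x4_t = unsafe { core::mem::transmute([1.0f32, f32::NAN, 5.0, -2.0]) };
    vmaxnmvq_f32(v) // expected to yield 5.0
}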
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v4f16"
        )]
        fn _vmaxv_f16(a: float16x4_t) -> f16;
    }
    unsafe { _vmaxv_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v8f16"
        )]
        fn _vmaxvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vmaxvq_f16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vmaxv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vmaxv_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32"
        )]
        fn _vmaxvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vmaxvq_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vmaxvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vmaxvq_f64(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s8(a: int8x8_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxv.i8.v8i8"
        )]
        fn _vmaxv_s8(a: int8x8_t) -> i8;
    }
    unsafe { _vmaxv_s8(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s8(a: int8x16_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxv.i8.v16i8"
        )]
        fn _vmaxvq_s8(a: int8x16_t) -> i8;
    }
    unsafe { _vmaxvq_s8(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s16(a: int16x4_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxv.i16.v4i16"
        )]
        fn _vmaxv_s16(a: int16x4_t) -> i16;
    }
    unsafe { _vmaxv_s16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s16(a: int16x8_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxv.i16.v8i16"
        )]
        fn _vmaxvq_s16(a: int16x8_t) -> i16;
    }
    unsafe { _vmaxvq_s16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vmaxv_s32(a: int32x2_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxv.i32.v2i32"
        )]
        fn _vmaxv_s32(a: int32x2_t) -> i32;
    }
    unsafe { _vmaxv_s32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s32(a: int32x4_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxv.i32.v4i32"
        )]
        fn _vmaxvq_s32(a: int32x4_t) -> i32;
    }
    unsafe { _vmaxvq_s32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u8(a: uint8x8_t) -> u8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxv.i8.v8i8"
        )]
        fn _vmaxv_u8(a: uint8x8_t) -> u8;
    }
    unsafe { _vmaxv_u8(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u8(a: uint8x16_t) -> u8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxv.i8.v16i8"
        )]
        fn _vmaxvq_u8(a: uint8x16_t) -> u8;
    }
    unsafe { _vmaxvq_u8(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u16(a: uint16x4_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxv.i16.v4i16"
        )]
        fn _vmaxv_u16(a: uint16x4_t) -> u16;
    }
    unsafe { _vmaxv_u16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u16(a: uint16x8_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxv.i16.v8i16"
        )]
        fn _vmaxvq_u16(a: uint16x8_t) -> u16;
    }
    unsafe { _vmaxvq_u16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vmaxv_u32(a: uint32x2_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxv.i32.v2i32"
        )]
        fn _vmaxv_u32(a: uint32x2_t) -> u32;
    }
    unsafe { _vmaxv_u32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u32(a: uint32x4_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxv.i32.v4i32"
        )]
        fn _vmaxvq_u32(a: uint32x4_t) -> u32;
    }
    unsafe { _vmaxvq_u32(a) }
}
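// --- Illustrative sketch (editor's addition, not generated bindings): a
// horizontal reduction such as `vmaxvq_u8` folds all sixteen byte lanes into
// their maximum with one UMAXV, replacing the log2-depth shuffle-and-compare
// ladder one would otherwise write by hand.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn umaxv_sketch(bytes: [u8; 16]) -> u8 {
    let v: uint8x16_t = unsafe { core::mem::transmute(bytes) };
    vmaxvq_u8(v)
}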
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v1f64"
        )]
        fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmin_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v2f64"
        )]
        fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vminq_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.f16"
        )]
        fn _vminh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vminh_f16(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnm.v1f64"
        )]
        fn _vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vminnm_f64(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnm.v2f64"
        )]
        fn _vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vminnmq_f64(a, b) }
}
#[doc = "Floating-point Minimum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnm.f16"
        )]
        fn _vminnmh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vminnmh_f16(a, b) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f16.v4f16"
        )]
        fn _vminnmv_f16(a: float16x4_t) -> f16;
    }
    unsafe { _vminnmv_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f16.v8f16"
        )]
        fn _vminnmvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vminnmvq_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
        )]
        fn _vminnmv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vminnmv_f32(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
        )]
        fn _vminnmvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vminnmvq_f64(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32"
        )]
        fn _vminnmvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vminnmvq_f32(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v4f16"
        )]
        fn _vminv_f16(a: float16x4_t) -> f16;
    }
    unsafe { _vminv_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v8f16"
        )]
        fn _vminvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vminvq_f16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vminv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vminv_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v4f32"
        )]
        fn _vminvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vminvq_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vminvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vminvq_f64(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s8(a: int8x8_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminv.i8.v8i8"
        )]
        fn _vminv_s8(a: int8x8_t) -> i8;
    }
    unsafe { _vminv_s8(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s8(a: int8x16_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminv.i8.v16i8"
        )]
        fn _vminvq_s8(a: int8x16_t) -> i8;
    }
    unsafe { _vminvq_s8(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s16(a: int16x4_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminv.i16.v4i16"
        )]
        fn _vminv_s16(a: int16x4_t) -> i16;
    }
    unsafe { _vminv_s16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s16(a: int16x8_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminv.i16.v8i16"
        )]
        fn _vminvq_s16(a: int16x8_t) -> i16;
    }
    unsafe { _vminvq_s16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vminv_s32(a: int32x2_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminv.i32.v2i32"
        )]
        fn _vminv_s32(a: int32x2_t) -> i32;
    }
    unsafe { _vminv_s32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s32(a: int32x4_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminv.i32.v4i32"
        )]
        fn _vminvq_s32(a: int32x4_t) -> i32;
    }
    unsafe { _vminvq_s32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u8(a: uint8x8_t) -> u8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminv.i8.v8i8"
        )]
        fn _vminv_u8(a: uint8x8_t) -> u8;
    }
    unsafe { _vminv_u8(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u8(a: uint8x16_t) -> u8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminv.i8.v16i8"
        )]
        fn _vminvq_u8(a: uint8x16_t) -> u8;
    }
    unsafe { _vminvq_u8(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u16(a: uint16x4_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminv.i16.v4i16"
        )]
        fn _vminv_u16(a: uint16x4_t) -> u16;
    }
    unsafe { _vminv_u16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u16(a: uint16x8_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminv.i16.v8i16"
        )]
        fn _vminvq_u16(a: uint16x8_t) -> u16;
    }
    unsafe { _vminvq_u16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vminv_u32(a: uint32x2_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminv.i32.v2i32"
        )]
        fn _vminv_u32(a: uint32x2_t) -> u32;
    }
    unsafe { _vminv_u32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u32(a: uint32x4_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminv.i32.v4i32"
        )]
        fn _vminvq_u32(a: uint32x4_t) -> u32;
    }
    unsafe { _vminvq_u32(a) }
}
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe { simd_add(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe { simd_add(a, simd_mul(b, c)) }
}
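// --- Illustrative sketch (editor's addition, not generated bindings):
// `vmlaq_f64` computes `a + b * c` lane-wise. As the implementation above
// shows, the multiply and add round separately (FMUL + FADD); the fused
// single-rounding counterpart is `vfmaq_f64`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn mlaq_f64_sketch() -> [f64; 2] {
    let a: float64x2_t = unsafe { core::mem::transmute([1.0f64, 10.0]) };
    let b: float64x2_t = unsafe { core::mem::transmute([2.0f64, 3.0]) };
    let c: float64x2_t = unsafe { core::mem::transmute([4.0f64, 5.0]) };
    // [1.0 + 2.0 * 4.0, 10.0 + 3.0 * 5.0] = [9.0, 25.0]
    unsafe { core::mem::transmute(vmlaq_f64(a, b, c)) }
}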
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlal_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlal_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlal_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlal_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vmlal_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vmlal_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    vmlal_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    vmlal_high_u32(a, b, vdupq_n_u32(c))
}
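// --- Illustrative sketch (editor's addition, not generated bindings): the
// `_lane`/`_laneq` and `_n` forms above are broadcast conveniences over the
// plain `_high` intrinsics, so selecting a lane and duplicating that same
// scalar are expected to agree.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn mlal_high_lane_vs_n_sketch(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x4_t,
) -> (int32x4_t, int32x4_t) {
    let via_lane = vmlal_high_lane_s16::<3>(a, b, c);
    let c3 = unsafe { core::mem::transmute::<int16x4_t, [i16; 4]>(c) }[3];
    let via_n = vmlal_high_n_s16(a, b, c3);
    (via_lane, via_n) // both accumulate a + widen(high(b)) * c[3]
}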
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_s8(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_s16(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_u32(a, b, c)
    }
}
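// --- Illustrative sketch (editor's addition, not generated bindings): the
// `_high` widening forms read the upper half of their 128-bit inputs and
// multiply into double-width lanes, so the 32-bit product below cannot
// overflow.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn mlal_high_u32_sketch() -> [u64; 2] {
    let a: uint64x2_t = unsafe { core::mem::transmute([1u64, 1]) };
    let b: uint32x4_t = unsafe { core::mem::transmute([0u32, 0, 0xffff_ffff, 2]) };
    let c: uint32x4_t = unsafe { core::mem::transmute([0u32, 0, 2, 3]) };
    // Lanes 2 and 3 are used: [1 + 0xffff_ffff * 2, 1 + 2 * 3] = [0x1_ffff_ffff, 7]
    unsafe { core::mem::transmute(vmlal_high_u32(a, b, c)) }
}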
#[doc = "Floating-point multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe { simd_sub(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe { simd_sub(a, simd_mul(b, c)) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlsl_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlsl_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlsl_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlsl_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlsl_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlsl_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlsl_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlsl_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vmlsl_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vmlsl_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    vmlsl_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    vmlsl_high_u32(a, b, vdupq_n_u32(c))
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlsl_s8(a, b, c)
    }
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlsl_s16(a, b, c)
    }
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlsl_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlsl_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlsl_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlsl_u32(a, b, c)
    }
}
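// A hedged sketch (illustrative only): the `_high_` forms read only the upper
// half of their 128-bit inputs. Here the low half of `b` holds 7 and the high
// half holds 2; only the 2s take part, so each lane is 50 - 2 * 5 = 40.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vmlsl_high_s8_sketch() {
    let a = vdupq_n_s16(50);
    let b = vcombine_s8(vdup_n_s8(7), vdup_n_s8(2)); // low = 7, high = 2
    let c = vdupq_n_s8(5);
    let r = vmlsl_high_s8(a, b, c);
    assert_eq!(vgetq_lane_s16::<0>(r), 40);
}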
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
    unsafe {
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmovl_s8(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vmovl_s16(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vmovl_s32(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
    unsafe {
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmovl_u8(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
    unsafe {
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vmovl_u16(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
    unsafe {
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vmovl_u32(a)
    }
}
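// A hedged sketch (illustrative only): `vmovl_high_s8` sign-extends the upper
// eight `i8` lanes to `i16`. Lanes 0..8 of the input hold -1 and lanes 8..16
// hold -2, so every widened lane is -2.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vmovl_high_s8_sketch() {
    let v = vcombine_s8(vdup_n_s8(-1), vdup_n_s8(-2));
    let w = vmovl_high_s8(v); // sign-extends lanes 8..16 only
    assert_eq!(vgetq_lane_s16::<0>(w), -2);
}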
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    unsafe {
        let c: int8x8_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    unsafe {
        let c: int16x4_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    unsafe {
        let c: int32x2_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    unsafe {
        let c: uint8x8_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    unsafe {
        let c: uint16x4_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    unsafe {
        let c: uint32x2_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3])
    }
}
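// A hedged sketch (illustrative only): `vmovn_high_s16` truncates the eight
// `i16` lanes of `b` to `i8` and places them in the upper half of the result,
// keeping `a` as the lower half. Note that XTN2 truncates without saturating:
// 0x0102 narrows to 0x02.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vmovn_high_s16_sketch() {
    let lo = vdup_n_s8(1);
    let wide = vdupq_n_s16(0x0102);
    let r = vmovn_high_s16(lo, wide);
    assert_eq!(vgetq_lane_s8::<0>(r), 1); // lower half preserved
    assert_eq!(vgetq_lane_s8::<8>(r), 0x02); // truncated, not saturated
}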
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_mul(a, b) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_mul(a, b) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmul_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        simd_mul(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmulq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        simd_mul(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t {
    unsafe { simd_mul(a, vdup_n_f64(b)) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
    unsafe { simd_mul(a, vdupq_n_f64(b)) }
}
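// A hedged sketch (illustrative only): the `_lane_`/`_laneq_` forms broadcast
// one lane of `b` before multiplying, and the `_n_` forms do the same with a
// scalar. Selecting lane 1 of [2.0, 10.0] multiplies every lane of `a` by
// 10.0.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vmulq_laneq_f64_sketch() {
    let a = vdupq_n_f64(3.0);
    let b = vsetq_lane_f64::<1>(10.0, vdupq_n_f64(2.0)); // [2.0, 10.0]
    let r = vmulq_laneq_f64::<1>(a, b);
    assert_eq!(vgetq_lane_f64::<0>(r), 30.0);
}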
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    static_assert!(LANE == 0);
    unsafe {
        let b: f64 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vmulh_f16(a: f16, b: f16) -> f16 {
    a * b
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmulh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let b: f16 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmulh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let b: f16 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmull_high_s16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmull_high_s16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmull_high_s32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmull_high_s32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmull_high_u16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmull_high_u16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmull_high_u32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmull_high_u32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    vmull_high_s16(a, vdupq_n_s16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    vmull_high_s32(a, vdupq_n_s32(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t {
    vmull_high_u16(a, vdupq_n_u16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
    vmull_high_u32(a, vdupq_n_u32(b))
}
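// A hedged sketch (illustrative only): `vmull_high_n_u16` multiplies the
// upper four `u16` lanes of `a` by the scalar `b`, producing full 32-bit
// products: 300 * 1000 = 300_000, which would not fit in a `u16`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vmull_high_n_u16_sketch() {
    let a = vdupq_n_u16(300);
    let r = vmull_high_n_u16(a, 1000);
    assert_eq!(vgetq_lane_u32::<0>(r), 300_000);
}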
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull))]
pub fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
    unsafe { vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) }
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull))]
pub fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
    unsafe {
        let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_p8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_s8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_s16(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_s32(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_u8(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_u16(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_u32(a, b)
    }
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull))]
pub fn vmull_p64(a: p64, b: p64) -> p128 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.pmull64"
        )]
        fn _vmull_p64(a: p64, b: p64) -> int8x16_t;
    }
    unsafe { transmute(_vmull_p64(a, b)) }
}
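// A hedged sketch (illustrative only): `vmull_p64` is a carry-less (GF(2))
// multiply, the building block of GHASH and CRC kernels. As polynomials over
// GF(2), 2 = x and 3 = x + 1, so their product is x^2 + x = 6; carry-less
// 3 * 3 is (x + 1)^2 = x^2 + 1 = 5, where an integer multiply would give 9.
// Assumes the `aes` target feature is available at runtime in addition to
// `neon`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,aes")]
fn vmull_p64_sketch() {
    assert_eq!(vmull_p64(2, 3), 6);
    assert_eq!(vmull_p64(3, 3), 5); // the cross terms cancel modulo 2
}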
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    static_assert!(LANE == 0);
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let b: f64 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f16"
        )]
        fn _vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vmulx_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v8f16"
        )]
        fn _vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vmulxq_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f32"
        )]
        fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vmulx_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f32"
        )]
        fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vmulxq_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v1f64"
        )]
        fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmulx_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f64"
        )]
        fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vmulxq_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmulx_lane_f16<const LANE: i32>(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmulx_f16(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmulx_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmulx_f16(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmulxq_lane_f16<const LANE: i32>(a: float16x8_t, b: float16x4_t) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmulxq_f16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmulxq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmulxq_f16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmulxq_f32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmulxq_f32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_n_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmulx_n_f16(a: float16x4_t, b: f16) -> float16x4_t {
    vmulx_f16(a, vdup_n_f16(b))
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_n_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmulxq_n_f16(a: float16x8_t, b: f16) -> float16x8_t {
    vmulxq_f16(a, vdupq_n_f16(b))
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxd_f64(a: f64, b: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f64"
        )]
        fn _vmulxd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vmulxd_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxs_f32(a: f32, b: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f32"
        )]
        fn _vmulxs_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vmulxs_f32(a, b) }
}
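// A hedged sketch (illustrative only): FMULX behaves like an ordinary
// floating-point multiply except that 0.0 * infinity returns 2.0 (with the
// sign given by the XOR of the operand signs) instead of NaN, which is the
// special case reciprocal-estimate iterations rely on.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vmulxs_f32_sketch() {
    assert_eq!(vmulxs_f32(0.0, f32::INFINITY), 2.0);
    assert_eq!(vmulxs_f32(-0.0, f32::INFINITY), -2.0);
    assert_eq!(vmulxs_f32(3.0, 4.0), 12.0); // ordinary cases are unchanged
}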
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    static_assert!(LANE == 0);
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f16"
        )]
        fn _vmulxh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vmulxh_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmulxh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmulxh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    static_assert!(LANE == 0);
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vneg_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vneg_s64(a: int64x1_t) -> int64x1_t {
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vnegq_s64(a: int64x2_t) -> int64x2_t {
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vnegd_s64(a: i64) -> i64 {
    a.wrapping_neg()
}
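// A hedged sketch (illustrative only): the scalar NEG wraps on overflow, so
// negating i64::MIN yields i64::MIN again rather than panicking, matching
// `i64::wrapping_neg`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vnegd_s64_sketch() {
    assert_eq!(vnegd_s64(7), -7);
    assert_eq!(vnegd_s64(i64::MIN), i64::MIN); // wraps instead of trapping
}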
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegh_f16(a: f16) -> f16 {
    -a
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpaddd_f64(a: float64x2_t) -> f64 {
    unsafe {
        let a1: f64 = simd_extract!(a, 0);
        let a2: f64 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpadds_f32(a: float32x2_t) -> f32 {
    unsafe {
        let a1: f32 = simd_extract!(a, 0);
        let a2: f32 = simd_extract!(a, 1);
        a1 + a2
    }
}
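// A hedged sketch (illustrative only): `vpaddd_f64` adds the two lanes of a
// `float64x2_t` into one scalar, a common final step when reducing a vector
// sum.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vpaddd_f64_sketch() {
    let v = vsetq_lane_f64::<1>(2.5, vdupq_n_f64(1.0)); // [1.0, 2.5]
    assert_eq!(vpaddd_f64(v), 3.5);
}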
15951#[doc = "Add pairwise"]
15952#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"]
15953#[inline]
15954#[cfg(target_endian = "little")]
15955#[target_feature(enable = "neon")]
15956#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15957#[cfg_attr(test, assert_instr(addp))]
15958pub fn vpaddd_s64(a: int64x2_t) -> i64 {
15959 unsafe { transmute(vaddvq_u64(transmute(a))) }
15960}
15961#[doc = "Add pairwise"]
15962#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"]
15963#[inline]
15964#[cfg(target_endian = "big")]
15965#[target_feature(enable = "neon")]
15966#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15967#[cfg_attr(test, assert_instr(addp))]
15968pub fn vpaddd_s64(a: int64x2_t) -> i64 {
15969 let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
15970 unsafe { transmute(vaddvq_u64(transmute(a))) }
15971}
15972#[doc = "Add pairwise"]
15973#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"]
15974#[inline]
15975#[target_feature(enable = "neon")]
15976#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15977#[cfg_attr(test, assert_instr(addp))]
15978pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
15979 vaddvq_u64(a)
15980}
15981#[doc = "Floating-point add pairwise"]
15982#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
15983#[inline]
15984#[target_feature(enable = "neon,fp16")]
15985#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15986#[cfg_attr(test, assert_instr(faddp))]
15987pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
15988 unsafe extern "unadjusted" {
15989 #[cfg_attr(
15990 any(target_arch = "aarch64", target_arch = "arm64ec"),
15991 link_name = "llvm.aarch64.neon.faddp.v8f16"
15992 )]
15993 fn _vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
15994 }
15995 unsafe { _vpaddq_f16(a, b) }
15996}
15997#[doc = "Floating-point add pairwise"]
15998#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"]
15999#[inline]
16000#[target_feature(enable = "neon")]
16001#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16002#[cfg_attr(test, assert_instr(faddp))]
16003pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
16004 unsafe extern "unadjusted" {
16005 #[cfg_attr(
16006 any(target_arch = "aarch64", target_arch = "arm64ec"),
16007 link_name = "llvm.aarch64.neon.faddp.v4f32"
16008 )]
16009 fn _vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
16010 }
16011 unsafe { _vpaddq_f32(a, b) }
16012}
16013#[doc = "Floating-point add pairwise"]
16014#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"]
16015#[inline]
16016#[target_feature(enable = "neon")]
16017#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16018#[cfg_attr(test, assert_instr(faddp))]
16019pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
16020 unsafe extern "unadjusted" {
16021 #[cfg_attr(
16022 any(target_arch = "aarch64", target_arch = "arm64ec"),
16023 link_name = "llvm.aarch64.neon.faddp.v2f64"
16024 )]
16025 fn _vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
16026 }
16027 unsafe { _vpaddq_f64(a, b) }
16028}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v16i8"
        )]
        fn _vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    unsafe { _vpaddq_s8(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v8i16"
        )]
        fn _vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    unsafe { _vpaddq_s16(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v4i32"
        )]
        fn _vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    unsafe { _vpaddq_s32(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v2i64"
        )]
        fn _vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t;
    }
    unsafe { _vpaddq_s64(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vpaddq_s8(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
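    // Big-endian targets number vector lanes in the opposite order from the
    // layout the LLVM intrinsic assumes, so the inputs are reversed into
    // little-endian lane order, the signed little-endian implementation is
    // reused via `transmute`, and the result is reversed back. The same
    // pattern repeats for the other unsigned pairwise adds below.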
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vpaddq_s8(transmute(a), transmute(b)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { transmute(vpaddq_s16(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint16x8_t = transmute(vpaddq_s16(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { transmute(vpaddq_s32(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint32x4_t = transmute(vpaddq_s32(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { transmute(vpaddq_s64(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(vpaddq_s64(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v4f16"
        )]
        fn _vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpmax_f16(a, b) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v8f16"
        )]
        fn _vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpmaxq_f16(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v4f16"
        )]
        fn _vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpmaxnm_f16(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v8f16"
        )]
        fn _vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpmaxnmq_f16(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f32"
        )]
        fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vpmaxnm_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v4f32"
        )]
        fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpmaxnmq_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f64"
        )]
        fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpmaxnmq_f64(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
        )]
        fn _vpmaxnmqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpmaxnmqd_f64(a) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnms_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
        )]
        fn _vpmaxnms_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpmaxnms_f32(a) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v4f32"
        )]
        fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpmaxq_f32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v2f64"
        )]
        fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpmaxq_f64(a, b) }
}
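// Semantics sketch, not part of the generated bindings (the module name is
// hypothetical): `fmaxp` propagates NaN, while `fmaxnmp` implements the
// IEEE 754 maxNum operation and prefers the numeric operand when exactly one
// input of a pair is NaN. Assumes a little-endian AArch64 target.
#[cfg(all(test, target_arch = "aarch64", target_endian = "little"))]
mod pairwise_max_nan_sketch {
    use super::*;

    #[test]
    fn fmaxp_vs_fmaxnmp_nan_handling() {
        unsafe {
            let a = vld1q_f32([f32::NAN, 1.0, 2.0, 3.0].as_ptr());
            let b = vld1q_f32([4.0f32, 5.0, 6.0, 7.0].as_ptr());
            let max: [f32; 4] = transmute(vpmaxq_f32(a, b));
            let maxnm: [f32; 4] = transmute(vpmaxnmq_f32(a, b));
            assert!(max[0].is_nan()); // max(NaN, 1.0) propagates the NaN
            assert_eq!(maxnm[0], 1.0); // maxNum(NaN, 1.0) keeps the number
        }
    }
}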
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v16i8"
        )]
        fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    unsafe { _vpmaxq_s8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v8i16"
        )]
        fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    unsafe { _vpmaxq_s16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v4i32"
        )]
        fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    unsafe { _vpmaxq_s32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v16i8"
        )]
        fn _vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _vpmaxq_u8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v8i16"
        )]
        fn _vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _vpmaxq_u16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v4i32"
        )]
        fn _vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vpmaxq_u32(a, b) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vpmaxqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpmaxqd_f64(a) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxs_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vpmaxs_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpmaxs_f32(a) }
}
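// Illustrative usage sketch, not part of the generated bindings (the module
// name is hypothetical): the `vpmaxs_f32`/`vpmaxqd_f64` forms reduce a single
// vector to a scalar by taking the maximum of its two lanes. Assumes a
// little-endian AArch64 target.
#[cfg(all(test, target_arch = "aarch64", target_endian = "little"))]
mod pairwise_max_reduce_sketch {
    use super::*;

    #[test]
    fn reduces_two_lanes_to_their_maximum() {
        unsafe {
            let v = vld1_f32([3.0f32, 9.0].as_ptr());
            assert_eq!(vpmaxs_f32(v), 9.0);
        }
    }
}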
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f16"
        )]
        fn _vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpmin_f16(a, b) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v8f16"
        )]
        fn _vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpminq_f16(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f16"
        )]
        fn _vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpminnm_f16(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v8f16"
        )]
        fn _vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpminnmq_f16(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f32"
        )]
        fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vpminnm_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f32"
        )]
        fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpminnmq_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f64"
        )]
        fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpminnmq_f64(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
        )]
        fn _vpminnmqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpminnmqd_f64(a) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnms_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
        )]
        fn _vpminnms_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpminnms_f32(a) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f32"
        )]
        fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpminq_f32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v2f64"
        )]
        fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpminq_f64(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v16i8"
        )]
        fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    unsafe { _vpminq_s8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v8i16"
        )]
        fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    unsafe { _vpminq_s16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v4i32"
        )]
        fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    unsafe { _vpminq_s32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v16i8"
        )]
        fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _vpminq_u8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v8i16"
        )]
        fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _vpminq_u16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v4i32"
        )]
        fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vpminq_u32(a, b) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vpminqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpminqd_f64(a) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmins_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vpmins_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpmins_f32(a) }
}
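// The pairwise-minimum family above mirrors the maximum family: `fminp`
// propagates NaN, `fminnmp` implements IEEE 754 minNum, and the scalar
// `vpmins_f32`/`vpminqd_f64` forms reduce the two lanes of one vector; as an
// illustrative sketch, `vpmins_f32(vld1_f32([3.0f32, 9.0].as_ptr()))`
// evaluates to `3.0`.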
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabs_s64(a: int64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v1i64"
        )]
        fn _vqabs_s64(a: int64x1_t) -> int64x1_t;
    }
    unsafe { _vqabs_s64(a) }
}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v2i64"
        )]
        fn _vqabsq_s64(a: int64x2_t) -> int64x2_t;
    }
    unsafe { _vqabsq_s64(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsb_s8(a: i8) -> i8 {
    unsafe { simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsh_s16(a: i16) -> i16 {
    unsafe { simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabss_s32(a: i32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i32"
        )]
        fn _vqabss_s32(a: i32) -> i32;
    }
    unsafe { _vqabss_s32(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsd_s64(a: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i64"
        )]
        fn _vqabsd_s64(a: i64) -> i64;
    }
    unsafe { _vqabsd_s64(a) }
}
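// Saturation sketch, not part of the generated bindings (the module name is
// hypothetical): unlike a wrapping absolute value, the saturating form clamps
// the one unrepresentable input, `MIN`, to `MAX`. Assumes an AArch64 target
// where `neon` is a baseline feature, so the safe wrappers can be called
// directly.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqabs_sketch {
    use super::*;

    #[test]
    fn clamps_min_to_max() {
        assert_eq!(vqabsb_s8(i8::MIN), i8::MAX);
        assert_eq!(vqabsh_s16(-5), 5);
        assert_eq!(vqabsd_s64(i64::MIN), i64::MAX);
    }
}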
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddb_s8(a: i8, b: i8) -> i8 {
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqadd_s8(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqadd_s16(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddb_u8(a: u8, b: u8) -> u8 {
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    unsafe { simd_extract!(vqadd_u8(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddh_u16(a: u16, b: u16) -> u16 {
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    unsafe { simd_extract!(vqadd_u16(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqadds_s32(a: i32, b: i32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i32"
        )]
        fn _vqadds_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqadds_s32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddd_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i64"
        )]
        fn _vqaddd_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqaddd_s64(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqadds_u32(a: u32, b: u32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i32"
        )]
        fn _vqadds_u32(a: u32, b: u32) -> u32;
    }
    unsafe { _vqadds_u32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddd_u64(a: u64, b: u64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i64"
        )]
        fn _vqaddd_u64(a: u64, b: u64) -> u64;
    }
    unsafe { _vqaddd_u64(a, b) }
}
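// Saturation sketch, not part of the generated bindings (the module name is
// hypothetical): the scalar saturating adds clamp to the type's bounds
// instead of wrapping. Assumes an AArch64 target with `neon` as a baseline
// feature.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqadd_scalar_sketch {
    use super::*;

    #[test]
    fn clamps_at_the_numeric_bounds() {
        assert_eq!(vqaddb_s8(100, 100), i8::MAX); // 200 saturates to 127
        assert_eq!(vqaddb_u8(200, 100), u8::MAX); // 300 saturates to 255
        assert_eq!(vqaddd_s64(i64::MIN, -1), i64::MIN); // stays clamped at MIN
    }
}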
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 2);
    vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 1);
    vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vqaddq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    vqaddq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vqaddq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    vqaddq_s64(a, vqdmull_high_s32(b, c))
}
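// Semantics note (sketch, not normative): the `vqdmlal_high_*` family widens
// the upper halves of `b` and `c`, doubles their product, and accumulates
// into `a` with saturation; lane-wise it computes
// `sat(a + sat(2 * widen(b_hi) * widen(c_hi)))`. The `lane`/`laneq` forms
// broadcast one element of `c` as the multiplier, and the `n` forms
// broadcast the scalar `c`.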
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    unsafe { vqadds_s32(a, simd_extract!(x, 0)) }
}
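// Worked example (illustrative, not normative): `vqdmlalh_s16(a, b, c)`
// computes `sat32(a + sat32(2 * b * c))`, so `vqdmlalh_s16(5, 3, 4)` is
// `5 + 2 * 3 * 4 = 29`, while `vqdmlalh_s16(i32::MAX, i16::MAX, i16::MAX)`
// saturates to `i32::MAX`.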
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
    vqaddd_s64(a, vqdmulls_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 2);
    vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 1);
    vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vqsubq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    vqsubq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vqsubq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    vqsubq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    unsafe { vqsubs_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
    vqsubd_s64(a, vqdmulls_s32(b, c))
}
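// Editorial sketch (not part of the generated bindings): the scalar
// multiply-subtract long computes `sat(a - sat(2 * b * c))`. The module and
// test names are illustrative; NEON is baseline on AArch64, so the calls
// compile as safe code there.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqdmlsls_s32_editorial_example {
    use super::*;
    #[test]
    fn doubled_product_is_subtracted_with_saturation() {
        assert_eq!(vqdmlsls_s32(100, 2, 3), 88); // 100 - 2 * 2 * 3
        // The outer subtraction saturates at the i64 boundary.
        assert_eq!(vqdmlsls_s32(i64::MIN, 1, 1), i64::MIN);
    }
}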
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmulh_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    unsafe { simd_extract!(vqdmulh_s32(a, b), 0) }
}
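// Editorial sketch (not part of the generated bindings): `vqdmulhs_s32`
// returns the high half of the doubled product, `sat(2 * a * b) >> 32`. The
// only input pair whose doubled product overflows is `(i32::MIN, i32::MIN)`,
// which clamps to `i32::MAX`.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqdmulhs_s32_editorial_example {
    use super::*;
    #[test]
    fn high_half_of_doubled_product() {
        // 2 * 2^30 * 2^30 = 2^61; its top 32 bits are 2^29.
        assert_eq!(vqdmulhs_s32(0x4000_0000, 0x4000_0000), 0x2000_0000);
        assert_eq!(vqdmulhs_s32(i32::MIN, i32::MIN), i32::MAX);
    }
}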
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = vdup_n_s16(b);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = vdup_n_s32(b);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vqdmull_s32(a, b)
    }
}
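// Editorial sketch (not part of the generated bindings): the `_high_`
// variants read the upper lanes of their inputs. Vectors are built via
// `transmute` from plain arrays purely for illustration.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqdmull_high_s32_editorial_example {
    use super::*;
    #[test]
    fn multiplies_upper_lanes() {
        unsafe {
            let a: int32x4_t = core::mem::transmute([1i32, 2, 3, 4]);
            let b: int32x4_t = core::mem::transmute([10i32, 20, 30, 40]);
            // Lanes 2 and 3 are used: [2 * 3 * 30, 2 * 4 * 40].
            let r: [i64; 2] = core::mem::transmute(vqdmull_high_s32(a, b));
            assert_eq!(r, [180, 320]);
        }
    }
}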
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
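// Editorial sketch (not part of the generated bindings): the lane index is a
// const generic, so an out-of-range lane is rejected at compile time by
// `static_assert_uimm_bits!` rather than at run time.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqdmull_laneq_s32_editorial_example {
    use super::*;
    #[test]
    fn broadcasts_the_selected_lane() {
        unsafe {
            let a: int32x2_t = core::mem::transmute([1i32, 2]);
            let b: int32x4_t = core::mem::transmute([10i32, 20, 30, 40]);
            // Each lane of `a` is doubled-multiplied by lane 3 of `b`.
            let r: [i64; 2] = core::mem::transmute(vqdmull_laneq_s32::<3>(a, b));
            assert_eq!(r, [80, 160]);
        }
    }
}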
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_s16(a: i16, b: i16) -> i32 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmull_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_s32(a: i32, b: i32) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqdmulls.scalar"
        )]
        fn _vqdmulls_s32(a: i32, b: i32) -> i64;
    }
    unsafe { _vqdmulls_s32(a, b) }
}
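// Editorial sketch (not part of the generated bindings): doubling the
// widened product can overflow only for `i32::MIN * i32::MIN`, where the
// instruction saturates instead of wrapping.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqdmulls_s32_editorial_example {
    use super::*;
    #[test]
    fn doubled_widening_product_saturates() {
        assert_eq!(vqdmulls_s32(3, 4), 24); // 2 * 3 * 4
        // 2 * (-2^31) * (-2^31) = 2^63, one past i64::MAX.
        assert_eq!(vqdmulls_s32(i32::MIN, i32::MIN), i64::MAX);
    }
}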
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            vqmovn_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            vqmovn_u16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) }
}
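// Editorial sketch (not part of the generated bindings): the `_high_`
// narrowing forms keep `a` as the low half of the result and append the
// saturated narrowing of `b` as the high half.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqmovn_high_u64_editorial_example {
    use super::*;
    #[test]
    fn narrows_into_the_upper_half() {
        unsafe {
            let a: uint32x2_t = core::mem::transmute([1u32, 2]);
            let b: uint64x2_t = core::mem::transmute([3u64, u64::MAX]);
            let r: [u32; 4] = core::mem::transmute(vqmovn_high_u64(a, b));
            assert_eq!(r, [1, 2, 3, u32::MAX]); // u64::MAX clamps to u32::MAX
        }
    }
}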
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_s64(a: i64) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64"
        )]
        fn _vqmovnd_s64(a: i64) -> i32;
    }
    unsafe { _vqmovnd_s64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_u64(a: u64) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64"
        )]
        fn _vqmovnd_u64(a: u64) -> u32;
    }
    unsafe { _vqmovnd_u64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_s16(a: i16) -> i8 {
    unsafe { simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_s32(a: i32) -> i16 {
    unsafe { simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_u16(a: u16) -> u8 {
    unsafe { simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_u32(a: u32) -> u16 {
    unsafe { simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) }
}
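// Editorial sketch (not part of the generated bindings): the scalar
// narrowing helpers are plain clamps to the narrower type's range.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqmovns_editorial_example {
    use super::*;
    #[test]
    fn clamps_to_the_narrow_range() {
        assert_eq!(vqmovns_s32(70_000), i16::MAX);
        assert_eq!(vqmovns_s32(-70_000), i16::MIN);
        assert_eq!(vqmovns_u32(70_000), u16::MAX);
    }
}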
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            vqmovun_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovunh_s16(a: i16) -> u8 {
    unsafe { simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovuns_s32(a: i32) -> u16 {
    unsafe { simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovund_s64(a: i64) -> u32 {
    unsafe { simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) }
}
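// Editorial sketch (not part of the generated bindings): the unsigned
// narrowing of a signed source clamps negative inputs to zero and large
// positive inputs to the unsigned maximum.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqmovund_s64_editorial_example {
    use super::*;
    #[test]
    fn clamps_signed_input_to_unsigned_range() {
        assert_eq!(vqmovund_s64(-5), 0);
        assert_eq!(vqmovund_s64(42), 42);
        assert_eq!(vqmovund_s64(1i64 << 40), u32::MAX);
    }
}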
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqneg_s64(a: int64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v1i64"
        )]
        fn _vqneg_s64(a: int64x1_t) -> int64x1_t;
    }
    unsafe { _vqneg_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v2i64"
        )]
        fn _vqnegq_s64(a: int64x2_t) -> int64x2_t;
    }
    unsafe { _vqnegq_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegb_s8(a: i8) -> i8 {
    unsafe { simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegh_s16(a: i16) -> i16 {
    unsafe { simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegs_s32(a: i32) -> i32 {
    unsafe { simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegd_s64(a: i64) -> i64 {
    unsafe { simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) }
}
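// Editorial sketch (not part of the generated bindings): saturating negation
// exists because `-i64::MIN` is not representable; `SQNEG` clamps it instead
// of wrapping back to `i64::MIN`.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqnegd_s64_editorial_example {
    use super::*;
    #[test]
    fn negation_saturates_at_the_minimum() {
        assert_eq!(vqnegd_s64(7), -7);
        assert_eq!(vqnegd_s64(i64::MIN), i64::MAX);
    }
}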
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i16"
        )]
        fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    unsafe { _vqrdmlah_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v8i16"
        )]
        fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vqrdmlahq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v2i32"
        )]
        fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    unsafe { _vqrdmlah_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i32"
        )]
        fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vqrdmlahq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlah_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlah_s32(a, b, c), 0) }
}
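// Editorial sketch (not part of the generated bindings): `vqrdmlahs_s32`
// computes `sat(a + ((2 * b * c + (1 << 31)) >> 32))`. RDM is an ARMv8.1
// extension rather than baseline, so this sketch assumes `std`'s runtime
// feature detection is available and calls through a feature-enabled helper.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqrdmlahs_s32_editorial_example {
    use super::*;
    #[test]
    fn rounded_high_half_is_accumulated() {
        if !std::arch::is_aarch64_feature_detected!("rdm") {
            return;
        }
        #[target_feature(enable = "rdm")]
        fn check() {
            // The high half of 2 * 2^30 * 2^30 is 2^29; it is added to 5.
            assert_eq!(vqrdmlahs_s32(5, 0x4000_0000, 0x4000_0000), 5 + 0x2000_0000);
        }
        // Safety: the `rdm` feature was verified at run time above.
        unsafe { check() };
    }
}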
18210#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18211#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"]
18212#[inline]
18213#[target_feature(enable = "rdm")]
18214#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
18215#[rustc_legacy_const_generics(3)]
18216#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18217pub fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
18218 static_assert_uimm_bits!(LANE, 2);
18219 unsafe {
18220 let c: int16x4_t =
18221 simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
18222 vqrdmlsh_s16(a, b, c)
18223 }
18224}
18225#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18226#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"]
18227#[inline]
18228#[target_feature(enable = "rdm")]
18229#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
18230#[rustc_legacy_const_generics(3)]
18231#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18232pub fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
18233 static_assert_uimm_bits!(LANE, 1);
18234 unsafe {
18235 let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
18236 vqrdmlsh_s32(a, b, c)
18237 }
18238}
18239#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18240#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"]
18241#[inline]
18242#[target_feature(enable = "rdm")]
18243#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
18244#[rustc_legacy_const_generics(3)]
18245#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18246pub fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
18247 static_assert_uimm_bits!(LANE, 3);
18248 unsafe {
18249 let c: int16x4_t =
18250 simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
18251 vqrdmlsh_s16(a, b, c)
18252 }
18253}
18254#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18255#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"]
18256#[inline]
18257#[target_feature(enable = "rdm")]
18258#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
18259#[rustc_legacy_const_generics(3)]
18260#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18261pub fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
18262 static_assert_uimm_bits!(LANE, 2);
18263 unsafe {
18264 let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
18265 vqrdmlsh_s32(a, b, c)
18266 }
18267}
18268#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18269#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"]
18270#[inline]
18271#[target_feature(enable = "rdm")]
18272#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
18273#[rustc_legacy_const_generics(3)]
18274#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18275pub fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
18276 static_assert_uimm_bits!(LANE, 2);
18277 unsafe {
18278 let c: int16x8_t = simd_shuffle!(
18279 c,
18280 c,
18281 [
18282 LANE as u32,
18283 LANE as u32,
18284 LANE as u32,
18285 LANE as u32,
18286 LANE as u32,
18287 LANE as u32,
18288 LANE as u32,
18289 LANE as u32
18290 ]
18291 );
18292 vqrdmlshq_s16(a, b, c)
18293 }
18294}
18295#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18296#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"]
18297#[inline]
18298#[target_feature(enable = "rdm")]
18299#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
18300#[rustc_legacy_const_generics(3)]
18301#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18302pub fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
18303 static_assert_uimm_bits!(LANE, 1);
18304 unsafe {
18305 let c: int32x4_t =
18306 simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
18307 vqrdmlshq_s32(a, b, c)
18308 }
18309}
18310#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18311#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"]
18312#[inline]
18313#[target_feature(enable = "rdm")]
18314#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
18315#[rustc_legacy_const_generics(3)]
18316#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18317pub fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
18318 static_assert_uimm_bits!(LANE, 3);
18319 unsafe {
18320 let c: int16x8_t = simd_shuffle!(
18321 c,
18322 c,
18323 [
18324 LANE as u32,
18325 LANE as u32,
18326 LANE as u32,
18327 LANE as u32,
18328 LANE as u32,
18329 LANE as u32,
18330 LANE as u32,
18331 LANE as u32
18332 ]
18333 );
18334 vqrdmlshq_s16(a, b, c)
18335 }
18336}
18337#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18338#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"]
18339#[inline]
18340#[target_feature(enable = "rdm")]
18341#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
18342#[rustc_legacy_const_generics(3)]
18343#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18344pub fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
18345 static_assert_uimm_bits!(LANE, 2);
18346 unsafe {
18347 let c: int32x4_t =
18348 simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
18349 vqrdmlshq_s32(a, b, c)
18350 }
18351}
18352#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18353#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"]
18354#[inline]
18355#[target_feature(enable = "rdm")]
18356#[cfg_attr(test, assert_instr(sqrdmlsh))]
18357#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18358pub fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
18359 unsafe extern "unadjusted" {
18360 #[cfg_attr(
18361 any(target_arch = "aarch64", target_arch = "arm64ec"),
18362 link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16"
18363 )]
18364 fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
18365 }
18366 unsafe { _vqrdmlsh_s16(a, b, c) }
18367}
18368#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18369#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"]
18370#[inline]
18371#[target_feature(enable = "rdm")]
18372#[cfg_attr(test, assert_instr(sqrdmlsh))]
18373#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18374pub fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
18375 unsafe extern "unadjusted" {
18376 #[cfg_attr(
18377 any(target_arch = "aarch64", target_arch = "arm64ec"),
18378 link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16"
18379 )]
18380 fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
18381 }
18382 unsafe { _vqrdmlshq_s16(a, b, c) }
18383}
18384#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18385#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"]
18386#[inline]
18387#[target_feature(enable = "rdm")]
18388#[cfg_attr(test, assert_instr(sqrdmlsh))]
18389#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18390pub fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
18391 unsafe extern "unadjusted" {
18392 #[cfg_attr(
18393 any(target_arch = "aarch64", target_arch = "arm64ec"),
18394 link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32"
18395 )]
18396 fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
18397 }
18398 unsafe { _vqrdmlsh_s32(a, b, c) }
18399}
18400#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18401#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"]
18402#[inline]
18403#[target_feature(enable = "rdm")]
18404#[cfg_attr(test, assert_instr(sqrdmlsh))]
18405#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18406pub fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
18407 unsafe extern "unadjusted" {
18408 #[cfg_attr(
18409 any(target_arch = "aarch64", target_arch = "arm64ec"),
18410 link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32"
18411 )]
18412 fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
18413 }
18414 unsafe { _vqrdmlshq_s32(a, b, c) }
18415}
18416#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18417#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"]
18418#[inline]
18419#[target_feature(enable = "rdm")]
18420#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
18421#[rustc_legacy_const_generics(3)]
18422#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18423pub fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
18424 static_assert_uimm_bits!(LANE, 2);
18425 unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
18426}
18427#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18428#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"]
18429#[inline]
18430#[target_feature(enable = "rdm")]
18431#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
18432#[rustc_legacy_const_generics(3)]
18433#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18434pub fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
18435 static_assert_uimm_bits!(LANE, 3);
18436 unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
18437}
18438#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18439#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"]
18440#[inline]
18441#[target_feature(enable = "rdm")]
18442#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
18443#[rustc_legacy_const_generics(3)]
18444#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18445pub fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
18446 static_assert_uimm_bits!(LANE, 1);
18447 unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
18448}
18449#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18450#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"]
18451#[inline]
18452#[target_feature(enable = "rdm")]
18453#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
18454#[rustc_legacy_const_generics(3)]
18455#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18456pub fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
18457 static_assert_uimm_bits!(LANE, 2);
18458 unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
18459}
18460#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18461#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"]
18462#[inline]
18463#[target_feature(enable = "rdm")]
18464#[cfg_attr(test, assert_instr(sqrdmlsh))]
18465#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18466pub fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 {
18467 let a: int16x4_t = vdup_n_s16(a);
18468 let b: int16x4_t = vdup_n_s16(b);
18469 let c: int16x4_t = vdup_n_s16(c);
18470 unsafe { simd_extract!(vqrdmlsh_s16(a, b, c), 0) }
18471}
18472#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18473#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"]
18474#[inline]
18475#[target_feature(enable = "rdm")]
18476#[cfg_attr(test, assert_instr(sqrdmlsh))]
18477#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18478pub fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
18479 let a: int32x2_t = vdup_n_s32(a);
18480 let b: int32x2_t = vdup_n_s32(b);
18481 let c: int32x2_t = vdup_n_s32(c);
18482 unsafe { simd_extract!(vqrdmlsh_s32(a, b, c), 0) }
18483}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_s16(a: i16, b: i16) -> i16 {
    unsafe { simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
    unsafe { simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) }
}
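// Semantics sketch (illustrative, assuming the Arm-documented behaviour of
// SQRDMULH; not generated code): the scalar forms return the rounded high
// half of the doubled product, `sat((2 * a * b + 2^(esize - 1)) >> esize)`.
// In Q15 terms, 0.5 * 0.5 rounds to 0.25:
//
//     // assert_eq!(vqrdmulhh_s16(0x4000, 0x4000), 0x2000);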
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_s8(a: i8, b: i8) -> i8 {
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_s8(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_s16(a, b), 0) }
}
#[doc = "Unsigned saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_u8(a: u8, b: i8) -> u8 {
    let a: uint8x8_t = vdup_n_u8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_u8(a, b), 0) }
}
#[doc = "Unsigned saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_u16(a: u16, b: i16) -> u16 {
    let a: uint16x4_t = vdup_n_u16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_u16(a, b), 0) }
}
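// Semantics sketch (illustrative, assuming the Arm-documented behaviour of
// SQRSHL/UQRSHL; not generated code): a negative shift count in `b` performs
// a rounding shift right instead:
//
//     // (7 + (1 << 0)) >> 1 == 4
//     // assert_eq!(vqrshlb_u8(7, -1), 4);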
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i64"
        )]
        fn _vqrshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqrshld_s64(a, b) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_s32(a: i32, b: i32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i32"
        )]
        fn _vqrshls_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqrshls_s32(a, b) }
}
#[doc = "Unsigned saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_u32(a: u32, b: i32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i32"
        )]
        fn _vqrshls_u32(a: u32, b: i32) -> u32;
    }
    unsafe { _vqrshls_u32(a, b) }
}
#[doc = "Unsigned saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i64"
        )]
        fn _vqrshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqrshld_u64(a, b) }
}
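// Note: unlike the 8/16-bit scalar forms above, which broadcast into a vector
// and extract lane 0, the 32/64-bit forms bind the scalar
// `llvm.aarch64.neon.{s,u}qrshl` intrinsics directly, since LLVM exposes
// those at the i32/i64 widths.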
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
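// Layout sketch (illustrative; not generated code): the `_high_` variants
// keep `a` as the low half of the result and place the narrowed `b` in the
// high half, which is what the `simd_shuffle!` index lists above express.
// For vqrshrn_high_n_s64:
//
//     // result == [a[0], a[1], narrow(b)[0], narrow(b)[1]]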
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    let a: uint64x2_t = vdupq_n_u64(a);
    unsafe { simd_extract!(vqrshrn_n_u64::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    let a: uint16x8_t = vdupq_n_u16(a);
    unsafe { simd_extract!(vqrshrn_n_u16::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    let a: uint32x4_t = vdupq_n_u32(a);
    unsafe { simd_extract!(vqrshrn_n_u32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    static_assert!(N >= 1 && N <= 8);
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrn_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
    static_assert!(N >= 1 && N <= 16);
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrn_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    static_assert!(N >= 1 && N <= 32);
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrn_n_s64::<N>(a), 0) }
}
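// Semantics sketch (illustrative, assuming the Arm-documented behaviour of
// SQRSHRN/UQRSHRN; not generated code): the scalar rounded-narrow forms
// compute `sat_narrow((a + (1 << (N - 1))) >> N)`:
//
//     // (0x1_0000_0000u64 + (1 << 15)) >> 16 == 0x1_0000
//     // assert_eq!(vqrshrnd_n_u64::<16>(0x1_0000_0000), 0x1_0000);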
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqrshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrun_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrun_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrun_n_s32::<N>(a), 0) }
}
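// Semantics sketch (illustrative, assuming the Arm-documented behaviour of
// SQRSHRUN; not generated code): these narrow a signed input to an unsigned
// result, so negative inputs saturate to 0 and oversized values to MAX:
//
//     // assert_eq!(vqrshrunh_n_s16::<4>(-1000), 0);
//     // assert_eq!(vqrshrunh_n_s16::<4>(4096), 255); // (4096 + 8) >> 4 == 256 > u8::MAX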
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(vqshl_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshl_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshl_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshl_n_s32::<N>(vdup_n_s32(a)), 0) }
}
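// Semantics sketch (illustrative, assuming the Arm-documented behaviour of
// SQSHL by immediate; not generated code): the shift saturates on overflow
// instead of wrapping:
//
//     // 0x40 << 1 == 0x80 overflows i8, so the result is i8::MAX
//     // assert_eq!(vqshlb_n_s8::<1>(0x40), 127);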
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(vqshl_n_u8::<N>(vdup_n_u8(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshl_n_u64::<N>(vdup_n_u64(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshl_n_u16::<N>(vdup_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshl_n_u32::<N>(vdup_n_u32(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_s8(a: i8, b: i8) -> i8 {
    let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_s16(a: i16, b: i16) -> i16 {
    let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_s32(a: i32, b: i32) -> i32 {
    let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_u8(a: u8, b: i8) -> u8 {
    let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_u16(a: u16, b: i16) -> u16 {
    let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_u32(a: u32, b: i32) -> u32 {
    let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshl.i64"
        )]
        fn _vqshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqshld_s64(a, b) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshl.i64"
        )]
        fn _vqshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqshld_u64(a, b) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(vqshlu_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshlu_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshlu_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshlu_n_s32::<N>(vdup_n_s32(a)), 0) }
}
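// Semantics sketch (illustrative, assuming the Arm-documented behaviour of
// SQSHLU; not generated code): the input is signed but the result is
// unsigned, so negative inputs saturate to 0:
//
//     // assert_eq!(vqshlub_n_s8::<1>(-5), 0);
//     // assert_eq!(vqshlub_n_s8::<1>(100), 200);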
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshrn.i32"
        )]
        fn _vqshrnd_n_s64(a: i64, n: i32) -> i32;
    }
    unsafe { _vqshrnd_n_s64(a, N) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshrn.i32"
        )]
        fn _vqshrnd_n_u64(a: u64, n: i32) -> u32;
    }
    unsafe { _vqshrnd_n_u64(a, N) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrn_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrn_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrn_n_u16::<N>(vdupq_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrn_n_u32::<N>(vdupq_n_u32(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_extract!(vqshrun_n_s64::<N>(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrun_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrun_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubb_s8(a: i8, b: i8) -> i8 {
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqsub_s8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqsub_s16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubb_u8(a: u8, b: u8) -> u8 {
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    unsafe { simd_extract!(vqsub_u8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubh_u16(a: u16, b: u16) -> u16 {
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    unsafe { simd_extract!(vqsub_u16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubs_s32(a: i32, b: i32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i32"
        )]
        fn _vqsubs_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqsubs_s32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubd_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i64"
        )]
        fn _vqsubd_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqsubd_s64(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubs_u32(a: u32, b: u32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i32"
        )]
        fn _vqsubs_u32(a: u32, b: u32) -> u32;
    }
    unsafe { _vqsubs_u32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubd_u64(a: u64, b: u64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i64"
        )]
        fn _vqsubd_u64(a: u64, b: u64) -> u64;
    }
    unsafe { _vqsubd_u64(a, b) }
}
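// Semantics sketch (illustrative, assuming the Arm-documented behaviour of
// SQSUB/UQSUB; not generated code): the scalar saturating subtracts clamp to
// the type bounds instead of wrapping:
//
//     // assert_eq!(vqsubb_s8(-100, 100), i8::MIN); // -200 clamps to -128
//     // assert_eq!(vqsubb_u8(3, 7), 0);            // underflow clamps to 0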
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v8i8"
        )]
        fn _vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl1(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v16i8"
        )]
        fn _vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl1q(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    vqtbl1(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    vqtbl1q(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
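// Semantics sketch (illustrative, assuming the Arm-documented behaviour of
// TBL; not generated code): each index byte in `b` selects a byte from the
// table `a`, and any out-of-range index (here >= 16) yields 0:
//
//     // with a == [0, 10, 20, ..., 150] and b == [1, 3, 200, ...],
//     // the result starts [10, 30, 0, ...]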
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v8i8"
        )]
        fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl2(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v16i8"
        )]
        fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl2q(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t {
    vqtbl2(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t {
    vqtbl2q(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
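// Note: on big-endian targets the public vector types store lanes in the
// opposite order from the register layout the `tbl` instruction expects, so
// the big-endian variants reverse the table halves, the index vector, and
// the result with `simd_shuffle!` around the call.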
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    let mut a: uint8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
19743#[doc = "Table look-up"]
19744#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"]
19745#[inline]
19746#[target_feature(enable = "neon")]
19747#[cfg_attr(test, assert_instr(tbl))]
19748#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19749fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
19750 unsafe extern "unadjusted" {
19751 #[cfg_attr(
19752 any(target_arch = "aarch64", target_arch = "arm64ec"),
19753 link_name = "llvm.aarch64.neon.tbl3.v8i8"
19754 )]
19755 fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
19756 }
19757 unsafe { _vqtbl3(a, b, c, d) }
19758}
19759#[doc = "Table look-up"]
19760#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"]
19761#[inline]
19762#[target_feature(enable = "neon")]
19763#[cfg_attr(test, assert_instr(tbl))]
19764#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19765fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
19766 unsafe extern "unadjusted" {
19767 #[cfg_attr(
19768 any(target_arch = "aarch64", target_arch = "arm64ec"),
19769 link_name = "llvm.aarch64.neon.tbl3.v16i8"
19770 )]
19771 fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
19772 }
19773 unsafe { _vqtbl3q(a, b, c, d) }
19774}
19775#[doc = "Table look-up"]
19776#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"]
19777#[inline]
19778#[target_feature(enable = "neon")]
19779#[cfg_attr(test, assert_instr(tbl))]
19780#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19781pub fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t {
19782 vqtbl3(a.0, a.1, a.2, b)
19783}
19784#[doc = "Table look-up"]
19785#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"]
19786#[inline]
19787#[target_feature(enable = "neon")]
19788#[cfg_attr(test, assert_instr(tbl))]
19789#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19790pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t {
19791 vqtbl3q(a.0, a.1, a.2, b)
19792}
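// A minimal usage sketch (illustrative only, not part of the generated
// bindings; the `_usage` helper name is hypothetical): `vqtbl3q_s8` treats
// `a` as one contiguous 48-byte table, so index lanes 0..=47 select table
// bytes and any out-of-range index produces 0 for that lane.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vqtbl3q_s8_usage() {
    // Fill the three table registers with recognizable values 1, 2 and 3.
    let table = int8x16x3_t(vdupq_n_s8(1), vdupq_n_s8(2), vdupq_n_s8(3));
    // Index 16 addresses the first byte of the second register, so it reads 2.
    assert_eq!(vgetq_lane_s8::<0>(vqtbl3q_s8(table, vdupq_n_u8(16))), 2);
    // Index 200 is outside the 48-byte table, so the lane is zeroed.
    assert_eq!(vgetq_lane_s8::<0>(vqtbl3q_s8(table, vdupq_n_u8(200))), 0);
}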
19793#[doc = "Table look-up"]
19794#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
19795#[inline]
19796#[cfg(target_endian = "little")]
19797#[target_feature(enable = "neon")]
19798#[cfg_attr(test, assert_instr(tbl))]
19799#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19800pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
19801 unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
19802}
19803#[doc = "Table look-up"]
19804#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
19805#[inline]
19806#[cfg(target_endian = "big")]
19807#[target_feature(enable = "neon")]
19808#[cfg_attr(test, assert_instr(tbl))]
19809#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19810pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
19811 let mut a: uint8x16x3_t = a;
19812 a.0 = unsafe {
19813 simd_shuffle!(
19814 a.0,
19815 a.0,
19816 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19817 )
19818 };
19819 a.1 = unsafe {
19820 simd_shuffle!(
19821 a.1,
19822 a.1,
19823 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19824 )
19825 };
19826 a.2 = unsafe {
19827 simd_shuffle!(
19828 a.2,
19829 a.2,
19830 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19831 )
19832 };
19833 let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
19834 unsafe {
19835 let ret_val: uint8x8_t =
19836 transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
19837 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
19838 }
19839}
19840#[doc = "Table look-up"]
19841#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
19842#[inline]
19843#[cfg(target_endian = "little")]
19844#[target_feature(enable = "neon")]
19845#[cfg_attr(test, assert_instr(tbl))]
19846#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19847pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
19848 unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
19849}
19850#[doc = "Table look-up"]
19851#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
19852#[inline]
19853#[cfg(target_endian = "big")]
19854#[target_feature(enable = "neon")]
19855#[cfg_attr(test, assert_instr(tbl))]
19856#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19857pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
19858 let mut a: uint8x16x3_t = a;
19859 a.0 = unsafe {
19860 simd_shuffle!(
19861 a.0,
19862 a.0,
19863 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19864 )
19865 };
19866 a.1 = unsafe {
19867 simd_shuffle!(
19868 a.1,
19869 a.1,
19870 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19871 )
19872 };
19873 a.2 = unsafe {
19874 simd_shuffle!(
19875 a.2,
19876 a.2,
19877 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19878 )
19879 };
19880 let b: uint8x16_t =
19881 unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
19882 unsafe {
19883 let ret_val: uint8x16_t =
19884 transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
19885 simd_shuffle!(
19886 ret_val,
19887 ret_val,
19888 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19889 )
19890 }
19891}
19892#[doc = "Table look-up"]
19893#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
19894#[inline]
19895#[cfg(target_endian = "little")]
19896#[target_feature(enable = "neon")]
19897#[cfg_attr(test, assert_instr(tbl))]
19898#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19899pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
19900 unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
19901}
19902#[doc = "Table look-up"]
19903#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
19904#[inline]
19905#[cfg(target_endian = "big")]
19906#[target_feature(enable = "neon")]
19907#[cfg_attr(test, assert_instr(tbl))]
19908#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19909pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
19910 let mut a: poly8x16x3_t = a;
19911 a.0 = unsafe {
19912 simd_shuffle!(
19913 a.0,
19914 a.0,
19915 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19916 )
19917 };
19918 a.1 = unsafe {
19919 simd_shuffle!(
19920 a.1,
19921 a.1,
19922 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19923 )
19924 };
19925 a.2 = unsafe {
19926 simd_shuffle!(
19927 a.2,
19928 a.2,
19929 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19930 )
19931 };
19932 let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
19933 unsafe {
19934 let ret_val: poly8x8_t =
19935 transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
19936 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
19937 }
19938}
19939#[doc = "Table look-up"]
19940#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
19941#[inline]
19942#[cfg(target_endian = "little")]
19943#[target_feature(enable = "neon")]
19944#[cfg_attr(test, assert_instr(tbl))]
19945#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19946pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
19947 unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
19948}
19949#[doc = "Table look-up"]
19950#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
19951#[inline]
19952#[cfg(target_endian = "big")]
19953#[target_feature(enable = "neon")]
19954#[cfg_attr(test, assert_instr(tbl))]
19955#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19956pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
19957 let mut a: poly8x16x3_t = a;
19958 a.0 = unsafe {
19959 simd_shuffle!(
19960 a.0,
19961 a.0,
19962 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19963 )
19964 };
19965 a.1 = unsafe {
19966 simd_shuffle!(
19967 a.1,
19968 a.1,
19969 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19970 )
19971 };
19972 a.2 = unsafe {
19973 simd_shuffle!(
19974 a.2,
19975 a.2,
19976 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19977 )
19978 };
19979 let b: uint8x16_t =
19980 unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
19981 unsafe {
19982 let ret_val: poly8x16_t =
19983 transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
19984 simd_shuffle!(
19985 ret_val,
19986 ret_val,
19987 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19988 )
19989 }
19990}
19991#[doc = "Table look-up"]
19992#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"]
19993#[inline]
19994#[target_feature(enable = "neon")]
19995#[cfg_attr(test, assert_instr(tbl))]
19996#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19997fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
19998 unsafe extern "unadjusted" {
19999 #[cfg_attr(
20000 any(target_arch = "aarch64", target_arch = "arm64ec"),
20001 link_name = "llvm.aarch64.neon.tbl4.v8i8"
20002 )]
20003 fn _vqtbl4(
20004 a: int8x16_t,
20005 b: int8x16_t,
20006 c: int8x16_t,
20007 d: int8x16_t,
20008 e: uint8x8_t,
20009 ) -> int8x8_t;
20010 }
20011 unsafe { _vqtbl4(a, b, c, d, e) }
20012}
20013#[doc = "Table look-up"]
20014#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"]
20015#[inline]
20016#[target_feature(enable = "neon")]
20017#[cfg_attr(test, assert_instr(tbl))]
20018#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20019fn vqtbl4q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
20020 unsafe extern "unadjusted" {
20021 #[cfg_attr(
20022 any(target_arch = "aarch64", target_arch = "arm64ec"),
20023 link_name = "llvm.aarch64.neon.tbl4.v16i8"
20024 )]
20025 fn _vqtbl4q(
20026 a: int8x16_t,
20027 b: int8x16_t,
20028 c: int8x16_t,
20029 d: int8x16_t,
20030 e: uint8x16_t,
20031 ) -> int8x16_t;
20032 }
20033 unsafe { _vqtbl4q(a, b, c, d, e) }
20034}
20035#[doc = "Table look-up"]
20036#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"]
20037#[inline]
20038#[target_feature(enable = "neon")]
20039#[cfg_attr(test, assert_instr(tbl))]
20040#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20041pub fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t {
20042 vqtbl4(a.0, a.1, a.2, a.3, b)
20043}
20044#[doc = "Table look-up"]
20045#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"]
20046#[inline]
20047#[target_feature(enable = "neon")]
20048#[cfg_attr(test, assert_instr(tbl))]
20049#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20050pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t {
20051 vqtbl4q(a.0, a.1, a.2, a.3, b)
20052}
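// A minimal usage sketch (illustrative only; the `_usage` helper is
// hypothetical): with four 16-byte registers, `vqtbl4q_s8` indexes a 64-byte
// table, so every index 0..=63 is in range and only 64..=255 produce 0.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vqtbl4q_s8_usage() {
    let table = int8x16x4_t(
        vdupq_n_s8(10),
        vdupq_n_s8(11),
        vdupq_n_s8(12),
        vdupq_n_s8(13),
    );
    // Index 63 addresses the last byte of the fourth register.
    assert_eq!(vgetq_lane_s8::<0>(vqtbl4q_s8(table, vdupq_n_u8(63))), 13);
}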
20053#[doc = "Table look-up"]
20054#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
20055#[inline]
20056#[cfg(target_endian = "little")]
20057#[target_feature(enable = "neon")]
20058#[cfg_attr(test, assert_instr(tbl))]
20059#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20060pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
20061 unsafe {
20062 transmute(vqtbl4(
20063 transmute(a.0),
20064 transmute(a.1),
20065 transmute(a.2),
20066 transmute(a.3),
20067 b,
20068 ))
20069 }
20070}
20071#[doc = "Table look-up"]
20072#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
20073#[inline]
20074#[cfg(target_endian = "big")]
20075#[target_feature(enable = "neon")]
20076#[cfg_attr(test, assert_instr(tbl))]
20077#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20078pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
20079 let mut a: uint8x16x4_t = a;
20080 a.0 = unsafe {
20081 simd_shuffle!(
20082 a.0,
20083 a.0,
20084 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20085 )
20086 };
20087 a.1 = unsafe {
20088 simd_shuffle!(
20089 a.1,
20090 a.1,
20091 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20092 )
20093 };
20094 a.2 = unsafe {
20095 simd_shuffle!(
20096 a.2,
20097 a.2,
20098 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20099 )
20100 };
20101 a.3 = unsafe {
20102 simd_shuffle!(
20103 a.3,
20104 a.3,
20105 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20106 )
20107 };
20108 let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
20109 unsafe {
20110 let ret_val: uint8x8_t = transmute(vqtbl4(
20111 transmute(a.0),
20112 transmute(a.1),
20113 transmute(a.2),
20114 transmute(a.3),
20115 b,
20116 ));
20117 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
20118 }
20119}
20120#[doc = "Table look-up"]
20121#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
20122#[inline]
20123#[cfg(target_endian = "little")]
20124#[target_feature(enable = "neon")]
20125#[cfg_attr(test, assert_instr(tbl))]
20126#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20127pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
20128 unsafe {
20129 transmute(vqtbl4q(
20130 transmute(a.0),
20131 transmute(a.1),
20132 transmute(a.2),
20133 transmute(a.3),
20134 b,
20135 ))
20136 }
20137}
20138#[doc = "Table look-up"]
20139#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
20140#[inline]
20141#[cfg(target_endian = "big")]
20142#[target_feature(enable = "neon")]
20143#[cfg_attr(test, assert_instr(tbl))]
20144#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20145pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
20146 let mut a: uint8x16x4_t = a;
20147 a.0 = unsafe {
20148 simd_shuffle!(
20149 a.0,
20150 a.0,
20151 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20152 )
20153 };
20154 a.1 = unsafe {
20155 simd_shuffle!(
20156 a.1,
20157 a.1,
20158 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20159 )
20160 };
20161 a.2 = unsafe {
20162 simd_shuffle!(
20163 a.2,
20164 a.2,
20165 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20166 )
20167 };
20168 a.3 = unsafe {
20169 simd_shuffle!(
20170 a.3,
20171 a.3,
20172 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20173 )
20174 };
20175 let b: uint8x16_t =
20176 unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20177 unsafe {
20178 let ret_val: uint8x16_t = transmute(vqtbl4q(
20179 transmute(a.0),
20180 transmute(a.1),
20181 transmute(a.2),
20182 transmute(a.3),
20183 b,
20184 ));
20185 simd_shuffle!(
20186 ret_val,
20187 ret_val,
20188 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20189 )
20190 }
20191}
20192#[doc = "Table look-up"]
20193#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
20194#[inline]
20195#[cfg(target_endian = "little")]
20196#[target_feature(enable = "neon")]
20197#[cfg_attr(test, assert_instr(tbl))]
20198#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20199pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
20200 unsafe {
20201 transmute(vqtbl4(
20202 transmute(a.0),
20203 transmute(a.1),
20204 transmute(a.2),
20205 transmute(a.3),
20206 b,
20207 ))
20208 }
20209}
20210#[doc = "Table look-up"]
20211#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
20212#[inline]
20213#[cfg(target_endian = "big")]
20214#[target_feature(enable = "neon")]
20215#[cfg_attr(test, assert_instr(tbl))]
20216#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20217pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
20218 let mut a: poly8x16x4_t = a;
20219 a.0 = unsafe {
20220 simd_shuffle!(
20221 a.0,
20222 a.0,
20223 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20224 )
20225 };
20226 a.1 = unsafe {
20227 simd_shuffle!(
20228 a.1,
20229 a.1,
20230 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20231 )
20232 };
20233 a.2 = unsafe {
20234 simd_shuffle!(
20235 a.2,
20236 a.2,
20237 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20238 )
20239 };
20240 a.3 = unsafe {
20241 simd_shuffle!(
20242 a.3,
20243 a.3,
20244 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20245 )
20246 };
20247 let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
20248 unsafe {
20249 let ret_val: poly8x8_t = transmute(vqtbl4(
20250 transmute(a.0),
20251 transmute(a.1),
20252 transmute(a.2),
20253 transmute(a.3),
20254 b,
20255 ));
20256 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
20257 }
20258}
20259#[doc = "Table look-up"]
20260#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
20261#[inline]
20262#[cfg(target_endian = "little")]
20263#[target_feature(enable = "neon")]
20264#[cfg_attr(test, assert_instr(tbl))]
20265#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20266pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
20267 unsafe {
20268 transmute(vqtbl4q(
20269 transmute(a.0),
20270 transmute(a.1),
20271 transmute(a.2),
20272 transmute(a.3),
20273 b,
20274 ))
20275 }
20276}
20277#[doc = "Table look-up"]
20278#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
20279#[inline]
20280#[cfg(target_endian = "big")]
20281#[target_feature(enable = "neon")]
20282#[cfg_attr(test, assert_instr(tbl))]
20283#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20284pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
20285 let mut a: poly8x16x4_t = a;
20286 a.0 = unsafe {
20287 simd_shuffle!(
20288 a.0,
20289 a.0,
20290 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20291 )
20292 };
20293 a.1 = unsafe {
20294 simd_shuffle!(
20295 a.1,
20296 a.1,
20297 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20298 )
20299 };
20300 a.2 = unsafe {
20301 simd_shuffle!(
20302 a.2,
20303 a.2,
20304 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20305 )
20306 };
20307 a.3 = unsafe {
20308 simd_shuffle!(
20309 a.3,
20310 a.3,
20311 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20312 )
20313 };
20314 let b: uint8x16_t =
20315 unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20316 unsafe {
20317 let ret_val: poly8x16_t = transmute(vqtbl4q(
20318 transmute(a.0),
20319 transmute(a.1),
20320 transmute(a.2),
20321 transmute(a.3),
20322 b,
20323 ));
20324 simd_shuffle!(
20325 ret_val,
20326 ret_val,
20327 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20328 )
20329 }
20330}
20331#[doc = "Extended table look-up"]
20332#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"]
20333#[inline]
20334#[target_feature(enable = "neon")]
20335#[cfg_attr(test, assert_instr(tbx))]
20336#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20337fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
20338 unsafe extern "unadjusted" {
20339 #[cfg_attr(
20340 any(target_arch = "aarch64", target_arch = "arm64ec"),
20341 link_name = "llvm.aarch64.neon.tbx1.v8i8"
20342 )]
20343 fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
20344 }
20345 unsafe { _vqtbx1(a, b, c) }
20346}
20347#[doc = "Extended table look-up"]
20348#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"]
20349#[inline]
20350#[target_feature(enable = "neon")]
20351#[cfg_attr(test, assert_instr(tbx))]
20352#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20353fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
20354 unsafe extern "unadjusted" {
20355 #[cfg_attr(
20356 any(target_arch = "aarch64", target_arch = "arm64ec"),
20357 link_name = "llvm.aarch64.neon.tbx1.v16i8"
20358 )]
20359 fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
20360 }
20361 unsafe { _vqtbx1q(a, b, c) }
20362}
20363#[doc = "Extended table look-up"]
20364#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"]
20365#[inline]
20366#[target_feature(enable = "neon")]
20367#[cfg_attr(test, assert_instr(tbx))]
20368#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20369pub fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
20370 vqtbx1(a, b, c)
20371}
20372#[doc = "Extended table look-up"]
20373#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"]
20374#[inline]
20375#[target_feature(enable = "neon")]
20376#[cfg_attr(test, assert_instr(tbx))]
20377#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20378pub fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
20379 vqtbx1q(a, b, c)
20380}
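// A minimal usage sketch (illustrative only; the `_usage` helper is
// hypothetical): unlike TBL, the extended look-up (TBX) keeps the
// corresponding lane of `a` when an index is out of range, instead of
// writing 0 to it.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vqtbx1q_s8_usage() {
    let fallback = vdupq_n_s8(-1);
    let table = vdupq_n_s8(7);
    // Index 3 is in range (0..=15), so the lane reads 7 from the table.
    assert_eq!(vgetq_lane_s8::<0>(vqtbx1q_s8(fallback, table, vdupq_n_u8(3))), 7);
    // Index 99 is out of range, so the lane keeps the -1 from `fallback`.
    assert_eq!(vgetq_lane_s8::<0>(vqtbx1q_s8(fallback, table, vdupq_n_u8(99))), -1);
}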
20381#[doc = "Extended table look-up"]
20382#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"]
20383#[inline]
20384#[target_feature(enable = "neon")]
20385#[cfg_attr(test, assert_instr(tbx))]
20386#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20387pub fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t {
20388 unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
20389}
20390#[doc = "Extended table look-up"]
20391#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"]
20392#[inline]
20393#[target_feature(enable = "neon")]
20394#[cfg_attr(test, assert_instr(tbx))]
20395#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20396pub fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
20397 unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
20398}
20399#[doc = "Extended table look-up"]
20400#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"]
20401#[inline]
20402#[target_feature(enable = "neon")]
20403#[cfg_attr(test, assert_instr(tbx))]
20404#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20405pub fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t {
20406 unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
20407}
20408#[doc = "Extended table look-up"]
20409#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"]
20410#[inline]
20411#[target_feature(enable = "neon")]
20412#[cfg_attr(test, assert_instr(tbx))]
20413#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20414pub fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t {
20415 unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
20416}
20417#[doc = "Extended table look-up"]
20418#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"]
20419#[inline]
20420#[target_feature(enable = "neon")]
20421#[cfg_attr(test, assert_instr(tbx))]
20422#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20423fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
20424 unsafe extern "unadjusted" {
20425 #[cfg_attr(
20426 any(target_arch = "aarch64", target_arch = "arm64ec"),
20427 link_name = "llvm.aarch64.neon.tbx2.v8i8"
20428 )]
20429 fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
20430 }
20431 unsafe { _vqtbx2(a, b, c, d) }
20432}
20433#[doc = "Extended table look-up"]
20434#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"]
20435#[inline]
20436#[target_feature(enable = "neon")]
20437#[cfg_attr(test, assert_instr(tbx))]
20438#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20439fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
20440 unsafe extern "unadjusted" {
20441 #[cfg_attr(
20442 any(target_arch = "aarch64", target_arch = "arm64ec"),
20443 link_name = "llvm.aarch64.neon.tbx2.v16i8"
20444 )]
20445 fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
20446 }
20447 unsafe { _vqtbx2q(a, b, c, d) }
20448}
20449#[doc = "Extended table look-up"]
20450#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"]
20451#[inline]
20452#[target_feature(enable = "neon")]
20453#[cfg_attr(test, assert_instr(tbx))]
20454#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20455pub fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t {
20456 vqtbx2(a, b.0, b.1, c)
20457}
20458#[doc = "Extended table look-up"]
20459#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"]
20460#[inline]
20461#[target_feature(enable = "neon")]
20462#[cfg_attr(test, assert_instr(tbx))]
20463#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20464pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t {
20465 vqtbx2q(a, b.0, b.1, c)
20466}
20467#[doc = "Extended table look-up"]
20468#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
20469#[inline]
20470#[cfg(target_endian = "little")]
20471#[target_feature(enable = "neon")]
20472#[cfg_attr(test, assert_instr(tbx))]
20473#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20474pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
20475 unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
20476}
20477#[doc = "Extended table look-up"]
20478#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
20479#[inline]
20480#[cfg(target_endian = "big")]
20481#[target_feature(enable = "neon")]
20482#[cfg_attr(test, assert_instr(tbx))]
20483#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20484pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
20485 let mut b: uint8x16x2_t = b;
20486 let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
20487 b.0 = unsafe {
20488 simd_shuffle!(
20489 b.0,
20490 b.0,
20491 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20492 )
20493 };
20494 b.1 = unsafe {
20495 simd_shuffle!(
20496 b.1,
20497 b.1,
20498 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20499 )
20500 };
20501 let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
20502 unsafe {
20503 let ret_val: uint8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
20504 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
20505 }
20506}
20507#[doc = "Extended table look-up"]
20508#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
20509#[inline]
20510#[cfg(target_endian = "little")]
20511#[target_feature(enable = "neon")]
20512#[cfg_attr(test, assert_instr(tbx))]
20513#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20514pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
20515 unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
20516}
20517#[doc = "Extended table look-up"]
20518#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
20519#[inline]
20520#[cfg(target_endian = "big")]
20521#[target_feature(enable = "neon")]
20522#[cfg_attr(test, assert_instr(tbx))]
20523#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20524pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
20525 let mut b: uint8x16x2_t = b;
20526 let a: uint8x16_t =
20527 unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20528 b.0 = unsafe {
20529 simd_shuffle!(
20530 b.0,
20531 b.0,
20532 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20533 )
20534 };
20535 b.1 = unsafe {
20536 simd_shuffle!(
20537 b.1,
20538 b.1,
20539 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20540 )
20541 };
20542 let c: uint8x16_t =
20543 unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20544 unsafe {
20545 let ret_val: uint8x16_t =
20546 transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
20547 simd_shuffle!(
20548 ret_val,
20549 ret_val,
20550 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20551 )
20552 }
20553}
20554#[doc = "Extended table look-up"]
20555#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
20556#[inline]
20557#[cfg(target_endian = "little")]
20558#[target_feature(enable = "neon")]
20559#[cfg_attr(test, assert_instr(tbx))]
20560#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20561pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
20562 unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
20563}
20564#[doc = "Extended table look-up"]
20565#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
20566#[inline]
20567#[cfg(target_endian = "big")]
20568#[target_feature(enable = "neon")]
20569#[cfg_attr(test, assert_instr(tbx))]
20570#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20571pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
20572 let mut b: poly8x16x2_t = b;
20573 let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
20574 b.0 = unsafe {
20575 simd_shuffle!(
20576 b.0,
20577 b.0,
20578 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20579 )
20580 };
20581 b.1 = unsafe {
20582 simd_shuffle!(
20583 b.1,
20584 b.1,
20585 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20586 )
20587 };
20588 let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
20589 unsafe {
20590 let ret_val: poly8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
20591 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
20592 }
20593}
20594#[doc = "Extended table look-up"]
20595#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
20596#[inline]
20597#[cfg(target_endian = "little")]
20598#[target_feature(enable = "neon")]
20599#[cfg_attr(test, assert_instr(tbx))]
20600#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20601pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
20602 unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
20603}
20604#[doc = "Extended table look-up"]
20605#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
20606#[inline]
20607#[cfg(target_endian = "big")]
20608#[target_feature(enable = "neon")]
20609#[cfg_attr(test, assert_instr(tbx))]
20610#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20611pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
20612 let mut b: poly8x16x2_t = b;
20613 let a: poly8x16_t =
20614 unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20615 b.0 = unsafe {
20616 simd_shuffle!(
20617 b.0,
20618 b.0,
20619 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20620 )
20621 };
20622 b.1 = unsafe {
20623 simd_shuffle!(
20624 b.1,
20625 b.1,
20626 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20627 )
20628 };
20629 let c: uint8x16_t =
20630 unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20631 unsafe {
20632 let ret_val: poly8x16_t =
20633 transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
20634 simd_shuffle!(
20635 ret_val,
20636 ret_val,
20637 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20638 )
20639 }
20640}
20641#[doc = "Extended table look-up"]
20642#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"]
20643#[inline]
20644#[target_feature(enable = "neon")]
20645#[cfg_attr(test, assert_instr(tbx))]
20646#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20647fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
20648 unsafe extern "unadjusted" {
20649 #[cfg_attr(
20650 any(target_arch = "aarch64", target_arch = "arm64ec"),
20651 link_name = "llvm.aarch64.neon.tbx3.v8i8"
20652 )]
20653 fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t)
20654 -> int8x8_t;
20655 }
20656 unsafe { _vqtbx3(a, b, c, d, e) }
20657}
20658#[doc = "Extended table look-up"]
20659#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"]
20660#[inline]
20661#[target_feature(enable = "neon")]
20662#[cfg_attr(test, assert_instr(tbx))]
20663#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20664fn vqtbx3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
20665 unsafe extern "unadjusted" {
20666 #[cfg_attr(
20667 any(target_arch = "aarch64", target_arch = "arm64ec"),
20668 link_name = "llvm.aarch64.neon.tbx3.v16i8"
20669 )]
20670 fn _vqtbx3q(
20671 a: int8x16_t,
20672 b: int8x16_t,
20673 c: int8x16_t,
20674 d: int8x16_t,
20675 e: uint8x16_t,
20676 ) -> int8x16_t;
20677 }
20678 unsafe { _vqtbx3q(a, b, c, d, e) }
20679}
20680#[doc = "Extended table look-up"]
20681#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"]
20682#[inline]
20683#[target_feature(enable = "neon")]
20684#[cfg_attr(test, assert_instr(tbx))]
20685#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20686pub fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t {
20687 vqtbx3(a, b.0, b.1, b.2, c)
20688}
20689#[doc = "Extended table look-up"]
20690#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"]
20691#[inline]
20692#[target_feature(enable = "neon")]
20693#[cfg_attr(test, assert_instr(tbx))]
20694#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20695pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t {
20696 vqtbx3q(a, b.0, b.1, b.2, c)
20697}
20698#[doc = "Extended table look-up"]
20699#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
20700#[inline]
20701#[cfg(target_endian = "little")]
20702#[target_feature(enable = "neon")]
20703#[cfg_attr(test, assert_instr(tbx))]
20704#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20705pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
20706 unsafe {
20707 transmute(vqtbx3(
20708 transmute(a),
20709 transmute(b.0),
20710 transmute(b.1),
20711 transmute(b.2),
20712 c,
20713 ))
20714 }
20715}
20716#[doc = "Extended table look-up"]
20717#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
20718#[inline]
20719#[cfg(target_endian = "big")]
20720#[target_feature(enable = "neon")]
20721#[cfg_attr(test, assert_instr(tbx))]
20722#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20723pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
20724 let mut b: uint8x16x3_t = b;
20725 let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
20726 b.0 = unsafe {
20727 simd_shuffle!(
20728 b.0,
20729 b.0,
20730 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20731 )
20732 };
20733 b.1 = unsafe {
20734 simd_shuffle!(
20735 b.1,
20736 b.1,
20737 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20738 )
20739 };
20740 b.2 = unsafe {
20741 simd_shuffle!(
20742 b.2,
20743 b.2,
20744 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20745 )
20746 };
20747 let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
20748 unsafe {
20749 let ret_val: uint8x8_t = transmute(vqtbx3(
20750 transmute(a),
20751 transmute(b.0),
20752 transmute(b.1),
20753 transmute(b.2),
20754 c,
20755 ));
20756 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
20757 }
20758}
20759#[doc = "Extended table look-up"]
20760#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
20761#[inline]
20762#[cfg(target_endian = "little")]
20763#[target_feature(enable = "neon")]
20764#[cfg_attr(test, assert_instr(tbx))]
20765#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20766pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
20767 unsafe {
20768 transmute(vqtbx3q(
20769 transmute(a),
20770 transmute(b.0),
20771 transmute(b.1),
20772 transmute(b.2),
20773 c,
20774 ))
20775 }
20776}
20777#[doc = "Extended table look-up"]
20778#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
20779#[inline]
20780#[cfg(target_endian = "big")]
20781#[target_feature(enable = "neon")]
20782#[cfg_attr(test, assert_instr(tbx))]
20783#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20784pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
20785 let mut b: uint8x16x3_t = b;
20786 let a: uint8x16_t =
20787 unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20788 b.0 = unsafe {
20789 simd_shuffle!(
20790 b.0,
20791 b.0,
20792 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20793 )
20794 };
20795 b.1 = unsafe {
20796 simd_shuffle!(
20797 b.1,
20798 b.1,
20799 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20800 )
20801 };
20802 b.2 = unsafe {
20803 simd_shuffle!(
20804 b.2,
20805 b.2,
20806 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20807 )
20808 };
20809 let c: uint8x16_t =
20810 unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20811 unsafe {
20812 let ret_val: uint8x16_t = transmute(vqtbx3q(
20813 transmute(a),
20814 transmute(b.0),
20815 transmute(b.1),
20816 transmute(b.2),
20817 c,
20818 ));
20819 simd_shuffle!(
20820 ret_val,
20821 ret_val,
20822 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20823 )
20824 }
20825}
20826#[doc = "Extended table look-up"]
20827#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
20828#[inline]
20829#[cfg(target_endian = "little")]
20830#[target_feature(enable = "neon")]
20831#[cfg_attr(test, assert_instr(tbx))]
20832#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20833pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
20834 unsafe {
20835 transmute(vqtbx3(
20836 transmute(a),
20837 transmute(b.0),
20838 transmute(b.1),
20839 transmute(b.2),
20840 c,
20841 ))
20842 }
20843}
20844#[doc = "Extended table look-up"]
20845#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
20846#[inline]
20847#[cfg(target_endian = "big")]
20848#[target_feature(enable = "neon")]
20849#[cfg_attr(test, assert_instr(tbx))]
20850#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20851pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
20852 let mut b: poly8x16x3_t = b;
20853 let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
20854 b.0 = unsafe {
20855 simd_shuffle!(
20856 b.0,
20857 b.0,
20858 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20859 )
20860 };
20861 b.1 = unsafe {
20862 simd_shuffle!(
20863 b.1,
20864 b.1,
20865 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20866 )
20867 };
20868 b.2 = unsafe {
20869 simd_shuffle!(
20870 b.2,
20871 b.2,
20872 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20873 )
20874 };
20875 let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
20876 unsafe {
20877 let ret_val: poly8x8_t = transmute(vqtbx3(
20878 transmute(a),
20879 transmute(b.0),
20880 transmute(b.1),
20881 transmute(b.2),
20882 c,
20883 ));
20884 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
20885 }
20886}
20887#[doc = "Extended table look-up"]
20888#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
20889#[inline]
20890#[cfg(target_endian = "little")]
20891#[target_feature(enable = "neon")]
20892#[cfg_attr(test, assert_instr(tbx))]
20893#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20894pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
20895 unsafe {
20896 transmute(vqtbx3q(
20897 transmute(a),
20898 transmute(b.0),
20899 transmute(b.1),
20900 transmute(b.2),
20901 c,
20902 ))
20903 }
20904}
20905#[doc = "Extended table look-up"]
20906#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
20907#[inline]
20908#[cfg(target_endian = "big")]
20909#[target_feature(enable = "neon")]
20910#[cfg_attr(test, assert_instr(tbx))]
20911#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20912pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
20913 let mut b: poly8x16x3_t = b;
20914 let a: poly8x16_t =
20915 unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20916 b.0 = unsafe {
20917 simd_shuffle!(
20918 b.0,
20919 b.0,
20920 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20921 )
20922 };
20923 b.1 = unsafe {
20924 simd_shuffle!(
20925 b.1,
20926 b.1,
20927 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20928 )
20929 };
20930 b.2 = unsafe {
20931 simd_shuffle!(
20932 b.2,
20933 b.2,
20934 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20935 )
20936 };
20937 let c: uint8x16_t =
20938 unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20939 unsafe {
20940 let ret_val: poly8x16_t = transmute(vqtbx3q(
20941 transmute(a),
20942 transmute(b.0),
20943 transmute(b.1),
20944 transmute(b.2),
20945 c,
20946 ));
20947 simd_shuffle!(
20948 ret_val,
20949 ret_val,
20950 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20951 )
20952 }
20953}
20954#[doc = "Extended table look-up"]
20955#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"]
20956#[inline]
20957#[target_feature(enable = "neon")]
20958#[cfg_attr(test, assert_instr(tbx))]
20959#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20960fn vqtbx4(
20961 a: int8x8_t,
20962 b: int8x16_t,
20963 c: int8x16_t,
20964 d: int8x16_t,
20965 e: int8x16_t,
20966 f: uint8x8_t,
20967) -> int8x8_t {
20968 unsafe extern "unadjusted" {
20969 #[cfg_attr(
20970 any(target_arch = "aarch64", target_arch = "arm64ec"),
20971 link_name = "llvm.aarch64.neon.tbx4.v8i8"
20972 )]
20973 fn _vqtbx4(
20974 a: int8x8_t,
20975 b: int8x16_t,
20976 c: int8x16_t,
20977 d: int8x16_t,
20978 e: int8x16_t,
20979 f: uint8x8_t,
20980 ) -> int8x8_t;
20981 }
20982 unsafe { _vqtbx4(a, b, c, d, e, f) }
20983}
20984#[doc = "Extended table look-up"]
20985#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"]
20986#[inline]
20987#[target_feature(enable = "neon")]
20988#[cfg_attr(test, assert_instr(tbx))]
20989#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20990fn vqtbx4q(
20991 a: int8x16_t,
20992 b: int8x16_t,
20993 c: int8x16_t,
20994 d: int8x16_t,
20995 e: int8x16_t,
20996 f: uint8x16_t,
20997) -> int8x16_t {
20998 unsafe extern "unadjusted" {
20999 #[cfg_attr(
21000 any(target_arch = "aarch64", target_arch = "arm64ec"),
21001 link_name = "llvm.aarch64.neon.tbx4.v16i8"
21002 )]
21003 fn _vqtbx4q(
21004 a: int8x16_t,
21005 b: int8x16_t,
21006 c: int8x16_t,
21007 d: int8x16_t,
21008 e: int8x16_t,
21009 f: uint8x16_t,
21010 ) -> int8x16_t;
21011 }
21012 unsafe { _vqtbx4q(a, b, c, d, e, f) }
21013}
21014#[doc = "Extended table look-up"]
21015#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"]
21016#[inline]
21017#[target_feature(enable = "neon")]
21018#[cfg_attr(test, assert_instr(tbx))]
21019#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21020pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t {
21021 vqtbx4(a, b.0, b.1, b.2, b.3, c)
21022}
21023#[doc = "Extended table look-up"]
21024#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"]
21025#[inline]
21026#[target_feature(enable = "neon")]
21027#[cfg_attr(test, assert_instr(tbx))]
21028#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21029pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t {
21030 vqtbx4q(a, b.0, b.1, b.2, b.3, c)
21031}
21032#[doc = "Extended table look-up"]
21033#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
21034#[inline]
21035#[cfg(target_endian = "little")]
21036#[target_feature(enable = "neon")]
21037#[cfg_attr(test, assert_instr(tbx))]
21038#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21039pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
21040 unsafe {
21041 transmute(vqtbx4(
21042 transmute(a),
21043 transmute(b.0),
21044 transmute(b.1),
21045 transmute(b.2),
21046 transmute(b.3),
21047 c,
21048 ))
21049 }
21050}
21051#[doc = "Extended table look-up"]
21052#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
21053#[inline]
21054#[cfg(target_endian = "big")]
21055#[target_feature(enable = "neon")]
21056#[cfg_attr(test, assert_instr(tbx))]
21057#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21058pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
21059 let mut b: uint8x16x4_t = b;
21060 let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
21061 b.0 = unsafe {
21062 simd_shuffle!(
21063 b.0,
21064 b.0,
21065 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21066 )
21067 };
21068 b.1 = unsafe {
21069 simd_shuffle!(
21070 b.1,
21071 b.1,
21072 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21073 )
21074 };
21075 b.2 = unsafe {
21076 simd_shuffle!(
21077 b.2,
21078 b.2,
21079 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21080 )
21081 };
21082 b.3 = unsafe {
21083 simd_shuffle!(
21084 b.3,
21085 b.3,
21086 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21087 )
21088 };
21089 let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
21090 unsafe {
21091 let ret_val: uint8x8_t = transmute(vqtbx4(
21092 transmute(a),
21093 transmute(b.0),
21094 transmute(b.1),
21095 transmute(b.2),
21096 transmute(b.3),
21097 c,
21098 ));
21099 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
21100 }
21101}
21102#[doc = "Extended table look-up"]
21103#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
21104#[inline]
21105#[cfg(target_endian = "little")]
21106#[target_feature(enable = "neon")]
21107#[cfg_attr(test, assert_instr(tbx))]
21108#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21109pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
21110 unsafe {
21111 transmute(vqtbx4q(
21112 transmute(a),
21113 transmute(b.0),
21114 transmute(b.1),
21115 transmute(b.2),
21116 transmute(b.3),
21117 c,
21118 ))
21119 }
21120}
21121#[doc = "Extended table look-up"]
21122#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
21123#[inline]
21124#[cfg(target_endian = "big")]
21125#[target_feature(enable = "neon")]
21126#[cfg_attr(test, assert_instr(tbx))]
21127#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21128pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
21129 let mut b: uint8x16x4_t = b;
21130 let a: uint8x16_t =
21131 unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
21132 b.0 = unsafe {
21133 simd_shuffle!(
21134 b.0,
21135 b.0,
21136 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21137 )
21138 };
21139 b.1 = unsafe {
21140 simd_shuffle!(
21141 b.1,
21142 b.1,
21143 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21144 )
21145 };
21146 b.2 = unsafe {
21147 simd_shuffle!(
21148 b.2,
21149 b.2,
21150 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21151 )
21152 };
21153 b.3 = unsafe {
21154 simd_shuffle!(
21155 b.3,
21156 b.3,
21157 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21158 )
21159 };
21160 let c: uint8x16_t =
21161 unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
21162 unsafe {
21163 let ret_val: uint8x16_t = transmute(vqtbx4q(
21164 transmute(a),
21165 transmute(b.0),
21166 transmute(b.1),
21167 transmute(b.2),
21168 transmute(b.3),
21169 c,
21170 ));
21171 simd_shuffle!(
21172 ret_val,
21173 ret_val,
21174 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21175 )
21176 }
21177}
21178#[doc = "Extended table look-up"]
21179#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
21180#[inline]
21181#[cfg(target_endian = "little")]
21182#[target_feature(enable = "neon")]
21183#[cfg_attr(test, assert_instr(tbx))]
21184#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21185pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
21186 unsafe {
21187 transmute(vqtbx4(
21188 transmute(a),
21189 transmute(b.0),
21190 transmute(b.1),
21191 transmute(b.2),
21192 transmute(b.3),
21193 c,
21194 ))
21195 }
21196}
21197#[doc = "Extended table look-up"]
21198#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
21199#[inline]
21200#[cfg(target_endian = "big")]
21201#[target_feature(enable = "neon")]
21202#[cfg_attr(test, assert_instr(tbx))]
21203#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21204pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
21205 let mut b: poly8x16x4_t = b;
21206 let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
21207 b.0 = unsafe {
21208 simd_shuffle!(
21209 b.0,
21210 b.0,
21211 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21212 )
21213 };
21214 b.1 = unsafe {
21215 simd_shuffle!(
21216 b.1,
21217 b.1,
21218 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21219 )
21220 };
21221 b.2 = unsafe {
21222 simd_shuffle!(
21223 b.2,
21224 b.2,
21225 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21226 )
21227 };
21228 b.3 = unsafe {
21229 simd_shuffle!(
21230 b.3,
21231 b.3,
21232 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21233 )
21234 };
21235 let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
21236 unsafe {
21237 let ret_val: poly8x8_t = transmute(vqtbx4(
21238 transmute(a),
21239 transmute(b.0),
21240 transmute(b.1),
21241 transmute(b.2),
21242 transmute(b.3),
21243 c,
21244 ));
21245 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
21246 }
21247}
21248#[doc = "Extended table look-up"]
21249#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
21250#[inline]
21251#[cfg(target_endian = "little")]
21252#[target_feature(enable = "neon")]
21253#[cfg_attr(test, assert_instr(tbx))]
21254#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21255pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
21256 unsafe {
21257 transmute(vqtbx4q(
21258 transmute(a),
21259 transmute(b.0),
21260 transmute(b.1),
21261 transmute(b.2),
21262 transmute(b.3),
21263 c,
21264 ))
21265 }
21266}
21267#[doc = "Extended table look-up"]
21268#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
21269#[inline]
21270#[cfg(target_endian = "big")]
21271#[target_feature(enable = "neon")]
21272#[cfg_attr(test, assert_instr(tbx))]
21273#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21274pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
21275 let mut b: poly8x16x4_t = b;
21276 let a: poly8x16_t =
21277 unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
21278 b.0 = unsafe {
21279 simd_shuffle!(
21280 b.0,
21281 b.0,
21282 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21283 )
21284 };
21285 b.1 = unsafe {
21286 simd_shuffle!(
21287 b.1,
21288 b.1,
21289 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21290 )
21291 };
21292 b.2 = unsafe {
21293 simd_shuffle!(
21294 b.2,
21295 b.2,
21296 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21297 )
21298 };
21299 b.3 = unsafe {
21300 simd_shuffle!(
21301 b.3,
21302 b.3,
21303 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21304 )
21305 };
21306 let c: uint8x16_t =
21307 unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
21308 unsafe {
21309 let ret_val: poly8x16_t = transmute(vqtbx4q(
21310 transmute(a),
21311 transmute(b.0),
21312 transmute(b.1),
21313 transmute(b.2),
21314 transmute(b.3),
21315 c,
21316 ));
21317 simd_shuffle!(
21318 ret_val,
21319 ret_val,
21320 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21321 )
21322 }
21323}
21324#[doc = "Rotate and exclusive OR"]
21325#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"]
21326#[inline]
21327#[target_feature(enable = "neon,sha3")]
21328#[cfg_attr(test, assert_instr(rax1))]
21329#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
21330pub fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
21331 unsafe extern "unadjusted" {
21332 #[cfg_attr(
21333 any(target_arch = "aarch64", target_arch = "arm64ec"),
21334 link_name = "llvm.aarch64.crypto.rax1"
21335 )]
21336 fn _vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
21337 }
21338 unsafe { _vrax1q_u64(a, b) }
21339}
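// An illustrative sketch (not part of the generated bindings; the `_usage`
// helper is hypothetical): RAX1 computes `a ^ rotate_left(b, 1)` in each
// 64-bit lane, one of the SHA-3 helper operations.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,sha3")]
fn vrax1q_u64_usage() {
    let a = vdupq_n_u64(0x0123_4567_89ab_cdef);
    let b = vdupq_n_u64(0x8000_0000_0000_0001);
    let r = vrax1q_u64(a, b);
    // For this `b`, rotate_left(b, 1) == 0x0000_0000_0000_0003.
    assert_eq!(
        vgetq_lane_u64::<0>(r),
        0x0123_4567_89ab_cdef ^ 0x8000_0000_0000_0001u64.rotate_left(1)
    );
}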
21340#[doc = "Reverse bit order"]
21341#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"]
21342#[inline]
21343#[target_feature(enable = "neon")]
21344#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21345#[cfg_attr(test, assert_instr(rbit))]
21346pub fn vrbit_s8(a: int8x8_t) -> int8x8_t {
21347 unsafe { simd_bitreverse(a) }
21348}
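// A minimal usage sketch (illustrative only; the `_usage` helper is
// hypothetical): RBIT reverses the bit order within each byte lane,
// e.g. 0b0000_0001 becomes 0b1000_0000.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vrbit_s8_usage() {
    let v = vdup_n_s8(0b0000_0001);
    assert_eq!(vget_lane_s8::<0>(vrbit_s8(v)), 0b1000_0000u8 as i8);
}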
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
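// Added commentary (not from the generated source): the
// #[cfg(target_endian = "big")] wrappers above reverse the lane order before
// and after the call so that the transmute-based definitions produce the same
// lane-for-lane result on big-endian targets as on little-endian ones; the
// code generator appears to emit this shuffle pattern uniformly for every
// reinterpreting wrapper, even where the two shuffles cancel out.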
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v1f64"
        )]
        fn _vrecpe_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrecpe_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v2f64"
        )]
        fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrecpeq_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecped_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f64"
        )]
        fn _vrecped_f64(a: f64) -> f64;
    }
    unsafe { _vrecped_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpes_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f32"
        )]
        fn _vrecpes_f32(a: f32) -> f32;
    }
    unsafe { _vrecpes_f32(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecpe))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrecpeh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f16"
        )]
        fn _vrecpeh_f16(a: f16) -> f16;
    }
    unsafe { _vrecpeh_f16(a) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v1f64"
        )]
        fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrecps_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v2f64"
        )]
        fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrecpsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsd_f64(a: f64, b: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f64"
        )]
        fn _vrecpsd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vrecpsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpss_f32(a: f32, b: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f32"
        )]
        fn _vrecpss_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vrecpss_f32(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrecpsh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f16"
        )]
        fn _vrecpsh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vrecpsh_f16(a, b) }
}
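// Illustrative refinement sketch (added commentary, not part of the
// bindings): FRECPE alone yields only a rough (about 8 bit) reciprocal
// estimate, while FRECPS computes the Newton-Raphson correction term
// `2.0 - a * x`, so the two are normally combined:
//
//     // assumes a scope where target_feature = "neon" is enabled; each
//     // step roughly doubles the number of correct mantissa bits
//     let mut x = vrecpes_f32(a);  // initial estimate of 1.0 / a
//     x *= vrecpss_f32(a, x);      // first refinement step
//     x *= vrecpss_f32(a, x);      // near full f32 precision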
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxd_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f64"
        )]
        fn _vrecpxd_f64(a: f64) -> f64;
    }
    unsafe { _vrecpxd_f64(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxs_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f32"
        )]
        fn _vrecpxs_f32(a: f32) -> f32;
    }
    unsafe { _vrecpxs_f32(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecpx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrecpxh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f16"
        )]
        fn _vrecpxh_f16(a: f16) -> f16;
    }
    unsafe { _vrecpxh_f16(a) }
}
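// Added commentary (not from the generated source): despite the similar
// name, FRECPX does not approximate 1.0 / a; Arm describes it as returning a
// value with the input's exponent negated and the fraction cleared, which is
// typically used to pre-scale operands so a following divide or square-root
// cannot overflow or underflow in the intermediate computation.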
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
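// Added commentary (not from the generated source): every vreinterpret* is a
// pure bit-pattern cast, which is why the tests only assert a nop. On
// big-endian targets the mapping between lane indices and byte positions
// differs between element widths, so the #[cfg(target_endian = "big")]
// variants reverse the source lanes (and, when the result has more than one
// lane, the result lanes) to keep the byte-level reinterpretation identical
// to the little-endian definition.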
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    unsafe {
        let ret_val: float16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    unsafe {
        let ret_val: int8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    unsafe {
        let ret_val: int16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    unsafe {
        let ret_val: int32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    unsafe {
        let ret_val: uint8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    unsafe {
        let ret_val: uint16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    unsafe {
        let ret_val: uint32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    unsafe {
        let ret_val: poly8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    unsafe {
        let ret_val: poly16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
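// Added commentary (not from the generated source): p128 is a plain 128-bit
// scalar rather than a lane vector, so the big-endian p128 casts above only
// shuffle the float64x2_t side; there are no lanes to reorder on the p128
// side of the transmute.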
22132#[doc = "Vector reinterpret cast operation"]
22133#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
22134#[inline]
22135#[cfg(target_endian = "little")]
22136#[target_feature(enable = "neon")]
22137#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22138#[cfg_attr(test, assert_instr(nop))]
22139pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
22140 unsafe { transmute(a) }
22141}
22142#[doc = "Vector reinterpret cast operation"]
22143#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
22144#[inline]
22145#[cfg(target_endian = "big")]
22146#[target_feature(enable = "neon")]
22147#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22148#[cfg_attr(test, assert_instr(nop))]
22149pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
22150 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22151 unsafe {
22152 let ret_val: float32x4_t = transmute(a);
22153 simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
22154 }
22155}
22156#[doc = "Vector reinterpret cast operation"]
22157#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
22158#[inline]
22159#[cfg(target_endian = "little")]
22160#[target_feature(enable = "neon")]
22161#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22162#[cfg_attr(test, assert_instr(nop))]
22163pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
22164 unsafe { transmute(a) }
22165}
22166#[doc = "Vector reinterpret cast operation"]
22167#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
22168#[inline]
22169#[cfg(target_endian = "big")]
22170#[target_feature(enable = "neon")]
22171#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22172#[cfg_attr(test, assert_instr(nop))]
22173pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
22174 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22175 unsafe {
22176 let ret_val: int8x16_t = transmute(a);
22177 simd_shuffle!(
22178 ret_val,
22179 ret_val,
22180 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
22181 )
22182 }
22183}
22184#[doc = "Vector reinterpret cast operation"]
22185#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
22186#[inline]
22187#[cfg(target_endian = "little")]
22188#[target_feature(enable = "neon")]
22189#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22190#[cfg_attr(test, assert_instr(nop))]
22191pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
22192 unsafe { transmute(a) }
22193}
22194#[doc = "Vector reinterpret cast operation"]
22195#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
22196#[inline]
22197#[cfg(target_endian = "big")]
22198#[target_feature(enable = "neon")]
22199#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22200#[cfg_attr(test, assert_instr(nop))]
22201pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
22202 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22203 unsafe {
22204 let ret_val: int16x8_t = transmute(a);
22205 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
22206 }
22207}
22208#[doc = "Vector reinterpret cast operation"]
22209#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
22210#[inline]
22211#[cfg(target_endian = "little")]
22212#[target_feature(enable = "neon")]
22213#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22214#[cfg_attr(test, assert_instr(nop))]
22215pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
22216 unsafe { transmute(a) }
22217}
22218#[doc = "Vector reinterpret cast operation"]
22219#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
22220#[inline]
22221#[cfg(target_endian = "big")]
22222#[target_feature(enable = "neon")]
22223#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22224#[cfg_attr(test, assert_instr(nop))]
22225pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
22226 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22227 unsafe {
22228 let ret_val: int32x4_t = transmute(a);
22229 simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
22230 }
22231}
22232#[doc = "Vector reinterpret cast operation"]
22233#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
22234#[inline]
22235#[cfg(target_endian = "little")]
22236#[target_feature(enable = "neon")]
22237#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22238#[cfg_attr(test, assert_instr(nop))]
22239pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
22240 unsafe { transmute(a) }
22241}
22242#[doc = "Vector reinterpret cast operation"]
22243#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
22244#[inline]
22245#[cfg(target_endian = "big")]
22246#[target_feature(enable = "neon")]
22247#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22248#[cfg_attr(test, assert_instr(nop))]
22249pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
22250 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22251 unsafe {
22252 let ret_val: int64x2_t = transmute(a);
22253 simd_shuffle!(ret_val, ret_val, [1, 0])
22254 }
22255}
22256#[doc = "Vector reinterpret cast operation"]
22257#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
22258#[inline]
22259#[cfg(target_endian = "little")]
22260#[target_feature(enable = "neon")]
22261#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22262#[cfg_attr(test, assert_instr(nop))]
22263pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
22264 unsafe { transmute(a) }
22265}
22266#[doc = "Vector reinterpret cast operation"]
22267#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
22268#[inline]
22269#[cfg(target_endian = "big")]
22270#[target_feature(enable = "neon")]
22271#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22272#[cfg_attr(test, assert_instr(nop))]
22273pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
22274 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22275 unsafe {
22276 let ret_val: uint8x16_t = transmute(a);
22277 simd_shuffle!(
22278 ret_val,
22279 ret_val,
22280 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
22281 )
22282 }
22283}
22284#[doc = "Vector reinterpret cast operation"]
22285#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
22286#[inline]
22287#[cfg(target_endian = "little")]
22288#[target_feature(enable = "neon")]
22289#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22290#[cfg_attr(test, assert_instr(nop))]
22291pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
22292 unsafe { transmute(a) }
22293}
22294#[doc = "Vector reinterpret cast operation"]
22295#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
22296#[inline]
22297#[cfg(target_endian = "big")]
22298#[target_feature(enable = "neon")]
22299#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22300#[cfg_attr(test, assert_instr(nop))]
22301pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
22302 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22303 unsafe {
22304 let ret_val: uint16x8_t = transmute(a);
22305 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
22306 }
22307}
22308#[doc = "Vector reinterpret cast operation"]
22309#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
22310#[inline]
22311#[cfg(target_endian = "little")]
22312#[target_feature(enable = "neon")]
22313#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22314#[cfg_attr(test, assert_instr(nop))]
22315pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
22316 unsafe { transmute(a) }
22317}
22318#[doc = "Vector reinterpret cast operation"]
22319#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
22320#[inline]
22321#[cfg(target_endian = "big")]
22322#[target_feature(enable = "neon")]
22323#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22324#[cfg_attr(test, assert_instr(nop))]
22325pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
22326 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22327 unsafe {
22328 let ret_val: uint32x4_t = transmute(a);
22329 simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
22330 }
22331}
22332#[doc = "Vector reinterpret cast operation"]
22333#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
22334#[inline]
22335#[cfg(target_endian = "little")]
22336#[target_feature(enable = "neon")]
22337#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22338#[cfg_attr(test, assert_instr(nop))]
22339pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
22340 unsafe { transmute(a) }
22341}
22342#[doc = "Vector reinterpret cast operation"]
22343#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
22344#[inline]
22345#[cfg(target_endian = "big")]
22346#[target_feature(enable = "neon")]
22347#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22348#[cfg_attr(test, assert_instr(nop))]
22349pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
22350 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22351 unsafe {
22352 let ret_val: uint64x2_t = transmute(a);
22353 simd_shuffle!(ret_val, ret_val, [1, 0])
22354 }
22355}
22356#[doc = "Vector reinterpret cast operation"]
22357#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
22358#[inline]
22359#[cfg(target_endian = "little")]
22360#[target_feature(enable = "neon")]
22361#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22362#[cfg_attr(test, assert_instr(nop))]
22363pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
22364 unsafe { transmute(a) }
22365}
22366#[doc = "Vector reinterpret cast operation"]
22367#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
22368#[inline]
22369#[cfg(target_endian = "big")]
22370#[target_feature(enable = "neon")]
22371#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22372#[cfg_attr(test, assert_instr(nop))]
22373pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
22374 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22375 unsafe {
22376 let ret_val: poly8x16_t = transmute(a);
22377 simd_shuffle!(
22378 ret_val,
22379 ret_val,
22380 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
22381 )
22382 }
22383}
22384#[doc = "Vector reinterpret cast operation"]
22385#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
22386#[inline]
22387#[cfg(target_endian = "little")]
22388#[target_feature(enable = "neon")]
22389#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22390#[cfg_attr(test, assert_instr(nop))]
22391pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
22392 unsafe { transmute(a) }
22393}
22394#[doc = "Vector reinterpret cast operation"]
22395#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
22396#[inline]
22397#[cfg(target_endian = "big")]
22398#[target_feature(enable = "neon")]
22399#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22400#[cfg_attr(test, assert_instr(nop))]
22401pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
22402 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22403 unsafe {
22404 let ret_val: poly16x8_t = transmute(a);
22405 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
22406 }
22407}
22408#[doc = "Vector reinterpret cast operation"]
22409#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
22410#[inline]
22411#[cfg(target_endian = "little")]
22412#[target_feature(enable = "neon")]
22413#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22414#[cfg_attr(test, assert_instr(nop))]
22415pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
22416 unsafe { transmute(a) }
22417}
22418#[doc = "Vector reinterpret cast operation"]
22419#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
22420#[inline]
22421#[cfg(target_endian = "big")]
22422#[target_feature(enable = "neon")]
22423#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22424#[cfg_attr(test, assert_instr(nop))]
22425pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
22426 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22427 unsafe {
22428 let ret_val: poly64x2_t = transmute(a);
22429 simd_shuffle!(ret_val, ret_val, [1, 0])
22430 }
22431}
22432#[doc = "Vector reinterpret cast operation"]
22433#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
22434#[inline]
22435#[cfg(target_endian = "little")]
22436#[target_feature(enable = "neon")]
22437#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22438#[cfg_attr(test, assert_instr(nop))]
22439pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
22440 unsafe { transmute(a) }
22441}
22442#[doc = "Vector reinterpret cast operation"]
22443#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
22444#[inline]
22445#[cfg(target_endian = "big")]
22446#[target_feature(enable = "neon")]
22447#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22448#[cfg_attr(test, assert_instr(nop))]
22449pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
22450 let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
22451 unsafe { transmute(a) }
22452}
22453#[doc = "Vector reinterpret cast operation"]
22454#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
22455#[inline]
22456#[cfg(target_endian = "little")]
22457#[target_feature(enable = "neon")]
22458#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22459#[cfg_attr(test, assert_instr(nop))]
22460pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
22461 unsafe { transmute(a) }
22462}
22463#[doc = "Vector reinterpret cast operation"]
22464#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
22465#[inline]
22466#[cfg(target_endian = "big")]
22467#[target_feature(enable = "neon")]
22468#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22469#[cfg_attr(test, assert_instr(nop))]
22470pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
22471 let a: int8x16_t =
22472 unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
22473 unsafe {
22474 let ret_val: float64x2_t = transmute(a);
22475 simd_shuffle!(ret_val, ret_val, [1, 0])
22476 }
22477}
22478#[doc = "Vector reinterpret cast operation"]
22479#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
22480#[inline]
22481#[cfg(target_endian = "little")]
22482#[target_feature(enable = "neon")]
22483#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22484#[cfg_attr(test, assert_instr(nop))]
22485pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
22486 unsafe { transmute(a) }
22487}
22488#[doc = "Vector reinterpret cast operation"]
22489#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
22490#[inline]
22491#[cfg(target_endian = "big")]
22492#[target_feature(enable = "neon")]
22493#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22494#[cfg_attr(test, assert_instr(nop))]
22495pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
22496 let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
22497 unsafe { transmute(a) }
22498}
22499#[doc = "Vector reinterpret cast operation"]
22500#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
22501#[inline]
22502#[cfg(target_endian = "little")]
22503#[target_feature(enable = "neon")]
22504#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22505#[cfg_attr(test, assert_instr(nop))]
22506pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
22507 unsafe { transmute(a) }
22508}
22509#[doc = "Vector reinterpret cast operation"]
22510#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
22511#[inline]
22512#[cfg(target_endian = "big")]
22513#[target_feature(enable = "neon")]
22514#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22515#[cfg_attr(test, assert_instr(nop))]
22516pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
22517 let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
22518 unsafe {
22519 let ret_val: float64x2_t = transmute(a);
22520 simd_shuffle!(ret_val, ret_val, [1, 0])
22521 }
22522}
22523#[doc = "Vector reinterpret cast operation"]
22524#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
22525#[inline]
22526#[cfg(target_endian = "little")]
22527#[target_feature(enable = "neon")]
22528#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22529#[cfg_attr(test, assert_instr(nop))]
22530pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
22531 unsafe { transmute(a) }
22532}
22533#[doc = "Vector reinterpret cast operation"]
22534#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
22535#[inline]
22536#[cfg(target_endian = "big")]
22537#[target_feature(enable = "neon")]
22538#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22539#[cfg_attr(test, assert_instr(nop))]
22540pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
22541 let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22542 unsafe { transmute(a) }
22543}
22544#[doc = "Vector reinterpret cast operation"]
22545#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
22546#[inline]
22547#[cfg(target_endian = "little")]
22548#[target_feature(enable = "neon")]
22549#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22550#[cfg_attr(test, assert_instr(nop))]
22551pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
22552 unsafe { transmute(a) }
22553}
22554#[doc = "Vector reinterpret cast operation"]
22555#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
22556#[inline]
22557#[cfg(target_endian = "big")]
22558#[target_feature(enable = "neon")]
22559#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22560#[cfg_attr(test, assert_instr(nop))]
22561pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
22562 let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
22563 unsafe {
22564 let ret_val: float64x2_t = transmute(a);
22565 simd_shuffle!(ret_val, ret_val, [1, 0])
22566 }
22567}
22568#[doc = "Vector reinterpret cast operation"]
22569#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"]
22570#[inline]
22571#[target_feature(enable = "neon")]
22572#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22573#[cfg_attr(test, assert_instr(nop))]
22574pub fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t {
22575 unsafe { transmute(a) }
22576}
22577#[doc = "Vector reinterpret cast operation"]
22578#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"]
22579#[inline]
22580#[target_feature(enable = "neon")]
22581#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22582#[cfg_attr(test, assert_instr(nop))]
22583pub fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t {
22584 unsafe { transmute(a) }
22585}
22586#[doc = "Vector reinterpret cast operation"]
22587#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
22588#[inline]
22589#[cfg(target_endian = "little")]
22590#[target_feature(enable = "neon")]
22591#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22592#[cfg_attr(test, assert_instr(nop))]
22593pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
22594 unsafe { transmute(a) }
22595}
22596#[doc = "Vector reinterpret cast operation"]
22597#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
22598#[inline]
22599#[cfg(target_endian = "big")]
22600#[target_feature(enable = "neon")]
22601#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22602#[cfg_attr(test, assert_instr(nop))]
22603pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
22604 let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22605 unsafe {
22606 let ret_val: float64x2_t = transmute(a);
22607 simd_shuffle!(ret_val, ret_val, [1, 0])
22608 }
22609}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
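// A minimal usage sketch (hypothetical test, not part of the generated
// bindings): every reinterpret cast is a pure bit copy, so pushing a u64
// bit pattern through `vreinterpret_f64_u64` agrees with `f64::from_bits`.
// NEON is mandatory on AArch64, which is what makes the `unsafe` calls
// sound there.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vreinterpret_f64_u64_bitwise_example() {
    let bits: u64 = 0x4009_21FB_5444_2D18; // bit pattern of f64 pi
    unsafe {
        let v: uint64x1_t = vdup_n_u64(bits);
        let f: float64x1_t = vreinterpret_f64_u64(v);
        assert_eq!(vget_lane_f64::<0>(f), f64::from_bits(bits));
    }
}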
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f32"
        )]
        fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32x_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v4f32"
        )]
        fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32xq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f64"
        )]
        fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32xq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32x.f64"
        )]
        fn _vrnd32x_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd32x_f64(simd_extract!(a, 0))) }
}
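// Editor's note (hedged): the FRINT32X/FRINT32Z/FRINT64X/FRINT64Z family
// (FEAT_FRINTTS) rounds to an integral value that is also representable
// in the named signed-integer width; per the Arm ARM, inputs that are NaN,
// infinite, or out of range produce the most negative representable value
// (e.g. -2^31 for the 32-bit forms) and raise Invalid Operation. The
// scalar `f64` wrappers here go through `simd_extract!`/`transmute`
// because the underlying LLVM intrinsic is declared on plain `f64` rather
// than on a one-element vector.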
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f32"
        )]
        fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32z_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v4f32"
        )]
        fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32zq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f64"
        )]
        fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32zq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32z.f64"
        )]
        fn _vrnd32z_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd32z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f32"
        )]
        fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd64x_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v4f32"
        )]
        fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64xq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f64"
        )]
        fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64xq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64x.f64"
        )]
        fn _vrnd64x_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd64x_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f32"
        )]
        fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd64z_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v4f32"
        )]
        fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64zq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f64"
        )]
        fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64zq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64z.f64"
        )]
        fn _vrnd64z_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd64z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_trunc(a) }
}
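// A minimal usage sketch (hypothetical test, not part of the generated
// bindings): the `vrnd*` family truncates toward zero, so 1.7 and -1.7
// both map to the nearer-to-zero integers 1.0 and -1.0. NEON is mandatory
// on AArch64, which makes the `unsafe` calls sound there.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vrnd_f32_truncates_toward_zero_example() {
    unsafe {
        assert_eq!(vget_lane_f32::<0>(vrnd_f32(vdup_n_f32(1.7))), 1.0);
        assert_eq!(vget_lane_f32::<0>(vrnd_f32(vdup_n_f32(-1.7))), -1.0);
    }
}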
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_round(a) }
}
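// Sketch (hypothetical test, not part of the generated bindings):
// `vrnda*` rounds ties away from zero, unlike `vrndn*`, which rounds
// ties to even, so 0.5 -> 1.0 and -0.5 -> -1.0 here.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vrnda_f32_ties_away_example() {
    unsafe {
        assert_eq!(vget_lane_f32::<0>(vrnda_f32(vdup_n_f32(0.5))), 1.0);
        assert_eq!(vget_lane_f32::<0>(vrnda_f32(vdup_n_f32(-0.5))), -1.0);
    }
}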
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndah_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndah_f16(a: f16) -> f16 {
    unsafe { roundf16(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndh_f16(a: f16) -> f16 {
    unsafe { truncf16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f16(a: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f16"
        )]
        fn _vrndi_f16(a: float16x4_t) -> float16x4_t;
    }
    unsafe { _vrndi_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v8f16"
        )]
        fn _vrndiq_f16(a: float16x8_t) -> float16x8_t;
    }
    unsafe { _vrndiq_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f32"
        )]
        fn _vrndi_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrndi_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f32"
        )]
        fn _vrndiq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrndiq_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v1f64"
        )]
        fn _vrndi_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndi_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f64"
        )]
        fn _vrndiq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndiq_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndih_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndih_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.f16"
        )]
        fn _vrndih_f16(a: f16) -> f16;
    }
    unsafe { _vrndih_f16(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmh_f16(a: f16) -> f16 {
    unsafe { floorf16(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndn_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v1f64"
        )]
        fn _vrndn_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndn_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v2f64"
        )]
        fn _vrndnq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndnq_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f16"
        )]
        fn _vrndnh_f16(a: f16) -> f16;
    }
    unsafe { _vrndnh_f16(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndns_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f32"
        )]
        fn _vrndns_f32(a: f32) -> f32;
    }
    unsafe { _vrndns_f32(a) }
}
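// Sketch (hypothetical test, not part of the generated bindings):
// `vrndns_f32` rounds to nearest with ties to even, so both 2.5 and 3.5
// land on an even integer.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vrndns_f32_ties_to_even_example() {
    unsafe {
        assert_eq!(vrndns_f32(2.5), 2.0);
        assert_eq!(vrndns_f32(3.5), 4.0);
    }
}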
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndph_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndph_f16(a: f16) -> f16 {
    unsafe { ceilf16(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxh_f16(a: f16) -> f16 {
    round_ties_even_f16(a)
}
#[doc = "Signed rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.srshl.i64"
        )]
        fn _vrshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vrshld_s64(a, b) }
}
#[doc = "Unsigned rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.urshl.i64"
        )]
        fn _vrshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vrshld_u64(a, b) }
}
#[doc = "Signed rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    vrshld_s64(a, -N as i64)
}
#[doc = "Unsigned rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    vrshld_u64(a, -N as i64)
}
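// Worked sketch (hypothetical test, not part of the generated bindings):
// a rounding shift right by N adds the rounding constant 1 << (N - 1)
// before shifting, so vrshrd_n_s64::<2>(6) == (6 + 2) >> 2 == 2, whereas
// a plain arithmetic shift gives 6 >> 2 == 1.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vrshrd_n_s64_rounding_example() {
    unsafe {
        assert_eq!(vrshrd_n_s64::<2>(6), (6 + (1 << 1)) >> 2);
        assert_eq!(vrshrd_n_s64::<2>(6), 2);
    }
}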
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
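// Illustrative usage (not part of the generated bindings): the *_high_n
// narrowing forms keep `a` as the low half of the result and place the
// rounded, shifted, narrowed lanes of `b` in the high half. A minimal sketch
// with hypothetical module and test names.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrshrn_high_n_example {
    use crate::core_arch::aarch64::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn low_half_passes_through_high_half_is_narrowed() {
        let low = vdup_n_s8(1);
        let wide = vdupq_n_s16(7);
        // Each narrowed lane is (7 + 2) >> 2 == 2.
        let r = vrshrn_high_n_s16::<2>(low, wide);
        assert_eq!(vgetq_lane_s8::<0>(r), 1); // from `low`
        assert_eq!(vgetq_lane_s8::<15>(r), 2); // from `wide`, narrowed
    }
}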
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v1f64"
        )]
        fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrsqrte_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v2f64"
        )]
        fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrsqrteq_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrted_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f64"
        )]
        fn _vrsqrted_f64(a: f64) -> f64;
    }
    unsafe { _vrsqrted_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtes_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f32"
        )]
        fn _vrsqrtes_f32(a: f32) -> f32;
    }
    unsafe { _vrsqrtes_f32(a) }
}
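// Illustrative usage (not part of the generated bindings): FRSQRTE produces
// only an estimate of 1/sqrt(x), accurate to roughly 8 bits, so the tolerance
// below is deliberately loose. A minimal sketch with hypothetical names.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrsqrte_example {
    use crate::core_arch::aarch64::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn estimate_is_close_to_the_true_reciprocal_square_root() {
        // 1/sqrt(4) == 0.5; the estimate lands within the ~8-bit precision.
        let r = vrsqrtes_f32(4.0);
        assert!(r > 0.49 && r < 0.51);
    }
}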
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frsqrte))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrsqrteh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f16"
        )]
        fn _vrsqrteh_f16(a: f16) -> f16;
    }
    unsafe { _vrsqrteh_f16(a) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v1f64"
        )]
        fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrsqrts_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v2f64"
        )]
        fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrsqrtsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f64"
        )]
        fn _vrsqrtsd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vrsqrtsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f32"
        )]
        fn _vrsqrtss_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vrsqrtss_f32(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f16"
        )]
        fn _vrsqrtsh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vrsqrtsh_f16(a, b) }
}
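// Illustrative usage (not part of the generated bindings): FRSQRTS computes
// (3 - a * b) / 2, the correction factor of one Newton-Raphson step, so an
// FRSQRTE estimate can be refined iteratively; each step roughly doubles the
// number of accurate bits. A minimal sketch with hypothetical names.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrsqrts_example {
    use crate::core_arch::aarch64::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn refining_the_estimate_with_newton_raphson_steps() {
        let a = 2.0f64;
        let mut est = vrsqrted_f64(a);
        for _ in 0..3 {
            // est' = est * (3 - (a * est) * est) / 2
            est = est * vrsqrtsd_f64(a * est, est);
        }
        let exact = core::f64::consts::FRAC_1_SQRT_2;
        assert!(est > exact - 1e-12 && est < exact + 1e-12);
    }
}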
#[doc = "Signed rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    let b: i64 = vrshrd_n_s64::<N>(b);
    a.wrapping_add(b)
}
#[doc = "Unsigned rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    let b: u64 = vrshrd_n_u64::<N>(b);
    a.wrapping_add(b)
}
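// Illustrative usage (not part of the generated bindings): vrsrad_n_* fuses
// the rounding shift right above with a wrapping accumulate. A minimal
// sketch with hypothetical names.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrsrad_n_example {
    use crate::core_arch::aarch64::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn accumulates_the_rounded_shift() {
        // 10 + ((7 + 2) >> 2) == 12
        assert_eq!(vrsrad_n_s64::<2>(10, 7), 12);
    }
}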
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
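// Illustrative usage (not part of the generated bindings): RSUBHN2 narrows
// (b - c) by taking the rounded high half of each lane and writes it into
// the upper half of the result, keeping `a` as the lower half. A minimal
// sketch with hypothetical names.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrsubhn_high_example {
    use crate::core_arch::aarch64::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn narrows_the_rounded_high_half_of_the_difference() {
        let low = vdup_n_s8(9);
        let b = vdupq_n_s16(384);
        let c = vdupq_n_s16(0);
        // Each narrowed lane is (384 - 0 + 128) >> 8 == 2.
        let r = vrsubhn_high_s16(low, b, c);
        assert_eq!(vgetq_lane_s8::<0>(r), 9);
        assert_eq!(vgetq_lane_s8::<15>(r), 2);
    }
}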
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { simd_insert!(b, LANE as u32, a) }
}
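// Illustrative usage (not part of the generated bindings): vset(q)_lane_f64
// replaces a single lane and leaves the others untouched. A minimal sketch
// with hypothetical names.
#[cfg(all(test, target_arch = "aarch64"))]
mod vset_lane_example {
    use crate::core_arch::aarch64::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn replaces_only_the_requested_lane() {
        let v = vdupq_n_f64(0.0);
        let r = vsetq_lane_f64::<1>(1.5, v);
        assert_eq!(vgetq_lane_f64::<0>(r), 0.0);
        assert_eq!(vgetq_lane_f64::<1>(r), 1.5);
    }
}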
#[doc = "SHA512 hash update part 2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h2))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h2"
        )]
        fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512h2q_u64(a, b, c) }
}
#[doc = "SHA512 hash update part 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h"
        )]
        fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512hq_u64(a, b, c) }
}
#[doc = "SHA512 schedule update 0"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su0))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su0"
        )]
        fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su0q_u64(a, b) }
}
#[doc = "SHA512 schedule update 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su1"
        )]
        fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su1q_u64(a, b, c) }
}
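// Illustrative usage (not part of the generated bindings): the four SHA512
// intrinsics above split one compression step — vsha512hq_u64 and
// vsha512h2q_u64 perform the hash update, while vsha512su0q_u64 and
// vsha512su1q_u64 extend the message schedule. The sketch below only checks
// that the calls compose deterministically; the operand wiring of a real
// SHA-512 round is more involved and is not reproduced here. Names are
// hypothetical.
#[cfg(all(test, target_arch = "aarch64"))]
mod vsha512_example {
    use crate::core_arch::aarch64::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon,sha3")]
    unsafe fn the_hash_and_schedule_intrinsics_compose() {
        let w = vdupq_n_u64(0x0123_4567_89ab_cdef);
        // Extend the schedule, then feed it through both hash-update halves.
        let s = vsha512su1q_u64(vsha512su0q_u64(w, w), w, w);
        let h1 = vsha512h2q_u64(vsha512hq_u64(s, w, w), w, w);
        let h2 = vsha512h2q_u64(vsha512hq_u64(s, w, w), w, w);
        assert_eq!(vgetq_lane_u64::<0>(h1), vgetq_lane_u64::<0>(h2));
    }
}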
#[doc = "Signed Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_s64(a: i64, b: i64) -> i64 {
    unsafe { transmute(vshl_s64(transmute(a), transmute(b))) }
}
#[doc = "Unsigned Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_u64(a: u64, b: i64) -> u64 {
    unsafe { transmute(vshl_u64(transmute(a), transmute(b))) }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_s8::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_s16::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        let b: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_s32::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_u8::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_u16::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_u32::<N>(b)
    }
}
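// Illustrative usage (not part of the generated bindings): the *_high
// widening shifts operate on the upper half of the input vector, widening
// each lane and shifting it left by N. A minimal sketch with hypothetical
// names.
#[cfg(all(test, target_arch = "aarch64"))]
mod vshll_high_n_example {
    use crate::core_arch::aarch64::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn widens_and_shifts_the_upper_half() {
        let a = vdupq_n_s8(3);
        // Each upper-half lane is widened to i16 and shifted: 3 << 2 == 12.
        let r = vshll_high_n_s8::<2>(a);
        assert_eq!(vgetq_lane_s16::<0>(r), 12);
    }
}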
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert_uimm_bits!(N, 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i8"
        )]
        fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    unsafe { _vsli_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    static_assert_uimm_bits!(N, 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v16i8"
        )]
        fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    unsafe { _vsliq_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(N, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i16"
        )]
        fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    unsafe { _vsli_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    static_assert_uimm_bits!(N, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i16"
        )]
        fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
    }
    unsafe { _vsliq_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert!(N >= 0 && N <= 31);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i32"
        )]
        fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
    }
    unsafe { _vsli_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    static_assert!(N >= 0 && N <= 31);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i32"
        )]
        fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
    }
    unsafe { _vsliq_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v1i64"
        )]
        fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
    }
    unsafe { _vsli_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i64"
        )]
        fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
    }
    unsafe { _vsliq_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert_uimm_bits!(N, 3);
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert_uimm_bits!(N, 3);
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert_uimm_bits!(N, 4);
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert_uimm_bits!(N, 4);
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N >= 0 && N <= 31);
    unsafe { transmute(vsli_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N >= 0 && N <= 31);
    unsafe { transmute(vsliq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert_uimm_bits!(N, 3);
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    static_assert_uimm_bits!(N, 3);
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    static_assert_uimm_bits!(N, 4);
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    static_assert_uimm_bits!(N, 4);
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsli_n_u64::<N>(transmute(a), transmute(b))) }
}
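// Illustrative usage (not part of the generated bindings): SLI shifts the
// second operand left by N and inserts it into the first, preserving the low
// N bits of the destination: (b << N) | (a & ((1 << N) - 1)). A minimal
// sketch with the scalar form; names are hypothetical.
#[cfg(all(test, target_arch = "aarch64"))]
mod vslid_n_example {
    use crate::core_arch::aarch64::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn keeps_the_low_bits_of_the_destination() {
        // (0x12 << 8) | (0xFF & 0xFF) == 0x12FF
        assert_eq!(vslid_n_s64::<8>(0xFF, 0x12), 0x12FF);
    }
}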
24854#[doc = "SM3PARTW1"]
24855#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"]
24856#[inline]
24857#[target_feature(enable = "neon,sm4")]
24858#[cfg_attr(test, assert_instr(sm3partw1))]
24859#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24860pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
24861 unsafe extern "unadjusted" {
24862 #[cfg_attr(
24863 any(target_arch = "aarch64", target_arch = "arm64ec"),
24864 link_name = "llvm.aarch64.crypto.sm3partw1"
24865 )]
24866 fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
24867 }
24868 unsafe { _vsm3partw1q_u32(a, b, c) }
24869}
24870#[doc = "SM3PARTW2"]
24871#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"]
24872#[inline]
24873#[target_feature(enable = "neon,sm4")]
24874#[cfg_attr(test, assert_instr(sm3partw2))]
24875#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24876pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
24877 unsafe extern "unadjusted" {
24878 #[cfg_attr(
24879 any(target_arch = "aarch64", target_arch = "arm64ec"),
24880 link_name = "llvm.aarch64.crypto.sm3partw2"
24881 )]
24882 fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
24883 }
24884 unsafe { _vsm3partw2q_u32(a, b, c) }
24885}
24886#[doc = "SM3SS1"]
24887#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"]
24888#[inline]
24889#[target_feature(enable = "neon,sm4")]
24890#[cfg_attr(test, assert_instr(sm3ss1))]
24891#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24892pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
24893 unsafe extern "unadjusted" {
24894 #[cfg_attr(
24895 any(target_arch = "aarch64", target_arch = "arm64ec"),
24896 link_name = "llvm.aarch64.crypto.sm3ss1"
24897 )]
24898 fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
24899 }
24900 unsafe { _vsm3ss1q_u32(a, b, c) }
24901}
24902#[doc = "SM3TT1A"]
24903#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"]
24904#[inline]
24905#[target_feature(enable = "neon,sm4")]
24906#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))]
24907#[rustc_legacy_const_generics(3)]
24908#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24909pub fn vsm3tt1aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
24910 static_assert_uimm_bits!(IMM2, 2);
24911 unsafe extern "unadjusted" {
24912 #[cfg_attr(
24913 any(target_arch = "aarch64", target_arch = "arm64ec"),
24914 link_name = "llvm.aarch64.crypto.sm3tt1a"
24915 )]
24916 fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
24917 }
24918 unsafe { _vsm3tt1aq_u32(a, b, c, IMM2 as i64) }
24919}
24920#[doc = "SM3TT1B"]
24921#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"]
24922#[inline]
24923#[target_feature(enable = "neon,sm4")]
24924#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))]
24925#[rustc_legacy_const_generics(3)]
24926#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24927pub fn vsm3tt1bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
24928 static_assert_uimm_bits!(IMM2, 2);
24929 unsafe extern "unadjusted" {
24930 #[cfg_attr(
24931 any(target_arch = "aarch64", target_arch = "arm64ec"),
24932 link_name = "llvm.aarch64.crypto.sm3tt1b"
24933 )]
24934 fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
24935 }
24936 unsafe { _vsm3tt1bq_u32(a, b, c, IMM2 as i64) }
24937}
24938#[doc = "SM3TT2A"]
24939#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"]
24940#[inline]
24941#[target_feature(enable = "neon,sm4")]
24942#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))]
24943#[rustc_legacy_const_generics(3)]
24944#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24945pub fn vsm3tt2aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
24946 static_assert_uimm_bits!(IMM2, 2);
24947 unsafe extern "unadjusted" {
24948 #[cfg_attr(
24949 any(target_arch = "aarch64", target_arch = "arm64ec"),
24950 link_name = "llvm.aarch64.crypto.sm3tt2a"
24951 )]
24952 fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
24953 }
24954 unsafe { _vsm3tt2aq_u32(a, b, c, IMM2 as i64) }
24955}
24956#[doc = "SM3TT2B"]
24957#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"]
24958#[inline]
24959#[target_feature(enable = "neon,sm4")]
24960#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))]
24961#[rustc_legacy_const_generics(3)]
24962#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24963pub fn vsm3tt2bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
24964 static_assert_uimm_bits!(IMM2, 2);
24965 unsafe extern "unadjusted" {
24966 #[cfg_attr(
24967 any(target_arch = "aarch64", target_arch = "arm64ec"),
24968 link_name = "llvm.aarch64.crypto.sm3tt2b"
24969 )]
24970 fn _vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
24971 }
24972 unsafe { _vsm3tt2bq_u32(a, b, c, IMM2 as i64) }
24973}
24974#[doc = "SM4 key"]
24975#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"]
24976#[inline]
24977#[target_feature(enable = "neon,sm4")]
24978#[cfg_attr(test, assert_instr(sm4ekey))]
24979#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24980pub fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
24981 unsafe extern "unadjusted" {
24982 #[cfg_attr(
24983 any(target_arch = "aarch64", target_arch = "arm64ec"),
24984 link_name = "llvm.aarch64.crypto.sm4ekey"
24985 )]
24986 fn _vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
24987 }
24988 unsafe { _vsm4ekeyq_u32(a, b) }
24989}
24990#[doc = "SM4 encode"]
24991#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"]
24992#[inline]
24993#[target_feature(enable = "neon,sm4")]
24994#[cfg_attr(test, assert_instr(sm4e))]
24995#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24996pub fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
24997 unsafe extern "unadjusted" {
24998 #[cfg_attr(
24999 any(target_arch = "aarch64", target_arch = "arm64ec"),
25000 link_name = "llvm.aarch64.crypto.sm4e"
25001 )]
25002 fn _vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
25003 }
25004 unsafe { _vsm4eq_u32(a, b) }
25005}
25006#[doc = "Unsigned saturating Accumulate of Signed value."]
25007#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"]
25008#[inline]
25009#[target_feature(enable = "neon")]
25010#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25011#[cfg_attr(test, assert_instr(usqadd))]
25012pub fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
25013 unsafe extern "unadjusted" {
25014 #[cfg_attr(
25015 any(target_arch = "aarch64", target_arch = "arm64ec"),
25016 link_name = "llvm.aarch64.neon.usqadd.v8i8"
25017 )]
25018 fn _vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
25019 }
25020 unsafe { _vsqadd_u8(a, b) }
25021}
25022#[doc = "Unsigned saturating Accumulate of Signed value."]
25023#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"]
25024#[inline]
25025#[target_feature(enable = "neon")]
25026#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25027#[cfg_attr(test, assert_instr(usqadd))]
25028pub fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
25029 unsafe extern "unadjusted" {
25030 #[cfg_attr(
25031 any(target_arch = "aarch64", target_arch = "arm64ec"),
25032 link_name = "llvm.aarch64.neon.usqadd.v16i8"
25033 )]
25034 fn _vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
25035 }
25036 unsafe { _vsqaddq_u8(a, b) }
25037}
25038#[doc = "Unsigned saturating Accumulate of Signed value."]
25039#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"]
25040#[inline]
25041#[target_feature(enable = "neon")]
25042#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25043#[cfg_attr(test, assert_instr(usqadd))]
25044pub fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
25045 unsafe extern "unadjusted" {
25046 #[cfg_attr(
25047 any(target_arch = "aarch64", target_arch = "arm64ec"),
25048 link_name = "llvm.aarch64.neon.usqadd.v4i16"
25049 )]
25050 fn _vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
25051 }
25052 unsafe { _vsqadd_u16(a, b) }
25053}
25054#[doc = "Unsigned saturating Accumulate of Signed value."]
25055#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"]
25056#[inline]
25057#[target_feature(enable = "neon")]
25058#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25059#[cfg_attr(test, assert_instr(usqadd))]
25060pub fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
25061 unsafe extern "unadjusted" {
25062 #[cfg_attr(
25063 any(target_arch = "aarch64", target_arch = "arm64ec"),
25064 link_name = "llvm.aarch64.neon.usqadd.v8i16"
25065 )]
25066 fn _vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
25067 }
25068 unsafe { _vsqaddq_u16(a, b) }
25069}
25070#[doc = "Unsigned saturating Accumulate of Signed value."]
25071#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"]
25072#[inline]
25073#[target_feature(enable = "neon")]
25074#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25075#[cfg_attr(test, assert_instr(usqadd))]
25076pub fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
25077 unsafe extern "unadjusted" {
25078 #[cfg_attr(
25079 any(target_arch = "aarch64", target_arch = "arm64ec"),
25080 link_name = "llvm.aarch64.neon.usqadd.v2i32"
25081 )]
25082 fn _vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
25083 }
25084 unsafe { _vsqadd_u32(a, b) }
25085}
25086#[doc = "Unsigned saturating Accumulate of Signed value."]
25087#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"]
25088#[inline]
25089#[target_feature(enable = "neon")]
25090#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25091#[cfg_attr(test, assert_instr(usqadd))]
25092pub fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
25093 unsafe extern "unadjusted" {
25094 #[cfg_attr(
25095 any(target_arch = "aarch64", target_arch = "arm64ec"),
25096 link_name = "llvm.aarch64.neon.usqadd.v4i32"
25097 )]
25098 fn _vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
25099 }
25100 unsafe { _vsqaddq_u32(a, b) }
25101}
25102#[doc = "Unsigned saturating Accumulate of Signed value."]
25103#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"]
25104#[inline]
25105#[target_feature(enable = "neon")]
25106#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25107#[cfg_attr(test, assert_instr(usqadd))]
25108pub fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
25109 unsafe extern "unadjusted" {
25110 #[cfg_attr(
25111 any(target_arch = "aarch64", target_arch = "arm64ec"),
25112 link_name = "llvm.aarch64.neon.usqadd.v1i64"
25113 )]
25114 fn _vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
25115 }
25116 unsafe { _vsqadd_u64(a, b) }
25117}
25118#[doc = "Unsigned saturating Accumulate of Signed value."]
25119#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"]
25120#[inline]
25121#[target_feature(enable = "neon")]
25122#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25123#[cfg_attr(test, assert_instr(usqadd))]
25124pub fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
25125 unsafe extern "unadjusted" {
25126 #[cfg_attr(
25127 any(target_arch = "aarch64", target_arch = "arm64ec"),
25128 link_name = "llvm.aarch64.neon.usqadd.v2i64"
25129 )]
25130 fn _vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
25131 }
25132 unsafe { _vsqaddq_u64(a, b) }
25133}
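// Illustrative sketch, not part of the generated bindings: a hypothetical
// test-only helper (our name, our values) showing the unsigned-saturating
// semantics of vsqadd_u16. Assumes an aarch64 target with NEON available.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vsqadd_u16_example() -> uint16x4_t {
    let acc: uint16x4_t = vdup_n_u16(0xFFF0);
    let addend: int16x4_t = vdup_n_s16(0x20);
    // 0xFFF0 + 0x20 exceeds u16::MAX, so every lane saturates to 0xFFFF
    // instead of wrapping; a negative addend would saturate toward 0.
    vsqadd_u16(acc, addend)
}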
25134#[doc = "Unsigned saturating accumulate of signed value."]
25135#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"]
25136#[inline]
25137#[target_feature(enable = "neon")]
25138#[cfg_attr(test, assert_instr(usqadd))]
25139#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25140pub fn vsqaddb_u8(a: u8, b: i8) -> u8 {
25141 unsafe { simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) }
25142}
25143#[doc = "Unsigned saturating accumulate of signed value."]
25144#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"]
25145#[inline]
25146#[target_feature(enable = "neon")]
25147#[cfg_attr(test, assert_instr(usqadd))]
25148#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25149pub fn vsqaddh_u16(a: u16, b: i16) -> u16 {
25150 unsafe { simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) }
25151}
25152#[doc = "Unsigned saturating accumulate of signed value."]
25153#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"]
25154#[inline]
25155#[target_feature(enable = "neon")]
25156#[cfg_attr(test, assert_instr(usqadd))]
25157#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25158pub fn vsqaddd_u64(a: u64, b: i64) -> u64 {
25159 unsafe extern "unadjusted" {
25160 #[cfg_attr(
25161 any(target_arch = "aarch64", target_arch = "arm64ec"),
25162 link_name = "llvm.aarch64.neon.usqadd.i64"
25163 )]
25164 fn _vsqaddd_u64(a: u64, b: i64) -> u64;
25165 }
25166 unsafe { _vsqaddd_u64(a, b) }
25167}
25168#[doc = "Unsigned saturating accumulate of signed value."]
25169#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"]
25170#[inline]
25171#[target_feature(enable = "neon")]
25172#[cfg_attr(test, assert_instr(usqadd))]
25173#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25174pub fn vsqadds_u32(a: u32, b: i32) -> u32 {
25175 unsafe extern "unadjusted" {
25176 #[cfg_attr(
25177 any(target_arch = "aarch64", target_arch = "arm64ec"),
25178 link_name = "llvm.aarch64.neon.usqadd.i32"
25179 )]
25180 fn _vsqadds_u32(a: u32, b: i32) -> u32;
25181 }
25182 unsafe { _vsqadds_u32(a, b) }
25183}
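// Illustrative sketch, not part of the generated bindings: the scalar forms
// obey the same saturating rule. Helper name and inputs below are ours.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vsqaddb_u8_example() -> (u8, u8) {
    // 250 + 10 saturates to 255; 5 + (-10) saturates to 0.
    (vsqaddb_u8(250, 10), vsqaddb_u8(5, -10))
}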
25184#[doc = "Calculates the square root of each lane."]
25185#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f16)"]
25186#[inline]
25187#[cfg_attr(test, assert_instr(fsqrt))]
25188#[target_feature(enable = "neon,fp16")]
25189#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
25190pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t {
25191 unsafe { simd_fsqrt(a) }
25192}
25193#[doc = "Calculates the square root of each lane."]
25194#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f16)"]
25195#[inline]
25196#[cfg_attr(test, assert_instr(fsqrt))]
25197#[target_feature(enable = "neon,fp16")]
25198#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
25199pub fn vsqrtq_f16(a: float16x8_t) -> float16x8_t {
25200 unsafe { simd_fsqrt(a) }
25201}
25202#[doc = "Calculates the square root of each lane."]
25203#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"]
25204#[inline]
25205#[target_feature(enable = "neon")]
25206#[cfg_attr(test, assert_instr(fsqrt))]
25207#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25208pub fn vsqrt_f32(a: float32x2_t) -> float32x2_t {
25209 unsafe { simd_fsqrt(a) }
25210}
25211#[doc = "Calculates the square root of each lane."]
25212#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"]
25213#[inline]
25214#[target_feature(enable = "neon")]
25215#[cfg_attr(test, assert_instr(fsqrt))]
25216#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25217pub fn vsqrtq_f32(a: float32x4_t) -> float32x4_t {
25218 unsafe { simd_fsqrt(a) }
25219}
25220#[doc = "Calculates the square root of each lane."]
25221#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"]
25222#[inline]
25223#[target_feature(enable = "neon")]
25224#[cfg_attr(test, assert_instr(fsqrt))]
25225#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25226pub fn vsqrt_f64(a: float64x1_t) -> float64x1_t {
25227 unsafe { simd_fsqrt(a) }
25228}
25229#[doc = "Calculates the square root of each lane."]
25230#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"]
25231#[inline]
25232#[target_feature(enable = "neon")]
25233#[cfg_attr(test, assert_instr(fsqrt))]
25234#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25235pub fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
25236 unsafe { simd_fsqrt(a) }
25237}
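// Illustrative sketch, not part of the generated bindings: lane-wise square
// root via vsqrtq_f32. Helper name and input are ours; assumes NEON.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vsqrtq_f32_example() -> float32x4_t {
    let v = vdupq_n_f32(9.0); // every lane is 9.0
    vsqrtq_f32(v) // every lane becomes 3.0
}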
25238#[doc = "Calculates the square root of the value."]
25239#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrth_f16)"]
25240#[inline]
25241#[target_feature(enable = "neon,fp16")]
25242#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
25243#[cfg_attr(test, assert_instr(fsqrt))]
25244pub fn vsqrth_f16(a: f16) -> f16 {
25245 unsafe { sqrtf16(a) }
25246}
25247#[doc = "Shift Right and Insert (immediate)"]
25248#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"]
25249#[inline]
25250#[target_feature(enable = "neon")]
25251#[cfg_attr(test, assert_instr(sri, N = 1))]
25252#[rustc_legacy_const_generics(2)]
25253#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25254pub fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
25255 static_assert!(N >= 1 && N <= 8);
25256 unsafe extern "unadjusted" {
25257 #[cfg_attr(
25258 any(target_arch = "aarch64", target_arch = "arm64ec"),
25259 link_name = "llvm.aarch64.neon.vsri.v8i8"
25260 )]
25261 fn _vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
25262 }
25263 unsafe { _vsri_n_s8(a, b, N) }
25264}
25265#[doc = "Shift Right and Insert (immediate)"]
25266#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"]
25267#[inline]
25268#[target_feature(enable = "neon")]
25269#[cfg_attr(test, assert_instr(sri, N = 1))]
25270#[rustc_legacy_const_generics(2)]
25271#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25272pub fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
25273 static_assert!(N >= 1 && N <= 8);
25274 unsafe extern "unadjusted" {
25275 #[cfg_attr(
25276 any(target_arch = "aarch64", target_arch = "arm64ec"),
25277 link_name = "llvm.aarch64.neon.vsri.v16i8"
25278 )]
25279 fn _vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
25280 }
25281 unsafe { _vsriq_n_s8(a, b, N) }
25282}
25283#[doc = "Shift Right and Insert (immediate)"]
25284#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"]
25285#[inline]
25286#[target_feature(enable = "neon")]
25287#[cfg_attr(test, assert_instr(sri, N = 1))]
25288#[rustc_legacy_const_generics(2)]
25289#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25290pub fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
25291 static_assert!(N >= 1 && N <= 16);
25292 unsafe extern "unadjusted" {
25293 #[cfg_attr(
25294 any(target_arch = "aarch64", target_arch = "arm64ec"),
25295 link_name = "llvm.aarch64.neon.vsri.v4i16"
25296 )]
25297 fn _vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
25298 }
25299 unsafe { _vsri_n_s16(a, b, N) }
25300}
25301#[doc = "Shift Right and Insert (immediate)"]
25302#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"]
25303#[inline]
25304#[target_feature(enable = "neon")]
25305#[cfg_attr(test, assert_instr(sri, N = 1))]
25306#[rustc_legacy_const_generics(2)]
25307#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25308pub fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
25309 static_assert!(N >= 1 && N <= 16);
25310 unsafe extern "unadjusted" {
25311 #[cfg_attr(
25312 any(target_arch = "aarch64", target_arch = "arm64ec"),
25313 link_name = "llvm.aarch64.neon.vsri.v8i16"
25314 )]
25315 fn _vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
25316 }
25317 unsafe { _vsriq_n_s16(a, b, N) }
25318}
25319#[doc = "Shift Right and Insert (immediate)"]
25320#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"]
25321#[inline]
25322#[target_feature(enable = "neon")]
25323#[cfg_attr(test, assert_instr(sri, N = 1))]
25324#[rustc_legacy_const_generics(2)]
25325#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25326pub fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
25327 static_assert!(N >= 1 && N <= 32);
25328 unsafe extern "unadjusted" {
25329 #[cfg_attr(
25330 any(target_arch = "aarch64", target_arch = "arm64ec"),
25331 link_name = "llvm.aarch64.neon.vsri.v2i32"
25332 )]
25333 fn _vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
25334 }
25335 unsafe { _vsri_n_s32(a, b, N) }
25336}
25337#[doc = "Shift Right and Insert (immediate)"]
25338#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"]
25339#[inline]
25340#[target_feature(enable = "neon")]
25341#[cfg_attr(test, assert_instr(sri, N = 1))]
25342#[rustc_legacy_const_generics(2)]
25343#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25344pub fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
25345 static_assert!(N >= 1 && N <= 32);
25346 unsafe extern "unadjusted" {
25347 #[cfg_attr(
25348 any(target_arch = "aarch64", target_arch = "arm64ec"),
25349 link_name = "llvm.aarch64.neon.vsri.v4i32"
25350 )]
25351 fn _vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
25352 }
25353 unsafe { _vsriq_n_s32(a, b, N) }
25354}
25355#[doc = "Shift Right and Insert (immediate)"]
25356#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"]
25357#[inline]
25358#[target_feature(enable = "neon")]
25359#[cfg_attr(test, assert_instr(sri, N = 1))]
25360#[rustc_legacy_const_generics(2)]
25361#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25362pub fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
25363 static_assert!(N >= 1 && N <= 64);
25364 unsafe extern "unadjusted" {
25365 #[cfg_attr(
25366 any(target_arch = "aarch64", target_arch = "arm64ec"),
25367 link_name = "llvm.aarch64.neon.vsri.v1i64"
25368 )]
25369 fn _vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
25370 }
25371 unsafe { _vsri_n_s64(a, b, N) }
25372}
25373#[doc = "Shift Right and Insert (immediate)"]
25374#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"]
25375#[inline]
25376#[target_feature(enable = "neon")]
25377#[cfg_attr(test, assert_instr(sri, N = 1))]
25378#[rustc_legacy_const_generics(2)]
25379#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25380pub fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
25381 static_assert!(N >= 1 && N <= 64);
25382 unsafe extern "unadjusted" {
25383 #[cfg_attr(
25384 any(target_arch = "aarch64", target_arch = "arm64ec"),
25385 link_name = "llvm.aarch64.neon.vsri.v2i64"
25386 )]
25387 fn _vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
25388 }
25389 unsafe { _vsriq_n_s64(a, b, N) }
25390}
25391#[doc = "Shift Right and Insert (immediate)"]
25392#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"]
25393#[inline]
25394#[target_feature(enable = "neon")]
25395#[cfg_attr(test, assert_instr(sri, N = 1))]
25396#[rustc_legacy_const_generics(2)]
25397#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25398pub fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
25399 static_assert!(N >= 1 && N <= 8);
25400 unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
25401}
25402#[doc = "Shift Right and Insert (immediate)"]
25403#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"]
25404#[inline]
25405#[target_feature(enable = "neon")]
25406#[cfg_attr(test, assert_instr(sri, N = 1))]
25407#[rustc_legacy_const_generics(2)]
25408#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25409pub fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
25410 static_assert!(N >= 1 && N <= 8);
25411 unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
25412}
25413#[doc = "Shift Right and Insert (immediate)"]
25414#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"]
25415#[inline]
25416#[target_feature(enable = "neon")]
25417#[cfg_attr(test, assert_instr(sri, N = 1))]
25418#[rustc_legacy_const_generics(2)]
25419#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25420pub fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
25421 static_assert!(N >= 1 && N <= 16);
25422 unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
25423}
25424#[doc = "Shift Right and Insert (immediate)"]
25425#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"]
25426#[inline]
25427#[target_feature(enable = "neon")]
25428#[cfg_attr(test, assert_instr(sri, N = 1))]
25429#[rustc_legacy_const_generics(2)]
25430#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25431pub fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
25432 static_assert!(N >= 1 && N <= 16);
25433 unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
25434}
25435#[doc = "Shift Right and Insert (immediate)"]
25436#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"]
25437#[inline]
25438#[target_feature(enable = "neon")]
25439#[cfg_attr(test, assert_instr(sri, N = 1))]
25440#[rustc_legacy_const_generics(2)]
25441#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25442pub fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
25443 static_assert!(N >= 1 && N <= 32);
25444 unsafe { transmute(vsri_n_s32::<N>(transmute(a), transmute(b))) }
25445}
25446#[doc = "Shift Right and Insert (immediate)"]
25447#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"]
25448#[inline]
25449#[target_feature(enable = "neon")]
25450#[cfg_attr(test, assert_instr(sri, N = 1))]
25451#[rustc_legacy_const_generics(2)]
25452#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25453pub fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
25454 static_assert!(N >= 1 && N <= 32);
25455 unsafe { transmute(vsriq_n_s32::<N>(transmute(a), transmute(b))) }
25456}
25457#[doc = "Shift Right and Insert (immediate)"]
25458#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"]
25459#[inline]
25460#[target_feature(enable = "neon")]
25461#[cfg_attr(test, assert_instr(sri, N = 1))]
25462#[rustc_legacy_const_generics(2)]
25463#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25464pub fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
25465 static_assert!(N >= 1 && N <= 64);
25466 unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
25467}
25468#[doc = "Shift Right and Insert (immediate)"]
25469#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"]
25470#[inline]
25471#[target_feature(enable = "neon")]
25472#[cfg_attr(test, assert_instr(sri, N = 1))]
25473#[rustc_legacy_const_generics(2)]
25474#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25475pub fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
25476 static_assert!(N >= 1 && N <= 64);
25477 unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
25478}
25479#[doc = "Shift Right and Insert (immediate)"]
25480#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"]
25481#[inline]
25482#[target_feature(enable = "neon")]
25483#[cfg_attr(test, assert_instr(sri, N = 1))]
25484#[rustc_legacy_const_generics(2)]
25485#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25486pub fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
25487 static_assert!(N >= 1 && N <= 8);
25488 unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
25489}
25490#[doc = "Shift Right and Insert (immediate)"]
25491#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"]
25492#[inline]
25493#[target_feature(enable = "neon")]
25494#[cfg_attr(test, assert_instr(sri, N = 1))]
25495#[rustc_legacy_const_generics(2)]
25496#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25497pub fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
25498 static_assert!(N >= 1 && N <= 8);
25499 unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
25500}
25501#[doc = "Shift Right and Insert (immediate)"]
25502#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"]
25503#[inline]
25504#[target_feature(enable = "neon")]
25505#[cfg_attr(test, assert_instr(sri, N = 1))]
25506#[rustc_legacy_const_generics(2)]
25507#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25508pub fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
25509 static_assert!(N >= 1 && N <= 16);
25510 unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
25511}
25512#[doc = "Shift Right and Insert (immediate)"]
25513#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"]
25514#[inline]
25515#[target_feature(enable = "neon")]
25516#[cfg_attr(test, assert_instr(sri, N = 1))]
25517#[rustc_legacy_const_generics(2)]
25518#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25519pub fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
25520 static_assert!(N >= 1 && N <= 16);
25521 unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
25522}
25523#[doc = "Shift Right and Insert (immediate)"]
25524#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"]
25525#[inline]
25526#[target_feature(enable = "neon,aes")]
25527#[cfg_attr(test, assert_instr(sri, N = 1))]
25528#[rustc_legacy_const_generics(2)]
25529#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25530pub fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
25531 static_assert!(N >= 1 && N <= 64);
25532 unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
25533}
25534#[doc = "Shift Right and Insert (immediate)"]
25535#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"]
25536#[inline]
25537#[target_feature(enable = "neon,aes")]
25538#[cfg_attr(test, assert_instr(sri, N = 1))]
25539#[rustc_legacy_const_generics(2)]
25540#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25541pub fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
25542 static_assert!(N >= 1 && N <= 64);
25543 unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
25544}
25545#[doc = "Shift right and insert"]
25546#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"]
25547#[inline]
25548#[target_feature(enable = "neon")]
25549#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25550#[rustc_legacy_const_generics(2)]
25551#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
25552pub fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
25553 static_assert!(N >= 1 && N <= 64);
25554 unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
25555}
25556#[doc = "Shift right and insert"]
25557#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"]
25558#[inline]
25559#[target_feature(enable = "neon")]
25560#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25561#[rustc_legacy_const_generics(2)]
25562#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
25563pub fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
25564 static_assert!(N >= 1 && N <= 64);
25565 unsafe { transmute(vsri_n_u64::<N>(transmute(a), transmute(b))) }
25566}
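// Illustrative sketch, not part of the generated bindings: SRI shifts each
// lane of `b` right by N and inserts the low (8 - N) result bits into `a`,
// preserving `a`'s top N bits. Helper name and values are ours.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vsri_n_u8_example() -> uint8x8_t {
    let a = vdup_n_u8(0b1000_0000);
    let b = vdup_n_u8(0b1111_1111);
    // b >> 4 = 0b0000_1111, merged under a's preserved top four bits:
    // every lane of the result is 0b1000_1111.
    vsri_n_u8::<4>(a, b)
}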
25567#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25568#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"]
25569#[doc = "## Safety"]
25570#[doc = " * Neon intrinsic unsafe"]
25571#[inline]
25572#[target_feature(enable = "neon,fp16")]
25573#[cfg_attr(test, assert_instr(str))]
25574#[allow(clippy::cast_ptr_alignment)]
25575#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
25576pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) {
25577 crate::ptr::write_unaligned(ptr.cast(), a)
25578}
25579#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25580#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"]
25581#[doc = "## Safety"]
25582#[doc = " * Neon intrinsic unsafe"]
25583#[inline]
25584#[target_feature(enable = "neon,fp16")]
25585#[cfg_attr(test, assert_instr(str))]
25586#[allow(clippy::cast_ptr_alignment)]
25587#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
25588pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) {
25589 crate::ptr::write_unaligned(ptr.cast(), a)
25590}
25591#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25592#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"]
25593#[doc = "## Safety"]
25594#[doc = " * Neon intrinsic unsafe"]
25595#[inline]
25596#[target_feature(enable = "neon")]
25597#[cfg_attr(test, assert_instr(str))]
25598#[allow(clippy::cast_ptr_alignment)]
25599#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25600pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) {
25601 crate::ptr::write_unaligned(ptr.cast(), a)
25602}
25603#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25604#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"]
25605#[doc = "## Safety"]
25606#[doc = " * Neon intrinsic unsafe"]
25607#[inline]
25608#[target_feature(enable = "neon")]
25609#[cfg_attr(test, assert_instr(str))]
25610#[allow(clippy::cast_ptr_alignment)]
25611#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25612pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) {
25613 crate::ptr::write_unaligned(ptr.cast(), a)
25614}
25615#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25616#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"]
25617#[doc = "## Safety"]
25618#[doc = " * Neon intrinsic unsafe"]
25619#[inline]
25620#[target_feature(enable = "neon")]
25621#[cfg_attr(test, assert_instr(str))]
25622#[allow(clippy::cast_ptr_alignment)]
25623#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25624pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) {
25625 crate::ptr::write_unaligned(ptr.cast(), a)
25626}
25627#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25628#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"]
25629#[doc = "## Safety"]
25630#[doc = " * Neon intrinsic unsafe"]
25631#[inline]
25632#[target_feature(enable = "neon")]
25633#[cfg_attr(test, assert_instr(str))]
25634#[allow(clippy::cast_ptr_alignment)]
25635#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25636pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) {
25637 crate::ptr::write_unaligned(ptr.cast(), a)
25638}
25639#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25640#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"]
25641#[doc = "## Safety"]
25642#[doc = " * Neon intrinsic unsafe"]
25643#[inline]
25644#[target_feature(enable = "neon")]
25645#[cfg_attr(test, assert_instr(str))]
25646#[allow(clippy::cast_ptr_alignment)]
25647#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25648pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) {
25649 crate::ptr::write_unaligned(ptr.cast(), a)
25650}
25651#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25652#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"]
25653#[doc = "## Safety"]
25654#[doc = " * Neon intrinsic unsafe"]
25655#[inline]
25656#[target_feature(enable = "neon")]
25657#[cfg_attr(test, assert_instr(str))]
25658#[allow(clippy::cast_ptr_alignment)]
25659#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25660pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) {
25661 crate::ptr::write_unaligned(ptr.cast(), a)
25662}
25663#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25664#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"]
25665#[doc = "## Safety"]
25666#[doc = " * Neon intrinsic unsafe"]
25667#[inline]
25668#[target_feature(enable = "neon")]
25669#[cfg_attr(test, assert_instr(str))]
25670#[allow(clippy::cast_ptr_alignment)]
25671#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25672pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) {
25673 crate::ptr::write_unaligned(ptr.cast(), a)
25674}
25675#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25676#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"]
25677#[doc = "## Safety"]
25678#[doc = " * Neon intrinsic unsafe"]
25679#[inline]
25680#[target_feature(enable = "neon")]
25681#[cfg_attr(test, assert_instr(str))]
25682#[allow(clippy::cast_ptr_alignment)]
25683#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25684pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) {
25685 crate::ptr::write_unaligned(ptr.cast(), a)
25686}
25687#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25688#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"]
25689#[doc = "## Safety"]
25690#[doc = " * Neon intrinsic unsafe"]
25691#[inline]
25692#[target_feature(enable = "neon")]
25693#[cfg_attr(test, assert_instr(str))]
25694#[allow(clippy::cast_ptr_alignment)]
25695#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25696pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) {
25697 crate::ptr::write_unaligned(ptr.cast(), a)
25698}
25699#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25700#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"]
25701#[doc = "## Safety"]
25702#[doc = " * Neon intrinsic unsafe"]
25703#[inline]
25704#[target_feature(enable = "neon")]
25705#[cfg_attr(test, assert_instr(str))]
25706#[allow(clippy::cast_ptr_alignment)]
25707#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25708pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) {
25709 crate::ptr::write_unaligned(ptr.cast(), a)
25710}
25711#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25712#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"]
25713#[doc = "## Safety"]
25714#[doc = " * Neon intrinsic unsafe"]
25715#[inline]
25716#[target_feature(enable = "neon")]
25717#[cfg_attr(test, assert_instr(str))]
25718#[allow(clippy::cast_ptr_alignment)]
25719#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25720pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) {
25721 crate::ptr::write_unaligned(ptr.cast(), a)
25722}
25723#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25724#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"]
25725#[doc = "## Safety"]
25726#[doc = " * Neon intrinsic unsafe"]
25727#[inline]
25728#[target_feature(enable = "neon")]
25729#[cfg_attr(test, assert_instr(str))]
25730#[allow(clippy::cast_ptr_alignment)]
25731#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25732pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) {
25733 crate::ptr::write_unaligned(ptr.cast(), a)
25734}
25735#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25736#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"]
25737#[doc = "## Safety"]
25738#[doc = " * Neon intrinsic unsafe"]
25739#[inline]
25740#[target_feature(enable = "neon")]
25741#[cfg_attr(test, assert_instr(str))]
25742#[allow(clippy::cast_ptr_alignment)]
25743#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25744pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) {
25745 crate::ptr::write_unaligned(ptr.cast(), a)
25746}
25747#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25748#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"]
25749#[doc = "## Safety"]
25750#[doc = " * Neon intrinsic unsafe"]
25751#[inline]
25752#[target_feature(enable = "neon")]
25753#[cfg_attr(test, assert_instr(str))]
25754#[allow(clippy::cast_ptr_alignment)]
25755#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25756pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) {
25757 crate::ptr::write_unaligned(ptr.cast(), a)
25758}
25759#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25760#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"]
25761#[doc = "## Safety"]
25762#[doc = " * Neon intrinsic unsafe"]
25763#[inline]
25764#[target_feature(enable = "neon")]
25765#[cfg_attr(test, assert_instr(str))]
25766#[allow(clippy::cast_ptr_alignment)]
25767#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25768pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) {
25769 crate::ptr::write_unaligned(ptr.cast(), a)
25770}
25771#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25772#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"]
25773#[doc = "## Safety"]
25774#[doc = " * Neon intrinsic unsafe"]
25775#[inline]
25776#[target_feature(enable = "neon")]
25777#[cfg_attr(test, assert_instr(str))]
25778#[allow(clippy::cast_ptr_alignment)]
25779#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25780pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) {
25781 crate::ptr::write_unaligned(ptr.cast(), a)
25782}
25783#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25784#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"]
25785#[doc = "## Safety"]
25786#[doc = " * Neon intrinsic unsafe"]
25787#[inline]
25788#[target_feature(enable = "neon")]
25789#[cfg_attr(test, assert_instr(str))]
25790#[allow(clippy::cast_ptr_alignment)]
25791#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25792pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) {
25793 crate::ptr::write_unaligned(ptr.cast(), a)
25794}
25795#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25796#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"]
25797#[doc = "## Safety"]
25798#[doc = " * Neon intrinsic unsafe"]
25799#[inline]
25800#[target_feature(enable = "neon")]
25801#[cfg_attr(test, assert_instr(str))]
25802#[allow(clippy::cast_ptr_alignment)]
25803#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25804pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) {
25805 crate::ptr::write_unaligned(ptr.cast(), a)
25806}
25807#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25808#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"]
25809#[doc = "## Safety"]
25810#[doc = " * Neon intrinsic unsafe"]
25811#[inline]
25812#[target_feature(enable = "neon")]
25813#[cfg_attr(test, assert_instr(str))]
25814#[allow(clippy::cast_ptr_alignment)]
25815#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25816pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) {
25817 crate::ptr::write_unaligned(ptr.cast(), a)
25818}
25819#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25820#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"]
25821#[doc = "## Safety"]
25822#[doc = " * Neon intrinsic unsafe"]
25823#[inline]
25824#[target_feature(enable = "neon")]
25825#[cfg_attr(test, assert_instr(str))]
25826#[allow(clippy::cast_ptr_alignment)]
25827#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25828pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) {
25829 crate::ptr::write_unaligned(ptr.cast(), a)
25830}
25831#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25832#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"]
25833#[doc = "## Safety"]
25834#[doc = " * Neon intrinsic unsafe"]
25835#[inline]
25836#[target_feature(enable = "neon")]
25837#[cfg_attr(test, assert_instr(str))]
25838#[allow(clippy::cast_ptr_alignment)]
25839#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25840pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) {
25841 crate::ptr::write_unaligned(ptr.cast(), a)
25842}
25843#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25844#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"]
25845#[doc = "## Safety"]
25846#[doc = " * Neon intrinsic unsafe"]
25847#[inline]
25848#[target_feature(enable = "neon")]
25849#[cfg_attr(test, assert_instr(str))]
25850#[allow(clippy::cast_ptr_alignment)]
25851#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25852pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) {
25853 crate::ptr::write_unaligned(ptr.cast(), a)
25854}
25855#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25856#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"]
25857#[doc = "## Safety"]
25858#[doc = " * Neon intrinsic unsafe"]
25859#[inline]
25860#[target_feature(enable = "neon")]
25861#[cfg_attr(test, assert_instr(str))]
25862#[allow(clippy::cast_ptr_alignment)]
25863#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25864pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) {
25865 crate::ptr::write_unaligned(ptr.cast(), a)
25866}
25867#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25868#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"]
25869#[doc = "## Safety"]
25870#[doc = " * Neon intrinsic unsafe"]
25871#[inline]
25872#[target_feature(enable = "neon")]
25873#[cfg_attr(test, assert_instr(str))]
25874#[allow(clippy::cast_ptr_alignment)]
25875#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25876pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
25877 crate::ptr::write_unaligned(ptr.cast(), a)
25878}
25879#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25880#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"]
25881#[doc = "## Safety"]
25882#[doc = " * Neon intrinsic unsafe"]
25883#[inline]
25884#[target_feature(enable = "neon,aes")]
25885#[cfg_attr(test, assert_instr(str))]
25886#[allow(clippy::cast_ptr_alignment)]
25887#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25888pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
25889 crate::ptr::write_unaligned(ptr.cast(), a)
25890}
25891#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25892#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"]
25893#[doc = "## Safety"]
25894#[doc = " * Neon intrinsic unsafe"]
25895#[inline]
25896#[target_feature(enable = "neon,aes")]
25897#[cfg_attr(test, assert_instr(str))]
25898#[allow(clippy::cast_ptr_alignment)]
25899#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25900pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) {
25901 crate::ptr::write_unaligned(ptr.cast(), a)
25902}
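// Illustrative sketch, not part of the generated bindings: vst1q_u32 writes
// all four lanes to memory that need not be aligned. Names/values are ours.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vst1q_u32_example() -> [u32; 4] {
    let mut out = [0u32; 4];
    // SAFETY: `out` provides valid storage for all four lanes.
    unsafe { vst1q_u32(out.as_mut_ptr(), vdupq_n_u32(7)) };
    out // [7, 7, 7, 7]
}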
25903#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
25904#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"]
25905#[doc = "## Safety"]
25906#[doc = " * Neon intrinsic unsafe"]
25907#[inline]
25908#[target_feature(enable = "neon")]
25909#[cfg_attr(test, assert_instr(st1))]
25910#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25911pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) {
25912 unsafe extern "unadjusted" {
25913 #[cfg_attr(
25914 any(target_arch = "aarch64", target_arch = "arm64ec"),
25915 link_name = "llvm.aarch64.neon.st1x2.v1f64.p0"
25916 )]
25917 fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64);
25918 }
25919 _vst1_f64_x2(b.0, b.1, a)
25920}
25921#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
25922#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"]
25923#[doc = "## Safety"]
25924#[doc = " * Neon intrinsic unsafe"]
25925#[inline]
25926#[target_feature(enable = "neon")]
25927#[cfg_attr(test, assert_instr(st1))]
25928#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25929pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) {
25930 unsafe extern "unadjusted" {
25931 #[cfg_attr(
25932 any(target_arch = "aarch64", target_arch = "arm64ec"),
25933 link_name = "llvm.aarch64.neon.st1x2.v2f64.p0"
25934 )]
25935 fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64);
25936 }
25937 _vst1q_f64_x2(b.0, b.1, a)
25938}
25939#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
25940#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"]
25941#[doc = "## Safety"]
25942#[doc = " * Neon intrinsic unsafe"]
25943#[inline]
25944#[target_feature(enable = "neon")]
25945#[cfg_attr(test, assert_instr(st1))]
25946#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25947pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) {
25948 unsafe extern "unadjusted" {
25949 #[cfg_attr(
25950 any(target_arch = "aarch64", target_arch = "arm64ec"),
25951 link_name = "llvm.aarch64.neon.st1x3.v1f64.p0"
25952 )]
25953 fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64);
25954 }
25955 _vst1_f64_x3(b.0, b.1, b.2, a)
25956}
25957#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
25958#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"]
25959#[doc = "## Safety"]
25960#[doc = " * Neon intrinsic unsafe"]
25961#[inline]
25962#[target_feature(enable = "neon")]
25963#[cfg_attr(test, assert_instr(st1))]
25964#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25965pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) {
25966 unsafe extern "unadjusted" {
25967 #[cfg_attr(
25968 any(target_arch = "aarch64", target_arch = "arm64ec"),
25969 link_name = "llvm.aarch64.neon.st1x3.v2f64.p0"
25970 )]
25971 fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64);
25972 }
25973 _vst1q_f64_x3(b.0, b.1, b.2, a)
25974}
25975#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
25976#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"]
25977#[doc = "## Safety"]
25978#[doc = " * Neon intrinsic unsafe"]
25979#[inline]
25980#[target_feature(enable = "neon")]
25981#[cfg_attr(test, assert_instr(st1))]
25982#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25983pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) {
25984 unsafe extern "unadjusted" {
25985 #[cfg_attr(
25986 any(target_arch = "aarch64", target_arch = "arm64ec"),
25987 link_name = "llvm.aarch64.neon.st1x4.v1f64.p0"
25988 )]
25989 fn _vst1_f64_x4(
25990 a: float64x1_t,
25991 b: float64x1_t,
25992 c: float64x1_t,
25993 d: float64x1_t,
25994 ptr: *mut f64,
25995 );
25996 }
25997 _vst1_f64_x4(b.0, b.1, b.2, b.3, a)
25998}
25999#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
26000#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"]
26001#[doc = "## Safety"]
26002#[doc = " * Neon intrinsic unsafe"]
26003#[inline]
26004#[target_feature(enable = "neon")]
26005#[cfg_attr(test, assert_instr(st1))]
26006#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26007pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) {
26008 unsafe extern "unadjusted" {
26009 #[cfg_attr(
26010 any(target_arch = "aarch64", target_arch = "arm64ec"),
26011 link_name = "llvm.aarch64.neon.st1x4.v2f64.p0"
26012 )]
26013 fn _vst1q_f64_x4(
26014 a: float64x2_t,
26015 b: float64x2_t,
26016 c: float64x2_t,
26017 d: float64x2_t,
26018 ptr: *mut f64,
26019 );
26020 }
26021 _vst1q_f64_x4(b.0, b.1, b.2, b.3, a)
26022}
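// Illustrative sketch, not part of the generated bindings: the _x2/_x3/_x4
// forms store whole registers back to back (no interleaving). The helper
// name and values are ours.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vst1q_f64_x2_example() -> [f64; 4] {
    let mut out = [0.0f64; 4];
    let pair = float64x2x2_t(vdupq_n_f64(1.0), vdupq_n_f64(2.0));
    // SAFETY: `out` has room for both registers (four f64 values).
    unsafe { vst1q_f64_x2(out.as_mut_ptr(), pair) };
    out // [1.0, 1.0, 2.0, 2.0]
}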
26023#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
26024#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"]
26025#[doc = "## Safety"]
26026#[doc = " * Neon intrinsic unsafe"]
26027#[inline]
26028#[target_feature(enable = "neon")]
26029#[cfg_attr(test, assert_instr(nop, LANE = 0))]
26030#[rustc_legacy_const_generics(2)]
26031#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26032pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
26033 static_assert!(LANE == 0);
26034 *a = simd_extract!(b, LANE as u32);
26035}
26036#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
26037#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"]
26038#[doc = "## Safety"]
26039#[doc = " * Neon intrinsic unsafe"]
26040#[inline]
26041#[target_feature(enable = "neon")]
26042#[cfg_attr(test, assert_instr(nop, LANE = 0))]
26043#[rustc_legacy_const_generics(2)]
26044#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26045pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
26046 static_assert_uimm_bits!(LANE, 1);
26047 *a = simd_extract!(b, LANE as u32);
26048}
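// Illustrative sketch, not part of the generated bindings: the lane forms
// store exactly one selected element. Helper name and values are ours.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vst1q_lane_f64_example() -> f64 {
    let v = vcombine_f64(vdup_n_f64(1.0), vdup_n_f64(2.0)); // lanes [1.0, 2.0]
    let mut out = 0.0f64;
    // SAFETY: `out` is valid for one f64 write and LANE 1 is in bounds.
    unsafe { vst1q_lane_f64::<1>(&mut out, v) };
    out // 2.0
}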
26049#[doc = "Store multiple 2-element structures from two registers"]
26050#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"]
26051#[doc = "## Safety"]
26052#[doc = " * Neon intrinsic unsafe"]
26053#[inline]
26054#[target_feature(enable = "neon")]
26055#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26056#[cfg_attr(test, assert_instr(st1))]
26057pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) {
26058 unsafe extern "unadjusted" {
26059 #[cfg_attr(
26060 any(target_arch = "aarch64", target_arch = "arm64ec"),
26061 link_name = "llvm.aarch64.neon.st2.v1f64.p0"
26062 )]
26063 fn _vst2_f64(a: float64x1_t, b: float64x1_t, ptr: *mut i8);
26064 }
26065 _vst2_f64(b.0, b.1, a as _)
26066}
26067#[doc = "Store multiple 2-element structures from two registers"]
26068#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"]
26069#[doc = "## Safety"]
26070#[doc = " * Neon intrinsic unsafe"]
26071#[inline]
26072#[target_feature(enable = "neon")]
26073#[cfg_attr(test, assert_instr(st2, LANE = 0))]
26074#[rustc_legacy_const_generics(2)]
26075#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26076pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
26077 static_assert!(LANE == 0);
26078 unsafe extern "unadjusted" {
26079 #[cfg_attr(
26080 any(target_arch = "aarch64", target_arch = "arm64ec"),
26081 link_name = "llvm.aarch64.neon.st2lane.v1f64.p0"
26082 )]
26083 fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8);
26084 }
26085 _vst2_lane_f64(b.0, b.1, LANE as i64, a as _)
26086}
26087#[doc = "Store multiple 2-element structures from two registers"]
26088#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"]
26089#[doc = "## Safety"]
26090#[doc = " * Neon intrinsic unsafe"]
26091#[inline]
26092#[target_feature(enable = "neon")]
26093#[cfg_attr(test, assert_instr(st2, LANE = 0))]
26094#[rustc_legacy_const_generics(2)]
26095#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26096pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
26097 static_assert!(LANE == 0);
26098 unsafe extern "unadjusted" {
26099 #[cfg_attr(
26100 any(target_arch = "aarch64", target_arch = "arm64ec"),
26101 link_name = "llvm.aarch64.neon.st2lane.v1i64.p0"
26102 )]
26103 fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8);
26104 }
26105 _vst2_lane_s64(b.0, b.1, LANE as i64, a as _)
26106}
26107#[doc = "Store multiple 2-element structures from two registers"]
26108#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"]
26109#[doc = "## Safety"]
26110#[doc = " * Neon intrinsic unsafe"]
26111#[inline]
26112#[target_feature(enable = "neon,aes")]
26113#[cfg_attr(test, assert_instr(st2, LANE = 0))]
26114#[rustc_legacy_const_generics(2)]
26115#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26116pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
26117 static_assert!(LANE == 0);
26118 vst2_lane_s64::<LANE>(transmute(a), transmute(b))
26119}
26120#[doc = "Store multiple 2-element structures from two registers"]
26121#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"]
26122#[doc = "## Safety"]
26123#[doc = " * Neon intrinsic unsafe"]
26124#[inline]
26125#[target_feature(enable = "neon")]
26126#[cfg_attr(test, assert_instr(st2, LANE = 0))]
26127#[rustc_legacy_const_generics(2)]
26128#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26129pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
26130 static_assert!(LANE == 0);
26131 vst2_lane_s64::<LANE>(transmute(a), transmute(b))
26132}
26133#[doc = "Store multiple 2-element structures from two registers"]
26134#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"]
26135#[doc = "## Safety"]
26136#[doc = " * Neon intrinsic unsafe"]
26137#[inline]
26138#[target_feature(enable = "neon")]
26139#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26140#[cfg_attr(test, assert_instr(st2))]
26141pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
26142 unsafe extern "unadjusted" {
26143 #[cfg_attr(
26144 any(target_arch = "aarch64", target_arch = "arm64ec"),
26145 link_name = "llvm.aarch64.neon.st2.v2f64.p0"
26146 )]
26147 fn _vst2q_f64(a: float64x2_t, b: float64x2_t, ptr: *mut i8);
26148 }
26149 _vst2q_f64(b.0, b.1, a as _)
26150}
26151#[doc = "Store multiple 2-element structures from two registers"]
26152#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"]
26153#[doc = "## Safety"]
26154#[doc = " * Neon intrinsic unsafe"]
26155#[inline]
26156#[target_feature(enable = "neon")]
26157#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26158#[cfg_attr(test, assert_instr(st2))]
26159pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) {
26160 unsafe extern "unadjusted" {
26161 #[cfg_attr(
26162 any(target_arch = "aarch64", target_arch = "arm64ec"),
26163 link_name = "llvm.aarch64.neon.st2.v2i64.p0"
26164 )]
26165 fn _vst2q_s64(a: int64x2_t, b: int64x2_t, ptr: *mut i8);
26166 }
26167 _vst2q_s64(b.0, b.1, a as _)
26168}
26169#[doc = "Store multiple 2-element structures from two registers"]
26170#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"]
26171#[doc = "## Safety"]
26172#[doc = " * Neon intrinsic unsafe"]
26173#[inline]
26174#[target_feature(enable = "neon")]
26175#[cfg_attr(test, assert_instr(st2, LANE = 0))]
26176#[rustc_legacy_const_generics(2)]
26177#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26178pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
26179 static_assert_uimm_bits!(LANE, 1);
26180 unsafe extern "unadjusted" {
26181 #[cfg_attr(
26182 any(target_arch = "aarch64", target_arch = "arm64ec"),
26183 link_name = "llvm.aarch64.neon.st2lane.v2f64.p0"
26184 )]
26185 fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8);
26186 }
26187 _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _)
26188}
26189#[doc = "Store multiple 2-element structures from two registers"]
26190#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"]
26191#[doc = "## Safety"]
26192#[doc = " * Neon intrinsic unsafe"]
26193#[inline]
26194#[target_feature(enable = "neon")]
26195#[cfg_attr(test, assert_instr(st2, LANE = 0))]
26196#[rustc_legacy_const_generics(2)]
26197#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26198pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
26199 static_assert_uimm_bits!(LANE, 4);
26200 unsafe extern "unadjusted" {
26201 #[cfg_attr(
26202 any(target_arch = "aarch64", target_arch = "arm64ec"),
26203 link_name = "llvm.aarch64.neon.st2lane.v16i8.p0"
26204 )]
26205 fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8);
26206 }
26207 _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _)
26208}
26209#[doc = "Store multiple 2-element structures from two registers"]
26210#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"]
26211#[doc = "## Safety"]
26212#[doc = " * Neon intrinsic unsafe"]
26213#[inline]
26214#[target_feature(enable = "neon")]
26215#[cfg_attr(test, assert_instr(st2, LANE = 0))]
26216#[rustc_legacy_const_generics(2)]
26217#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26218pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
26219 static_assert_uimm_bits!(LANE, 1);
26220 unsafe extern "unadjusted" {
26221 #[cfg_attr(
26222 any(target_arch = "aarch64", target_arch = "arm64ec"),
26223 link_name = "llvm.aarch64.neon.st2lane.v2i64.p0"
26224 )]
26225 fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8);
26226 }
26227 _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _)
26228}
26229#[doc = "Store multiple 2-element structures from two registers"]
26230#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"]
26231#[doc = "## Safety"]
26232#[doc = " * Neon intrinsic unsafe"]
26233#[inline]
26234#[target_feature(enable = "neon,aes")]
26235#[cfg_attr(test, assert_instr(st2, LANE = 0))]
26236#[rustc_legacy_const_generics(2)]
26237#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26238pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
26239 static_assert_uimm_bits!(LANE, 1);
26240 vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
26241}
26242#[doc = "Store multiple 2-element structures from two registers"]
26243#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"]
26244#[doc = "## Safety"]
26245#[doc = " * Neon intrinsic unsafe"]
26246#[inline]
26247#[target_feature(enable = "neon")]
26248#[cfg_attr(test, assert_instr(st2, LANE = 0))]
26249#[rustc_legacy_const_generics(2)]
26250#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26251pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
26252 static_assert_uimm_bits!(LANE, 4);
26253 vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
26254}
26255#[doc = "Store multiple 2-element structures from two registers"]
26256#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"]
26257#[doc = "## Safety"]
26258#[doc = " * Neon intrinsic unsafe"]
26259#[inline]
26260#[target_feature(enable = "neon")]
26261#[cfg_attr(test, assert_instr(st2, LANE = 0))]
26262#[rustc_legacy_const_generics(2)]
26263#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26264pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
26265 static_assert_uimm_bits!(LANE, 1);
26266 vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
26267}
26268#[doc = "Store multiple 2-element structures from two registers"]
26269#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"]
26270#[doc = "## Safety"]
26271#[doc = " * Neon intrinsic unsafe"]
26272#[inline]
26273#[target_feature(enable = "neon")]
26274#[cfg_attr(test, assert_instr(st2, LANE = 0))]
26275#[rustc_legacy_const_generics(2)]
26276#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26277pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
26278 static_assert_uimm_bits!(LANE, 4);
26279 vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
26280}
26281#[doc = "Store multiple 2-element structures from two registers"]
26282#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"]
26283#[doc = "## Safety"]
26284#[doc = " * Neon intrinsic unsafe"]
26285#[inline]
26286#[target_feature(enable = "neon,aes")]
26287#[cfg_attr(test, assert_instr(st2))]
26288#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26289pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) {
26290 vst2q_s64(transmute(a), transmute(b))
26291}
26292#[doc = "Store multiple 2-element structures from two registers"]
26293#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"]
26294#[doc = "## Safety"]
26295#[doc = " * Neon intrinsic unsafe"]
26296#[inline]
26297#[target_feature(enable = "neon")]
26298#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26299#[cfg_attr(test, assert_instr(st2))]
26300pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) {
26301 vst2q_s64(transmute(a), transmute(b))
26302}
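// Illustrative sketch, not part of the generated bindings: unlike st1, st2
// interleaves its two registers on store ([a0, b0, a1, b1, ...]). The helper
// name and values are ours.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vst2q_s64_example() -> [i64; 4] {
    let mut out = [0i64; 4];
    let regs = int64x2x2_t(vdupq_n_s64(1), vdupq_n_s64(2));
    // SAFETY: `out` has room for the four interleaved elements.
    unsafe { vst2q_s64(out.as_mut_ptr(), regs) };
    out // [1, 2, 1, 2]
}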
26303#[doc = "Store multiple 3-element structures from three registers"]
26304#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"]
26305#[doc = "## Safety"]
26306#[doc = " * Neon intrinsic unsafe"]
26307#[inline]
26308#[target_feature(enable = "neon")]
26309#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26310#[cfg_attr(test, assert_instr(nop))]
26311pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) {
26312 unsafe extern "unadjusted" {
26313 #[cfg_attr(
26314 any(target_arch = "aarch64", target_arch = "arm64ec"),
26315 link_name = "llvm.aarch64.neon.st3.v1f64.p0"
26316 )]
26317 fn _vst3_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut i8);
26318 }
26319 _vst3_f64(b.0, b.1, b.2, a as _)
26320}
26321#[doc = "Store multiple 3-element structures from three registers"]
26322#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"]
26323#[doc = "## Safety"]
26324#[doc = " * Neon instrinsic unsafe"]
26325#[inline]
26326#[target_feature(enable = "neon")]
26327#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26328#[rustc_legacy_const_generics(2)]
26329#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26330pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
26331 static_assert!(LANE == 0);
26332 unsafe extern "unadjusted" {
26333 #[cfg_attr(
26334 any(target_arch = "aarch64", target_arch = "arm64ec"),
26335 link_name = "llvm.aarch64.neon.st3lane.v1f64.p0"
26336 )]
26337 fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8);
26338 }
26339 _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
26340}
26341#[doc = "Store multiple 3-element structures from three registers"]
26342#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"]
26343#[doc = "## Safety"]
26344#[doc = " * Neon instrinsic unsafe"]
26345#[inline]
26346#[target_feature(enable = "neon")]
26347#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26348#[rustc_legacy_const_generics(2)]
26349#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26350pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
26351 static_assert!(LANE == 0);
26352 unsafe extern "unadjusted" {
26353 #[cfg_attr(
26354 any(target_arch = "aarch64", target_arch = "arm64ec"),
26355 link_name = "llvm.aarch64.neon.st3lane.v1i64.p0"
26356 )]
26357 fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8);
26358 }
26359 _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
26360}
26361#[doc = "Store multiple 3-element structures from three registers"]
26362#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"]
26363#[doc = "## Safety"]
26364#[doc = " * Neon instrinsic unsafe"]
26365#[inline]
26366#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26367#[target_feature(enable = "neon,aes")]
26368#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26369#[rustc_legacy_const_generics(2)]
26370pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
26371 static_assert!(LANE == 0);
26372 vst3_lane_s64::<LANE>(transmute(a), transmute(b))
26373}
26374#[doc = "Store multiple 3-element structures from three registers"]
26375#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"]
26376#[doc = "## Safety"]
26377#[doc = " * Neon instrinsic unsafe"]
26378#[inline]
26379#[target_feature(enable = "neon")]
26380#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26381#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26382#[rustc_legacy_const_generics(2)]
26383pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
26384 static_assert!(LANE == 0);
26385 vst3_lane_s64::<LANE>(transmute(a), transmute(b))
26386}
26387#[doc = "Store multiple 3-element structures from three registers"]
26388#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"]
26389#[doc = "## Safety"]
26390#[doc = " * Neon instrinsic unsafe"]
26391#[inline]
26392#[target_feature(enable = "neon")]
26393#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26394#[cfg_attr(test, assert_instr(st3))]
26395pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
26396 unsafe extern "unadjusted" {
26397 #[cfg_attr(
26398 any(target_arch = "aarch64", target_arch = "arm64ec"),
26399 link_name = "llvm.aarch64.neon.st3.v2f64.p0"
26400 )]
26401 fn _vst3q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8);
26402 }
26403 _vst3q_f64(b.0, b.1, b.2, a as _)
26404}
26405#[doc = "Store multiple 3-element structures from three registers"]
26406#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"]
26407#[doc = "## Safety"]
26408#[doc = " * Neon instrinsic unsafe"]
26409#[inline]
26410#[target_feature(enable = "neon")]
26411#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26412#[cfg_attr(test, assert_instr(st3))]
26413pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) {
26414 unsafe extern "unadjusted" {
26415 #[cfg_attr(
26416 any(target_arch = "aarch64", target_arch = "arm64ec"),
26417 link_name = "llvm.aarch64.neon.st3.v2i64.p0"
26418 )]
26419 fn _vst3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8);
26420 }
26421 _vst3q_s64(b.0, b.1, b.2, a as _)
26422}
26423#[doc = "Store multiple 3-element structures from three registers"]
26424#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"]
26425#[doc = "## Safety"]
26426#[doc = " * Neon instrinsic unsafe"]
26427#[inline]
26428#[target_feature(enable = "neon")]
26429#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26430#[rustc_legacy_const_generics(2)]
26431#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26432pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
26433 static_assert_uimm_bits!(LANE, 1);
26434 unsafe extern "unadjusted" {
26435 #[cfg_attr(
26436 any(target_arch = "aarch64", target_arch = "arm64ec"),
26437 link_name = "llvm.aarch64.neon.st3lane.v2f64.p0"
26438 )]
26439 fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8);
26440 }
26441 _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
26442}
26443#[doc = "Store multiple 3-element structures from three registers"]
26444#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"]
26445#[doc = "## Safety"]
26446#[doc = " * Neon instrinsic unsafe"]
26447#[inline]
26448#[target_feature(enable = "neon")]
26449#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26450#[rustc_legacy_const_generics(2)]
26451#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26452pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
26453 static_assert_uimm_bits!(LANE, 4);
26454 unsafe extern "unadjusted" {
26455 #[cfg_attr(
26456 any(target_arch = "aarch64", target_arch = "arm64ec"),
26457 link_name = "llvm.aarch64.neon.st3lane.v16i8.p0"
26458 )]
26459 fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8);
26460 }
26461 _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
26462}
26463#[doc = "Store multiple 3-element structures from three registers"]
26464#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"]
26465#[doc = "## Safety"]
26466#[doc = " * Neon instrinsic unsafe"]
26467#[inline]
26468#[target_feature(enable = "neon")]
26469#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26470#[rustc_legacy_const_generics(2)]
26471#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26472pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
26473 static_assert_uimm_bits!(LANE, 1);
26474 unsafe extern "unadjusted" {
26475 #[cfg_attr(
26476 any(target_arch = "aarch64", target_arch = "arm64ec"),
26477 link_name = "llvm.aarch64.neon.st3lane.v2i64.p0"
26478 )]
26479 fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8);
26480 }
26481 _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
26482}
26483#[doc = "Store multiple 3-element structures from three registers"]
26484#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"]
26485#[doc = "## Safety"]
26486#[doc = " * Neon instrinsic unsafe"]
26487#[inline]
26488#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26489#[target_feature(enable = "neon,aes")]
26490#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26491#[rustc_legacy_const_generics(2)]
26492pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
26493 static_assert_uimm_bits!(LANE, 1);
26494 vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
26495}
26496#[doc = "Store multiple 3-element structures from three registers"]
26497#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"]
26498#[doc = "## Safety"]
26499#[doc = " * Neon instrinsic unsafe"]
26500#[inline]
26501#[target_feature(enable = "neon")]
26502#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26503#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26504#[rustc_legacy_const_generics(2)]
26505pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
26506 static_assert_uimm_bits!(LANE, 4);
26507 vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
26508}
26509#[doc = "Store multiple 3-element structures from three registers"]
26510#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"]
26511#[doc = "## Safety"]
26512#[doc = " * Neon instrinsic unsafe"]
26513#[inline]
26514#[target_feature(enable = "neon")]
26515#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26516#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26517#[rustc_legacy_const_generics(2)]
26518pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
26519 static_assert_uimm_bits!(LANE, 1);
26520 vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
26521}
26522#[doc = "Store multiple 3-element structures from three registers"]
26523#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"]
26524#[doc = "## Safety"]
26525#[doc = " * Neon instrinsic unsafe"]
26526#[inline]
26527#[target_feature(enable = "neon")]
26528#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26529#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26530#[rustc_legacy_const_generics(2)]
26531pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
26532 static_assert_uimm_bits!(LANE, 4);
26533 vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
26534}
26535#[doc = "Store multiple 3-element structures from three registers"]
26536#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"]
26537#[doc = "## Safety"]
26538#[doc = " * Neon instrinsic unsafe"]
26539#[inline]
26540#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26541#[target_feature(enable = "neon,aes")]
26542#[cfg_attr(test, assert_instr(st3))]
26543pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) {
26544 vst3q_s64(transmute(a), transmute(b))
26545}
26546#[doc = "Store multiple 3-element structures from three registers"]
26547#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"]
26548#[doc = "## Safety"]
26549#[doc = " * Neon instrinsic unsafe"]
26550#[inline]
26551#[target_feature(enable = "neon")]
26552#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26553#[cfg_attr(test, assert_instr(st3))]
26554pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) {
26555 vst3q_s64(transmute(a), transmute(b))
26556}
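// Illustrative usage sketch (an assumption added for clarity, not part of the
// generated bindings; the helper name is hypothetical): a lane store such as
// `vst3q_lane_s8::<LANE>` writes only the selected lane of each of the three
// registers, so exactly three bytes are stored here.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
fn _example_vst3q_lane_s8() {
    let t = int8x16x3_t(vdupq_n_s8(1), vdupq_n_s8(2), vdupq_n_s8(3));
    let mut out = [0i8; 3];
    // Store lane 0 of each register: out == [1, 2, 3].
    unsafe { vst3q_lane_s8::<0>(out.as_mut_ptr(), t) };
    assert_eq!(out, [1, 2, 3]);
}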
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v1f64.p0"
        )]
        fn _vst4_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut i8);
    }
    _vst4_f64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1f64.p0"
        )]
        fn _vst4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1i64.p0"
        )]
        fn _vst4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
    static_assert!(LANE == 0);
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
    static_assert!(LANE == 0);
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v2f64.p0"
        )]
        fn _vst4q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8);
    }
    _vst4q_f64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v2i64.p0"
        )]
        fn _vst4q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8);
    }
    _vst4q_s64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2f64.p0"
        )]
        fn _vst4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v16i8.p0"
        )]
        fn _vst4q_lane_s8(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2i64.p0"
        )]
        fn _vst4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) {
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) {
    vst4q_s64(transmute(a), transmute(b))
}
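// Illustrative usage sketch (an assumption added for clarity, not part of the
// generated bindings; the helper name is hypothetical): `vst4q_u64`
// interleaves four registers, writing b.0[i], b.1[i], b.2[i], b.3[i] for
// each lane i.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
fn _example_vst4q_u64() {
    let t = uint64x2x4_t(
        vdupq_n_u64(1),
        vdupq_n_u64(2),
        vdupq_n_u64(3),
        vdupq_n_u64(4),
    );
    let mut out = [0u64; 8];
    unsafe { vst4q_u64(out.as_mut_ptr(), t) };
    assert_eq!(out, [1, 2, 3, 4, 1, 2, 3, 4]);
}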
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vsubd_s64(a: i64, b: i64) -> i64 {
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vsubd_u64(a: u64, b: u64) -> u64 {
    a.wrapping_sub(b)
}
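// Quick check (an assumption added for clarity, not part of the generated
// bindings; the helper name is hypothetical): the scalar forms wrap on
// overflow rather than panicking, matching `wrapping_sub`.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
fn _example_vsubd() {
    assert_eq!(vsubd_u64(0, 1), u64::MAX);
    assert_eq!(vsubd_s64(i64::MIN, 1), i64::MAX);
}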
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vsubh_f16(a: f16, b: f16) -> f16 {
    a - b
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl))]
pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int16x8_t = simd_cast(c);
        let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int16x8_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl))]
pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int32x4_t = simd_cast(c);
        let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: int32x4_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl))]
pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int64x2_t = simd_cast(c);
        let e: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: int64x2_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl))]
pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint16x8_t = simd_cast(c);
        let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint16x8_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl))]
pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint32x4_t = simd_cast(c);
        let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: uint32x4_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl))]
pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint64x2_t = simd_cast(c);
        let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: uint64x2_t = simd_cast(e);
        simd_sub(d, f)
    }
}
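// Illustrative usage sketch (an assumption added for clarity, not part of the
// generated bindings; the helper name is hypothetical): the "subtract long
// high" forms widen the high halves of both operands first, then subtract in
// the wide type, where the result may still wrap.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
fn _example_vsubl_high_u8() {
    let a: uint8x16_t = vdupq_n_u8(0);
    let b: uint8x16_t = vdupq_n_u8(1);
    // Each lane is 0u16.wrapping_sub(1) == u16::MAX after zero-extension.
    let r: uint16x8_t = vsubl_high_u8(a, b);
    let mut out = [0u16; 8];
    unsafe { vst1q_u16(out.as_mut_ptr(), r) };
    assert_eq!(out, [u16::MAX; 8]);
}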
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw))]
pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw))]
pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw))]
pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        let c: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw))]
pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw))]
pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw))]
pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
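// Illustrative usage sketch (an assumption added for clarity, not part of the
// generated bindings; the helper name is hypothetical): the "subtract wide
// high" forms widen only the second operand's high half before subtracting
// it from the already-wide `a`.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
fn _example_vsubw_high_u8() {
    let a: uint16x8_t = vdupq_n_u16(10);
    let b: uint8x16_t = vdupq_n_u8(3);
    // Each u16 lane is 10 - 3 == 7.
    let r: uint16x8_t = vsubw_high_u8(a, b);
    let mut out = [0u16; 8];
    unsafe { vst1q_u16(out.as_mut_ptr(), r) };
    assert_eq!(out, [7u16; 8]);
}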
#[doc = "Dot product index form with signed and unsigned integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vsudot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: uint8x16_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: uint32x4_t = transmute(c);
        let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vusdot_s32(a, transmute(c), b)
    }
}
#[doc = "Dot product index form with signed and unsigned integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vsudotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: uint8x16_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: uint32x4_t = transmute(c);
        let c: uint32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vusdotq_s32(a, transmute(c), b)
    }
}
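// Semantics sketch for the sudot lane forms (an assumption added for clarity,
// inferred from the delegation to vusdot above): for each 32-bit accumulator
// lane i,
//     r[i] = a[i] + sum over j in 0..4 of (b[4*i + j] as i32) * (c_sel[j] as i32)
// where c_sel is the group of four unsigned bytes selected by LANE from `c`
// and broadcast to every lane. E.g. with every byte of `b` equal to -1 and
// the selected bytes of `c` equal to 2, each lane accumulates 4 * (-1 * 2) = -8.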
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    vqtbl1_s8(vcombine_s8(a, unsafe { crate::mem::zeroed() }), unsafe {
        transmute(b)
    })
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    vqtbl1_u8(vcombine_u8(a, unsafe { crate::mem::zeroed() }), b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t {
    vqtbl1_p8(vcombine_p8(a, unsafe { crate::mem::zeroed() }), b)
}
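// Illustrative usage sketch (an assumption added for clarity, not part of the
// generated bindings; the helper name is hypothetical): `vtbl1_u8` looks each
// index up in an 8-byte table; indices of 8 or more yield 0.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
fn _example_vtbl1_u8() {
    let table: uint8x8_t = vdup_n_u8(7);
    // Every index is 0, which is in range, so every result byte is 7.
    let r = vtbl1_u8(table, vdup_n_u8(0));
    let mut out = [0u8; 8];
    unsafe { vst1_u8(out.as_mut_ptr(), r) };
    assert_eq!(out, [7u8; 8]);
}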
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t {
    unsafe { vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x8x2_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x8x2_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t {
    let x = int8x16x2_t(
        vcombine_s8(a.0, a.1),
        vcombine_s8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x8x3_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x8x3_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t {
    let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x8x4_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x8x4_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
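// Illustrative usage sketch (an assumption added for clarity, not part of the
// generated bindings; the helper name is hypothetical): `vtbl4_u8` treats its
// four 8-byte inputs as one 32-byte table, so any index below 32 is in range.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
fn _example_vtbl4_u8() {
    let t = uint8x8x4_t(vdup_n_u8(10), vdup_n_u8(20), vdup_n_u8(30), vdup_n_u8(40));
    // Index 31 selects the last byte of the fourth table register.
    let r = vtbl4_u8(t, vdup_n_u8(31));
    let mut out = [0u8; 8];
    unsafe { vst1_u8(out.as_mut_ptr(), r) };
    assert_eq!(out, [40u8; 8]);
}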
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
    unsafe {
        simd_select(
            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_s8(b, crate::mem::zeroed())),
                transmute(c),
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_u8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_p8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
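// Illustrative usage sketch (an assumption added for clarity, not part of the
// generated bindings; the helper name is hypothetical): unlike the plain
// look-ups, the extended forms fall back to the corresponding element of `a`
// when an index is out of range, instead of producing 0.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
fn _example_vtbx1_u8() {
    let fallback: uint8x8_t = vdup_n_u8(99);
    let table: uint8x8_t = vdup_n_u8(7);
    // Index 200 is out of range for an 8-byte table, so `fallback` wins.
    let r = vtbx1_u8(fallback, table, vdup_n_u8(200));
    let mut out = [0u8; 8];
    unsafe { vst1_u8(out.as_mut_ptr(), r) };
    assert_eq!(out, [99u8; 8]);
}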
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t {
    unsafe { vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x8x2_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t =
            transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x8x2_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t =
            transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t {
    let x = int8x16x2_t(
        vcombine_s8(b.0, b.1),
        vcombine_s8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<int8x8_t, int8x8_t>(transmute(c), transmute(i8x8::splat(24))),
            transmute(vqtbx2(
                transmute(a),
                transmute(x.0),
                transmute(x.1),
                transmute(c),
            )),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x8x3_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: uint8x8_t = transmute(simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x8x3_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: poly8x8_t = transmute(simd_select(
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t {
    unsafe {
        vqtbx2(
            transmute(a),
            transmute(vcombine_s8(b.0, b.1)),
            transmute(vcombine_s8(b.2, b.3)),
            transmute(c),
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x8x4_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x8x4_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
27662 b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
27663 b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
27664 b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
27665 b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
27666 let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
27667 unsafe {
27668 let ret_val: poly8x8_t = transmute(vqtbx2(
27669 transmute(a),
27670 transmute(vcombine_p8(b.0, b.1)),
27671 transmute(vcombine_p8(b.2, b.3)),
27672 c,
27673 ));
27674 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
27675 }
27676}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
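// Illustrative sketch (not part of the generated bindings): vtrn1 gathers the
// even-indexed lanes of both inputs and vtrn2 the odd-indexed ones, which is
// the classic 2x2 transpose of adjacent lane pairs. This helper is only
// type-checked under `cfg(test)` and assumes an AArch64 target, where NEON is
// always present.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vtrn_u16_example() {
    let a: [u16; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
    let b: [u16; 8] = [10, 11, 12, 13, 14, 15, 16, 17];
    let mut even = [0u16; 8];
    let mut odd = [0u16; 8];
    unsafe {
        let (va, vb) = (vld1q_u16(a.as_ptr()), vld1q_u16(b.as_ptr()));
        vst1q_u16(even.as_mut_ptr(), vtrn1q_u16(va, vb));
        vst1q_u16(odd.as_mut_ptr(), vtrn2q_u16(va, vb));
    }
    // Even lanes of `a` interleaved with even lanes of `b`; likewise for odd.
    assert_eq!(even, [0, 10, 2, 12, 4, 14, 6, 16]);
    assert_eq!(odd, [1, 11, 3, 13, 5, 15, 7, 17]);
}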
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe {
        let c: int64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe {
        let c: int64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    unsafe {
        let c: poly64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    unsafe {
        let c: poly64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe {
        let c: uint64x1_t = simd_and(a, b);
        let d: u64x1 = u64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe {
        let c: uint64x2_t = simd_and(a, b);
        let d: u64x2 = u64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vtst_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vtst_u64(transmute(a), transmute(b))) }
}
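// Illustrative sketch (not part of the generated bindings): the scalar vtstd
// forms reduce to "is (a & b) nonzero?", producing an all-ones or all-zeros
// 64-bit mask. Only type-checked under `cfg(test)`; assumes an AArch64
// target, where NEON is always present.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vtstd_example() {
    // a and b share bit 1, so the test reports "bits nonzero" with all ones.
    assert_eq!(vtstd_s64(0b1010, 0b0010), u64::MAX);
    // Disjoint bit patterns produce an all-zeros mask.
    assert_eq!(vtstd_s64(0b1010, 0b0101), 0);
}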
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i8"
        )]
        fn _vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vuqadd_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v16i8"
        )]
        fn _vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vuqaddq_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i16"
        )]
        fn _vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t;
    }
    unsafe { _vuqadd_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i16"
        )]
        fn _vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t;
    }
    unsafe { _vuqaddq_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i32"
        )]
        fn _vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t;
    }
    unsafe { _vuqadd_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i32"
        )]
        fn _vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t;
    }
    unsafe { _vuqaddq_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v1i64"
        )]
        fn _vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t;
    }
    unsafe { _vuqadd_s64(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i64"
        )]
        fn _vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t;
    }
    unsafe { _vuqaddq_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddb_s8(a: i8, b: u8) -> i8 {
    unsafe { simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddh_s16(a: i16, b: u16) -> i16 {
    unsafe { simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddd_s64(a: i64, b: u64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i64"
        )]
        fn _vuqaddd_s64(a: i64, b: u64) -> i64;
    }
    unsafe { _vuqaddd_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqadds_s32(a: i32, b: u32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i32"
        )]
        fn _vuqadds_s32(a: i32, b: u32) -> i32;
    }
    unsafe { _vuqadds_s32(a, b) }
}
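// Illustrative sketch (not part of the generated bindings): the suqadd family
// adds an unsigned value into a signed accumulator and saturates at the
// signed maximum instead of wrapping. Only type-checked under `cfg(test)`;
// assumes an AArch64 target, where NEON is always present.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vuqadd_example() {
    // 120 + 100 exceeds i8::MAX, so the sum clamps at 127 rather than wrapping.
    assert_eq!(vuqaddb_s8(120, 100), i8::MAX);
    // A negative accumulator can absorb the unsigned addend without saturating.
    assert_eq!(vuqaddb_s8(-100, 50), -50);
}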
#[doc = "Dot product index form with unsigned and signed integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vusdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: uint8x8_t, c: int8x16_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t = transmute(c);
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vusdot_s32(a, b, transmute(c))
    }
}
#[doc = "Dot product index form with unsigned and signed integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vusdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t = transmute(c);
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vusdotq_s32(a, b, transmute(c))
    }
}
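// Illustrative sketch (not part of the generated bindings): the lane forms of
// usdot broadcast one 4-byte group of `c` (selected by LANE, reinterpreted as
// signed bytes) and accumulate a 4-way dot product with the unsigned bytes of
// `b` into each 32-bit lane of `a`. Only type-checked under `cfg(test)`;
// actually running it would additionally require FEAT_I8MM at runtime.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon,i8mm")]
#[allow(dead_code)]
fn vusdot_laneq_s32_example() {
    let b_bytes: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
    let c_bytes: [i8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
    let mut out = [0i32; 2];
    unsafe {
        let b = vld1_u8(b_bytes.as_ptr());
        let c = vld1q_s8(c_bytes.as_ptr());
        // LANE = 3 selects bytes [12, 13, 14, 15] of `c` for both output lanes.
        let r = vusdot_laneq_s32::<3>(vdup_n_s32(0), b, c);
        vst1_s32(out.as_mut_ptr(), r);
    }
    // Lane 0: 1*12 + 2*13 + 3*14 + 4*15 = 140; lane 1: 5*12 + 6*13 + 7*14 + 8*15 = 356.
    assert_eq!(out, [140, 356]);
}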
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
28849#[doc = "Unzip vectors"]
28850#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"]
28851#[inline]
28852#[target_feature(enable = "neon")]
28853#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28854#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
28855pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
28856 unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
28857}
28858#[doc = "Unzip vectors"]
28859#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"]
28860#[inline]
28861#[target_feature(enable = "neon")]
28862#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28863#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
28864pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
28865 unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
28866}
28867#[doc = "Unzip vectors"]
28868#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"]
28869#[inline]
28870#[target_feature(enable = "neon")]
28871#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28872#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
28873pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
28874 unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
28875}
28876#[doc = "Unzip vectors"]
28877#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"]
28878#[inline]
28879#[target_feature(enable = "neon")]
28880#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28881#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
28882pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
28883 unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
28884}
28885#[doc = "Unzip vectors"]
28886#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"]
28887#[inline]
28888#[target_feature(enable = "neon")]
28889#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28890#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
28891pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
28892 unsafe {
28893 simd_shuffle!(
28894 a,
28895 b,
28896 [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
28897 )
28898 }
28899}
28900#[doc = "Unzip vectors"]
28901#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"]
28902#[inline]
28903#[target_feature(enable = "neon")]
28904#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28905#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
28906pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
28907 unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
28908}
28909#[doc = "Unzip vectors"]
28910#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"]
28911#[inline]
28912#[target_feature(enable = "neon")]
28913#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28914#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
28915pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
28916 unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
28917}
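// A minimal illustrative sketch, not part of the generated bindings: the
// vuzp2 family gathers the odd-indexed lanes of both inputs into one vector.
// The module name and test are hypothetical; the test assumes an aarch64
// target, where NEON is a baseline feature.
#[cfg(all(test, target_arch = "aarch64"))]
mod vuzp2_sketch {
    use super::*;

    #[test]
    fn vuzp2q_s16_gathers_odd_lanes() {
        unsafe {
            let a = vld1q_s16([0i16, 1, 2, 3, 4, 5, 6, 7].as_ptr());
            let b = vld1q_s16([8i16, 9, 10, 11, 12, 13, 14, 15].as_ptr());
            let r = vuzp2q_s16(a, b);
            let mut out = [0i16; 8];
            vst1q_s16(out.as_mut_ptr(), r);
            // Odd lanes of `a`, then odd lanes of `b`.
            assert_eq!(out, [1, 3, 5, 7, 9, 11, 13, 15]);
        }
    }
}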
#[doc = "Exclusive OR and rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(xar, IMM6 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert_uimm_bits!(IMM6, 6);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.xar"
        )]
        fn _vxarq_u64(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t;
    }
    unsafe { _vxarq_u64(a, b, IMM6 as i64) }
}
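// A minimal illustrative sketch, not part of the generated bindings: per
// 64-bit lane, XAR computes (a ^ b) rotated right by the IMM6 constant, so
// each lane can be checked against scalar `rotate_right`. The module name
// and test are hypothetical; they assume an aarch64 target with `std`
// available so the `sha3` feature can be detected at runtime.
#[cfg(all(test, target_arch = "aarch64"))]
mod vxarq_sketch {
    use super::*;

    #[test]
    fn vxarq_u64_matches_scalar_rotate() {
        if !std::arch::is_aarch64_feature_detected!("sha3") {
            return; // skip on CPUs without the SHA3 extension
        }
        unsafe {
            let a = vld1q_u64([0x0123_4567_89ab_cdef_u64, 1].as_ptr());
            let b = vld1q_u64([0xfedc_ba98_7654_3210_u64, 2].as_ptr());
            let r = vxarq_u64::<8>(a, b);
            let mut out = [0u64; 2];
            vst1q_u64(out.as_mut_ptr(), r);
            let expected0 = (0x0123_4567_89ab_cdef_u64 ^ 0xfedc_ba98_7654_3210).rotate_right(8);
            assert_eq!(out, [expected0, (1u64 ^ 2).rotate_right(8)]);
        }
    }
}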
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
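// A minimal illustrative sketch, not part of the generated bindings: the
// vzip1 family interleaves the low halves of its inputs, alternating lanes
// from `a` and `b`. The module name and test are hypothetical; the test
// assumes an aarch64 target, where NEON is a baseline feature.
#[cfg(all(test, target_arch = "aarch64"))]
mod vzip1_sketch {
    use super::*;

    #[test]
    fn vzip1q_u32_interleaves_low_halves() {
        unsafe {
            let a = vld1q_u32([0u32, 1, 2, 3].as_ptr());
            let b = vld1q_u32([10u32, 11, 12, 13].as_ptr());
            let r = vzip1q_u32(a, b);
            let mut out = [0u32; 4];
            vst1q_u32(out.as_mut_ptr(), r);
            // Lanes alternate from the low halves: a[0], b[0], a[1], b[1].
            assert_eq!(out, [0, 10, 1, 11]);
        }
    }
}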
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
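// A minimal illustrative sketch, not part of the generated bindings: the
// vzip2 family is the high-half counterpart of vzip1, interleaving the upper
// lanes of its inputs. The module name and test are hypothetical; the test
// assumes an aarch64 target, where NEON is a baseline feature.
#[cfg(all(test, target_arch = "aarch64"))]
mod vzip2_sketch {
    use super::*;

    #[test]
    fn vzip2q_u32_interleaves_high_halves() {
        unsafe {
            let a = vld1q_u32([0u32, 1, 2, 3].as_ptr());
            let b = vld1q_u32([10u32, 11, 12, 13].as_ptr());
            let r = vzip2q_u32(a, b);
            let mut out = [0u32; 4];
            vst1q_u32(out.as_mut_ptr(), r);
            // Lanes alternate from the high halves: a[2], b[2], a[3], b[3].
            assert_eq!(out, [2, 12, 3, 13]);
        }
    }
}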