/*===---- arm_fp16.h - ARM FP16 intrinsics ---------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __ARM_FP16_H
#define __ARM_FP16_H

#include <stdint.h>

typedef __fp16 float16_t;
#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
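/* __ai is a convenience attribute for always-inline function definitions; the
 * intrinsics below are plain macros and do not use it, and it is #undef'd
 * again at the end of this header. */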

#if defined(__aarch64__)
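/* Scalar half-precision (FP16) intrinsics, available only when targeting
 * AArch64. Each intrinsic is a statement-expression macro, so its operands
 * are evaluated exactly once before being passed to the corresponding
 * __builtin_neon_* builtin. */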
#define vabdh_f16(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (float16_t) __builtin_neon_vabdh_f16(__s0, __s1); \
  __ret; \
})
#define vabsh_f16(__p0) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vabsh_f16(__s0); \
  __ret; \
})
#define vaddh_f16(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (float16_t) __builtin_neon_vaddh_f16(__s0, __s1); \
  __ret; \
})
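/* Comparisons return a uint16_t mask: all ones (0xFFFF) when the condition
 * holds, 0 otherwise. The vca* forms compare absolute values; the *z forms
 * compare against zero. */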
#define vcageh_f16(__p0, __p1) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (uint16_t) __builtin_neon_vcageh_f16(__s0, __s1); \
  __ret; \
})
#define vcagth_f16(__p0, __p1) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (uint16_t) __builtin_neon_vcagth_f16(__s0, __s1); \
  __ret; \
})
#define vcaleh_f16(__p0, __p1) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (uint16_t) __builtin_neon_vcaleh_f16(__s0, __s1); \
  __ret; \
})
#define vcalth_f16(__p0, __p1) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (uint16_t) __builtin_neon_vcalth_f16(__s0, __s1); \
  __ret; \
})
#define vceqh_f16(__p0, __p1) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (uint16_t) __builtin_neon_vceqh_f16(__s0, __s1); \
  __ret; \
})
#define vceqzh_f16(__p0) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint16_t) __builtin_neon_vceqzh_f16(__s0); \
  __ret; \
})
#define vcgeh_f16(__p0, __p1) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (uint16_t) __builtin_neon_vcgeh_f16(__s0, __s1); \
  __ret; \
})
#define vcgezh_f16(__p0) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint16_t) __builtin_neon_vcgezh_f16(__s0); \
  __ret; \
})
#define vcgth_f16(__p0, __p1) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (uint16_t) __builtin_neon_vcgth_f16(__s0, __s1); \
  __ret; \
})
#define vcgtzh_f16(__p0) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint16_t) __builtin_neon_vcgtzh_f16(__s0); \
  __ret; \
})
#define vcleh_f16(__p0, __p1) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (uint16_t) __builtin_neon_vcleh_f16(__s0, __s1); \
  __ret; \
})
#define vclezh_f16(__p0) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint16_t) __builtin_neon_vclezh_f16(__s0); \
  __ret; \
})
#define vclth_f16(__p0, __p1) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (uint16_t) __builtin_neon_vclth_f16(__s0, __s1); \
  __ret; \
})
#define vcltzh_f16(__p0) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint16_t) __builtin_neon_vcltzh_f16(__s0); \
  __ret; \
})
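/* Fixed-point conversions from half precision: the second argument is the
 * number of fractional bits and must be an integer constant expression. */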
#define vcvth_n_s16_f16(__p0, __p1) __extension__ ({ \
  int16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int16_t) __builtin_neon_vcvth_n_s16_f16(__s0, __p1); \
  __ret; \
})
#define vcvth_n_s32_f16(__p0, __p1) __extension__ ({ \
  int32_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int32_t) __builtin_neon_vcvth_n_s32_f16(__s0, __p1); \
  __ret; \
})
#define vcvth_n_s64_f16(__p0, __p1) __extension__ ({ \
  int64_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int64_t) __builtin_neon_vcvth_n_s64_f16(__s0, __p1); \
  __ret; \
})
#define vcvth_n_u16_f16(__p0, __p1) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint16_t) __builtin_neon_vcvth_n_u16_f16(__s0, __p1); \
  __ret; \
})
#define vcvth_n_u32_f16(__p0, __p1) __extension__ ({ \
  uint32_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint32_t) __builtin_neon_vcvth_n_u32_f16(__s0, __p1); \
  __ret; \
})
#define vcvth_n_u64_f16(__p0, __p1) __extension__ ({ \
  uint64_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint64_t) __builtin_neon_vcvth_n_u64_f16(__s0, __p1); \
  __ret; \
})
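/* Conversions from half precision to integer, rounding toward zero. */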
#define vcvth_s16_f16(__p0) __extension__ ({ \
  int16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int16_t) __builtin_neon_vcvth_s16_f16(__s0); \
  __ret; \
})
#define vcvth_s32_f16(__p0) __extension__ ({ \
  int32_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int32_t) __builtin_neon_vcvth_s32_f16(__s0); \
  __ret; \
})
#define vcvth_s64_f16(__p0) __extension__ ({ \
  int64_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int64_t) __builtin_neon_vcvth_s64_f16(__s0); \
  __ret; \
})
#define vcvth_u16_f16(__p0) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint16_t) __builtin_neon_vcvth_u16_f16(__s0); \
  __ret; \
})
#define vcvth_u32_f16(__p0) __extension__ ({ \
  uint32_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint32_t) __builtin_neon_vcvth_u32_f16(__s0); \
  __ret; \
})
#define vcvth_u64_f16(__p0) __extension__ ({ \
  uint64_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint64_t) __builtin_neon_vcvth_u64_f16(__s0); \
  __ret; \
})
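/* vcvta*: convert to integer, rounding to nearest with ties away from zero. */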
#define vcvtah_s16_f16(__p0) __extension__ ({ \
  int16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int16_t) __builtin_neon_vcvtah_s16_f16(__s0); \
  __ret; \
})
#define vcvtah_s32_f16(__p0) __extension__ ({ \
  int32_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int32_t) __builtin_neon_vcvtah_s32_f16(__s0); \
  __ret; \
})
#define vcvtah_s64_f16(__p0) __extension__ ({ \
  int64_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int64_t) __builtin_neon_vcvtah_s64_f16(__s0); \
  __ret; \
})
#define vcvtah_u16_f16(__p0) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint16_t) __builtin_neon_vcvtah_u16_f16(__s0); \
  __ret; \
})
#define vcvtah_u32_f16(__p0) __extension__ ({ \
  uint32_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint32_t) __builtin_neon_vcvtah_u32_f16(__s0); \
  __ret; \
})
#define vcvtah_u64_f16(__p0) __extension__ ({ \
  uint64_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint64_t) __builtin_neon_vcvtah_u64_f16(__s0); \
  __ret; \
})
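/* Conversions from integer to half precision. */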
#define vcvth_f16_u16(__p0) __extension__ ({ \
  float16_t __ret; \
  uint16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vcvth_f16_u16(__s0); \
  __ret; \
})
#define vcvth_f16_s16(__p0) __extension__ ({ \
  float16_t __ret; \
  int16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__s0); \
  __ret; \
})
#define vcvth_f16_u32(__p0) __extension__ ({ \
  float16_t __ret; \
  uint32_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__s0); \
  __ret; \
})
#define vcvth_f16_s32(__p0) __extension__ ({ \
  float16_t __ret; \
  int32_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vcvth_f16_s32(__s0); \
  __ret; \
})
#define vcvth_f16_u64(__p0) __extension__ ({ \
  float16_t __ret; \
  uint64_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__s0); \
  __ret; \
})
#define vcvth_f16_s64(__p0) __extension__ ({ \
  float16_t __ret; \
  int64_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vcvth_f16_s64(__s0); \
  __ret; \
})
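/* Fixed-point conversions to half precision: the second argument is the
 * number of fractional bits and must be an integer constant expression. */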
#define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  uint32_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \
  __ret; \
})
#define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  int32_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \
  __ret; \
})
#define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  uint64_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \
  __ret; \
})
#define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  int64_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \
  __ret; \
})
#define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  uint16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \
  __ret; \
})
#define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  int16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vcvth_n_f16_s16(__s0, __p1); \
  __ret; \
})
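/* vcvtm*: convert to integer, rounding toward minus infinity. */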
#define vcvtmh_s16_f16(__p0) __extension__ ({ \
  int16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int16_t) __builtin_neon_vcvtmh_s16_f16(__s0); \
  __ret; \
})
#define vcvtmh_s32_f16(__p0) __extension__ ({ \
  int32_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int32_t) __builtin_neon_vcvtmh_s32_f16(__s0); \
  __ret; \
})
#define vcvtmh_s64_f16(__p0) __extension__ ({ \
  int64_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int64_t) __builtin_neon_vcvtmh_s64_f16(__s0); \
  __ret; \
})
#define vcvtmh_u16_f16(__p0) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint16_t) __builtin_neon_vcvtmh_u16_f16(__s0); \
  __ret; \
})
#define vcvtmh_u32_f16(__p0) __extension__ ({ \
  uint32_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint32_t) __builtin_neon_vcvtmh_u32_f16(__s0); \
  __ret; \
})
#define vcvtmh_u64_f16(__p0) __extension__ ({ \
  uint64_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint64_t) __builtin_neon_vcvtmh_u64_f16(__s0); \
  __ret; \
})
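/* vcvtn*: convert to integer, rounding to nearest with ties to even. */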
#define vcvtnh_s16_f16(__p0) __extension__ ({ \
  int16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int16_t) __builtin_neon_vcvtnh_s16_f16(__s0); \
  __ret; \
})
#define vcvtnh_s32_f16(__p0) __extension__ ({ \
  int32_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int32_t) __builtin_neon_vcvtnh_s32_f16(__s0); \
  __ret; \
})
#define vcvtnh_s64_f16(__p0) __extension__ ({ \
  int64_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int64_t) __builtin_neon_vcvtnh_s64_f16(__s0); \
  __ret; \
})
#define vcvtnh_u16_f16(__p0) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint16_t) __builtin_neon_vcvtnh_u16_f16(__s0); \
  __ret; \
})
#define vcvtnh_u32_f16(__p0) __extension__ ({ \
  uint32_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint32_t) __builtin_neon_vcvtnh_u32_f16(__s0); \
  __ret; \
})
#define vcvtnh_u64_f16(__p0) __extension__ ({ \
  uint64_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint64_t) __builtin_neon_vcvtnh_u64_f16(__s0); \
  __ret; \
})
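/* vcvtp*: convert to integer, rounding toward plus infinity. */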
#define vcvtph_s16_f16(__p0) __extension__ ({ \
  int16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int16_t) __builtin_neon_vcvtph_s16_f16(__s0); \
  __ret; \
})
#define vcvtph_s32_f16(__p0) __extension__ ({ \
  int32_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int32_t) __builtin_neon_vcvtph_s32_f16(__s0); \
  __ret; \
})
#define vcvtph_s64_f16(__p0) __extension__ ({ \
  int64_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (int64_t) __builtin_neon_vcvtph_s64_f16(__s0); \
  __ret; \
})
#define vcvtph_u16_f16(__p0) __extension__ ({ \
  uint16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint16_t) __builtin_neon_vcvtph_u16_f16(__s0); \
  __ret; \
})
#define vcvtph_u32_f16(__p0) __extension__ ({ \
  uint32_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint32_t) __builtin_neon_vcvtph_u32_f16(__s0); \
  __ret; \
})
#define vcvtph_u64_f16(__p0) __extension__ ({ \
  uint64_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (uint64_t) __builtin_neon_vcvtph_u64_f16(__s0); \
  __ret; \
})
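/* Scalar arithmetic: divide, fused multiply-add (vfmah_f16(a, b, c) computes
 * a + b * c) and fused multiply-subtract (a - b * c), maximum/minimum, the
 * IEEE 754 maxNum/minNum variants (vmaxnmh/vminnmh), multiply,
 * multiply-extended, and negate. */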
#define vdivh_f16(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (float16_t) __builtin_neon_vdivh_f16(__s0, __s1); \
  __ret; \
})
#define vfmah_f16(__p0, __p1, __p2) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  float16_t __s2 = __p2; \
  __ret = (float16_t) __builtin_neon_vfmah_f16(__s0, __s1, __s2); \
  __ret; \
})
#define vfmsh_f16(__p0, __p1, __p2) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  float16_t __s2 = __p2; \
  __ret = (float16_t) __builtin_neon_vfmsh_f16(__s0, __s1, __s2); \
  __ret; \
})
#define vmaxh_f16(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (float16_t) __builtin_neon_vmaxh_f16(__s0, __s1); \
  __ret; \
})
#define vmaxnmh_f16(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (float16_t) __builtin_neon_vmaxnmh_f16(__s0, __s1); \
  __ret; \
})
#define vminh_f16(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (float16_t) __builtin_neon_vminh_f16(__s0, __s1); \
  __ret; \
})
#define vminnmh_f16(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (float16_t) __builtin_neon_vminnmh_f16(__s0, __s1); \
  __ret; \
})
#define vmulh_f16(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (float16_t) __builtin_neon_vmulh_f16(__s0, __s1); \
  __ret; \
})
#define vmulxh_f16(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (float16_t) __builtin_neon_vmulxh_f16(__s0, __s1); \
  __ret; \
})
#define vnegh_f16(__p0) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vnegh_f16(__s0); \
  __ret; \
})
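/* Reciprocal estimate, reciprocal step, and reciprocal exponent. */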
#define vrecpeh_f16(__p0) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vrecpeh_f16(__s0); \
  __ret; \
})
#define vrecpsh_f16(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (float16_t) __builtin_neon_vrecpsh_f16(__s0, __s1); \
  __ret; \
})
#define vrecpxh_f16(__p0) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vrecpxh_f16(__s0); \
  __ret; \
})
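/* Round to integral in half precision: vrndh (toward zero), vrndah (to
 * nearest, ties away from zero), vrndih (current rounding mode), vrndmh
 * (toward minus infinity), vrndnh (to nearest, ties to even), vrndph (toward
 * plus infinity), vrndxh (current rounding mode, raising the inexact
 * exception). */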
#define vrndh_f16(__p0) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vrndh_f16(__s0); \
  __ret; \
})
#define vrndah_f16(__p0) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vrndah_f16(__s0); \
  __ret; \
})
#define vrndih_f16(__p0) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vrndih_f16(__s0); \
  __ret; \
})
#define vrndmh_f16(__p0) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vrndmh_f16(__s0); \
  __ret; \
})
#define vrndnh_f16(__p0) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vrndnh_f16(__s0); \
  __ret; \
})
#define vrndph_f16(__p0) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vrndph_f16(__s0); \
  __ret; \
})
#define vrndxh_f16(__p0) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vrndxh_f16(__s0); \
  __ret; \
})
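/* Reciprocal square root estimate and step, square root, and subtract. */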
#define vrsqrteh_f16(__p0) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vrsqrteh_f16(__s0); \
  __ret; \
})
#define vrsqrtsh_f16(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (float16_t) __builtin_neon_vrsqrtsh_f16(__s0, __s1); \
  __ret; \
})
#define vsqrth_f16(__p0) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16_t) __builtin_neon_vsqrth_f16(__s0); \
  __ret; \
})
#define vsubh_f16(__p0, __p1) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  __ret = (float16_t) __builtin_neon_vsubh_f16(__s0, __s1); \
  __ret; \
})
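/* Example: a minimal usage sketch, assuming an AArch64 target with the
 * half-precision arithmetic extension enabled (e.g. -march=armv8.2-a+fp16).
 * The helper name fp16_hypot_approx is hypothetical and not part of this
 * header.
 *
 *   #include <arm_fp16.h>
 *
 *   // sqrt(x*x + y*y), computed entirely in half precision.
 *   float16_t fp16_hypot_approx(float16_t x, float16_t y) {
 *     float16_t xx  = vmulh_f16(x, x);      // x * x
 *     float16_t sum = vfmah_f16(xx, y, y);  // xx + y * y, fused
 *     return vsqrth_f16(sum);               // half-precision square root
 *   }
 */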
#endif

#undef __ai

#endif /* __ARM_FP16_H */