xref: /btstack/port/msp432p401lp-cc256x/CMSIS/cmsis_gcc.h (revision 5fd0122a3e19d95e11e1f3eb8a08a2b2acb2557e)
1 /**************************************************************************//**
2  * @file     cmsis_gcc.h
3  * @brief    CMSIS Cortex-M Core Function/Instruction Header File
4  * @version  V4.20
5  * @date     18. August 2015
6  ******************************************************************************/
7 /* Copyright (c) 2009 - 2015 ARM LIMITED
8 
9    All rights reserved.
10    Redistribution and use in source and binary forms, with or without
11    modification, are permitted provided that the following conditions are met:
12    - Redistributions of source code must retain the above copyright
13      notice, this list of conditions and the following disclaimer.
14    - Redistributions in binary form must reproduce the above copyright
15      notice, this list of conditions and the following disclaimer in the
16      documentation and/or other materials provided with the distribution.
17    - Neither the name of ARM nor the names of its contributors may be used
18      to endorse or promote products derived from this software without
19      specific prior written permission.
20    *
21    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24    ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
25    LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26    CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27    SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28    INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29    CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30    ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31    POSSIBILITY OF SUCH DAMAGE.
32    ---------------------------------------------------------------------------*/
33 
34 
35 #ifndef __CMSIS_GCC_H
36 #define __CMSIS_GCC_H
37 
38 
39 /* ###########################  Core Function Access  ########################### */
40 /** \ingroup  CMSIS_Core_FunctionInterface
41     \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
42   @{
43  */
44 
45 /** \brief  Enable IRQ Interrupts
46 
47   This function enables IRQ interrupts by clearing the I-bit in the CPSR.
48   Can only be executed in Privileged modes.
49  */
__enable_irq(void)50 __attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
51 {
52   __ASM volatile ("cpsie i" : : : "memory");
53 }
54 
55 
56 /** \brief  Disable IRQ Interrupts
57 
58   This function disables IRQ interrupts by setting the I-bit in the CPSR.
59   Can only be executed in Privileged modes.
60  */
__disable_irq(void)61 __attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_irq(void)
62 {
63   __ASM volatile ("cpsid i" : : : "memory");
64 }
65 
66 
67 /** \brief  Get Control Register
68 
69     This function returns the content of the Control Register.
70 
71     \return               Control Register value
72  */
__get_CONTROL(void)73 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CONTROL(void)
74 {
75   uint32_t result;
76 
77   __ASM volatile ("MRS %0, control" : "=r" (result) );
78   return(result);
79 }
80 
81 
82 /** \brief  Set Control Register
83 
84     This function writes the given value to the Control Register.
85 
86     \param [in]    control  Control Register value to set
87  */
__set_CONTROL(uint32_t control)88 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CONTROL(uint32_t control)
89 {
90   __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
91 }
92 
93 
94 /** \brief  Get IPSR Register
95 
96     This function returns the content of the IPSR Register.
97 
98     \return               IPSR Register value
99  */
__get_IPSR(void)100 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_IPSR(void)
101 {
102   uint32_t result;
103 
104   __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
105   return(result);
106 }
107 
108 
109 /** \brief  Get APSR Register
110 
111     This function returns the content of the APSR Register.
112 
113     \return               APSR Register value
114  */
__get_APSR(void)115 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void)
116 {
117   uint32_t result;
118 
119   __ASM volatile ("MRS %0, apsr" : "=r" (result) );
120   return(result);
121 }
122 
123 
124 /** \brief  Get xPSR Register
125 
126     This function returns the content of the xPSR Register.
127 
128     \return               xPSR Register value
129  */
__get_xPSR(void)130 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_xPSR(void)
131 {
132   uint32_t result;
133 
134   __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
135   return(result);
136 }
137 
138 
139 /** \brief  Get Process Stack Pointer
140 
141     This function returns the current value of the Process Stack Pointer (PSP).
142 
143     \return               PSP Register value
144  */
__get_PSP(void)145 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PSP(void)
146 {
147   register uint32_t result;
148 
149   __ASM volatile ("MRS %0, psp\n"  : "=r" (result) );
150   return(result);
151 }
152 
153 
154 /** \brief  Set Process Stack Pointer
155 
156     This function assigns the given value to the Process Stack Pointer (PSP).
157 
158     \param [in]    topOfProcStack  Process Stack Pointer value to set
159  */
__set_PSP(uint32_t topOfProcStack)160 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
161 {
162   __ASM volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) : "sp");
163 }
164 
165 
166 /** \brief  Get Main Stack Pointer
167 
168     This function returns the current value of the Main Stack Pointer (MSP).
169 
170     \return               MSP Register value
171  */
__get_MSP(void)172 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_MSP(void)
173 {
174   register uint32_t result;
175 
176   __ASM volatile ("MRS %0, msp\n" : "=r" (result) );
177   return(result);
178 }
179 
180 
181 /** \brief  Set Main Stack Pointer
182 
183     This function assigns the given value to the Main Stack Pointer (MSP).
184 
185     \param [in]    topOfMainStack  Main Stack Pointer value to set
186  */
__set_MSP(uint32_t topOfMainStack)187 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
188 {
189   __ASM volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) : "sp");
190 }
191 
192 
193 /** \brief  Get Priority Mask
194 
195     This function returns the current state of the priority mask bit from the Priority Mask Register.
196 
197     \return               Priority Mask value
198  */
__get_PRIMASK(void)199 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PRIMASK(void)
200 {
201   uint32_t result;
202 
203   __ASM volatile ("MRS %0, primask" : "=r" (result) );
204   return(result);
205 }
206 
207 
208 /** \brief  Set Priority Mask
209 
210     This function assigns the given value to the Priority Mask Register.
211 
212     \param [in]    priMask  Priority Mask
213  */
__set_PRIMASK(uint32_t priMask)214 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
215 {
216   __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
217 }
218 
219 
220 #if       (__CORTEX_M >= 0x03U)
221 
222 /** \brief  Enable FIQ
223 
224     This function enables FIQ interrupts by clearing the F-bit in the CPSR.
225     Can only be executed in Privileged modes.
226  */
__enable_fault_irq(void)227 __attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_fault_irq(void)
228 {
229   __ASM volatile ("cpsie f" : : : "memory");
230 }
231 
232 
233 /** \brief  Disable FIQ
234 
235     This function disables FIQ interrupts by setting the F-bit in the CPSR.
236     Can only be executed in Privileged modes.
237  */
__disable_fault_irq(void)238 __attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_fault_irq(void)
239 {
240   __ASM volatile ("cpsid f" : : : "memory");
241 }
242 
243 
244 /** \brief  Get Base Priority
245 
246     This function returns the current value of the Base Priority register.
247 
248     \return               Base Priority register value
249  */
__get_BASEPRI(void)250 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_BASEPRI(void)
251 {
252   uint32_t result;
253 
254   __ASM volatile ("MRS %0, basepri" : "=r" (result) );
255   return(result);
256 }
257 
258 
259 /** \brief  Set Base Priority
260 
261     This function assigns the given value to the Base Priority register.
262 
263     \param [in]    basePri  Base Priority value to set
264  */
__set_BASEPRI(uint32_t value)265 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI(uint32_t value)
266 {
267   __ASM volatile ("MSR basepri, %0" : : "r" (value) : "memory");
268 }
269 
270 
271 /** \brief  Set Base Priority with condition
272 
273     This function assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
274 	or the new value increases the BASEPRI priority level.
275 
276     \param [in]    basePri  Base Priority value to set
277  */
__set_BASEPRI_MAX(uint32_t value)278 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI_MAX(uint32_t value)
279 {
280   __ASM volatile ("MSR basepri_max, %0" : : "r" (value) : "memory");
281 }
282 
283 
284 /** \brief  Get Fault Mask
285 
286     This function returns the current value of the Fault Mask register.
287 
288     \return               Fault Mask register value
289  */
__get_FAULTMASK(void)290 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FAULTMASK(void)
291 {
292   uint32_t result;
293 
294   __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
295   return(result);
296 }
297 
298 
299 /** \brief  Set Fault Mask
300 
301     This function assigns the given value to the Fault Mask register.
302 
303     \param [in]    faultMask  Fault Mask value to set
304  */
__set_FAULTMASK(uint32_t faultMask)305 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
306 {
307   __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
308 }
309 
310 #endif /* (__CORTEX_M >= 0x03U) */
311 
312 
313 #if       (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U)
314 
315 /** \brief  Get FPSCR
316 
317     This function returns the current value of the Floating Point Status/Control register.
318 
319     \return               Floating Point Status/Control register value
320  */
__get_FPSCR(void)321 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
322 {
323 #if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
324   uint32_t result;
325 
326   /* Empty asm statement works as a scheduling barrier */
327   __ASM volatile ("");
328   __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
329   __ASM volatile ("");
330   return(result);
331 #else
332    return(0);
333 #endif
334 }
335 
336 
337 /** \brief  Set FPSCR
338 
339     This function assigns the given value to the Floating Point Status/Control register.
340 
341     \param [in]    fpscr  Floating Point Status/Control value to set
342  */
__set_FPSCR(uint32_t fpscr)343 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
344 {
345 #if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
346   /* Empty asm statement works as a scheduling barrier */
347   __ASM volatile ("");
348 //  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc");          // ARMCC_V6: needs to be checked
349   __ASM volatile ("");
350 #endif
351 }
352 
353 #endif /* (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U) */
354 
355 
356 
357 /*@} end of CMSIS_Core_RegAccFunctions */
358 
359 
360 /* ##########################  Core Instruction Access  ######################### */
361 /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
362   Access to dedicated instructions
363   @{
364 */
365 
366 /* Define macros for porting to both thumb1 and thumb2.
367  * For thumb1, use low register (r0-r7), specified by constrant "l"
368  * Otherwise, use general registers, specified by constrant "r" */
369 #if defined (__thumb__) && !defined (__thumb2__)
370 #define __CMSIS_GCC_OUT_REG(r) "=l" (r)
371 #define __CMSIS_GCC_USE_REG(r) "l" (r)
372 #else
373 #define __CMSIS_GCC_OUT_REG(r) "=r" (r)
374 #define __CMSIS_GCC_USE_REG(r) "r" (r)
375 #endif
376 
377 /** \brief  No Operation
378 
379     No Operation does nothing. This instruction can be used for code alignment purposes.
380  */
__NOP(void)381 __attribute__((always_inline)) __STATIC_INLINE void __NOP(void)
382 {
383   __ASM volatile ("nop");
384 }
385 
386 
387 /** \brief  Wait For Interrupt
388 
389     Wait For Interrupt is a hint instruction that suspends execution
390     until one of a number of events occurs.
391  */
__WFI(void)392 __attribute__((always_inline)) __STATIC_INLINE void __WFI(void)
393 {
394   __ASM volatile ("wfi");
395 }
396 
397 
398 /** \brief  Wait For Event
399 
400     Wait For Event is a hint instruction that permits the processor to enter
401     a low-power state until one of a number of events occurs.
402  */
__WFE(void)403 __attribute__((always_inline)) __STATIC_INLINE void __WFE(void)
404 {
405   __ASM volatile ("wfe");
406 }
407 
408 
409 /** \brief  Send Event
410 
411     Send Event is a hint instruction. It causes an event to be signaled to the CPU.
412  */
__SEV(void)413 __attribute__((always_inline)) __STATIC_INLINE void __SEV(void)
414 {
415   __ASM volatile ("sev");
416 }
417 
418 
419 /** \brief  Instruction Synchronization Barrier
420 
421     Instruction Synchronization Barrier flushes the pipeline in the processor,
422     so that all instructions following the ISB are fetched from cache or
423     memory, after the instruction has been completed.
424  */
__ISB(void)425 __attribute__((always_inline)) __STATIC_INLINE void __ISB(void)
426 {
427   __ASM volatile ("isb 0xF":::"memory");
428 }
429 
430 
431 /** \brief  Data Synchronization Barrier
432 
433     This function acts as a special kind of Data Memory Barrier.
434     It completes when all explicit memory accesses before this instruction complete.
435  */
__DSB(void)436 __attribute__((always_inline)) __STATIC_INLINE void __DSB(void)
437 {
438   __ASM volatile ("dsb 0xF":::"memory");
439 }
440 
441 
442 /** \brief  Data Memory Barrier
443 
444     This function ensures the apparent order of the explicit memory operations before
445     and after the instruction, without ensuring their completion.
446  */
__DMB(void)447 __attribute__((always_inline)) __STATIC_INLINE void __DMB(void)
448 {
449   __ASM volatile ("dmb 0xF":::"memory");
450 }
451 
452 
453 /** \brief  Reverse byte order (32 bit)
454 
455     This function reverses the byte order in integer value.
456 
457     \param [in]    value  Value to reverse
458     \return               Reversed value
459  */
__REV(uint32_t value)460 __attribute__((always_inline)) __STATIC_INLINE uint32_t __REV(uint32_t value)
461 {
462 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
463   return __builtin_bswap32(value);
464 #else
465   uint32_t result;
466 
467   __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
468   return(result);
469 #endif
470 }
471 
472 
473 /** \brief  Reverse byte order (16 bit)
474 
475     This function reverses the byte order in two unsigned short values.
476 
477     \param [in]    value  Value to reverse
478     \return               Reversed value
479  */
__REV16(uint32_t value)480 __attribute__((always_inline)) __STATIC_INLINE uint32_t __REV16(uint32_t value)
481 {
482   uint32_t result;
483 
484   __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
485   return(result);
486 }
487 
488 
489 /** \brief  Reverse byte order in signed short value
490 
491     This function reverses the byte order in a signed short value with sign extension to integer.
492 
493     \param [in]    value  Value to reverse
494     \return               Reversed value
495  */
__REVSH(int32_t value)496 __attribute__((always_inline)) __STATIC_INLINE int32_t __REVSH(int32_t value)
497 {
498 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
499   return (short)__builtin_bswap16(value);
500 #else
501   int32_t result;
502 
503   __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
504   return(result);
505 #endif
506 }
507 
508 
509 /** \brief  Rotate Right in unsigned value (32 bit)
510 
511     This function Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
512 
513     \param [in]    value  Value to rotate
514     \param [in]    value  Number of Bits to rotate
515     \return               Rotated value
516  */
__ROR(uint32_t op1,uint32_t op2)517 __attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
518 {
519   return (op1 >> op2) | (op1 << (32U - op2));
520 }
521 
522 
523 /** \brief  Breakpoint
524 
525     This function causes the processor to enter Debug state.
526     Debug tools can use this to investigate system state when the instruction at a particular address is reached.
527 
528     \param [in]    value  is ignored by the processor.
529                    If required, a debugger can use it to store additional information about the breakpoint.
530  */
531 #define __BKPT(value)                       __ASM volatile ("bkpt "#value)
532 
533 
534 /** \brief  Reverse bit order of value
535 
536     This function reverses the bit order of the given value.
537 
538     \param [in]    value  Value to reverse
539     \return               Reversed value
540  */
__RBIT(uint32_t value)541 __attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
542 {
543   uint32_t result;
544 
545 #if       (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)
546    __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
547 #else
548   int32_t s = 4 /*sizeof(v)*/ * 8 - 1; // extra shift needed at end
549 
550   result = value;                      // r will be reversed bits of v; first get LSB of v
551   for (value >>= 1U; value; value >>= 1U)
552   {
553     result <<= 1U;
554     result |= value & 1U;
555     s--;
556   }
557   result <<= s;                       // shift when v's highest bits are zero
558 #endif
559   return(result);
560 }
561 
562 
563 /** \brief  Count leading zeros
564 
565     This function counts the number of leading zeros of a data value.
566 
567     \param [in]  value  Value to count the leading zeros
568     \return             number of leading zeros in value
569  */
570 #define __CLZ             __builtin_clz
571 
572 
573 #if       (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)
574 
575 /** \brief  LDR Exclusive (8 bit)
576 
577     This function executes a exclusive LDR instruction for 8 bit value.
578 
579     \param [in]    ptr  Pointer to data
580     \return             value of type uint8_t at (*ptr)
581  */
__LDREXB(volatile uint8_t * addr)582 __attribute__((always_inline)) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr)
583 {
584     uint32_t result;
585 
586 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
587    __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
588 #else
589     /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
590        accepted by assembler. So has to use following less efficient pattern.
591     */
592    __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
593 #endif
594    return ((uint8_t) result);    /* Add explicit type cast here */
595 }
596 
597 
598 /** \brief  LDR Exclusive (16 bit)
599 
600     This function executes a exclusive LDR instruction for 16 bit values.
601 
602     \param [in]    ptr  Pointer to data
603     \return        value of type uint16_t at (*ptr)
604  */
__LDREXH(volatile uint16_t * addr)605 __attribute__((always_inline)) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr)
606 {
607     uint32_t result;
608 
609 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
610    __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
611 #else
612     /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
613        accepted by assembler. So has to use following less efficient pattern.
614     */
615    __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
616 #endif
617    return ((uint16_t) result);    /* Add explicit type cast here */
618 }
619 
620 
621 /** \brief  LDR Exclusive (32 bit)
622 
623     This function executes a exclusive LDR instruction for 32 bit values.
624 
625     \param [in]    ptr  Pointer to data
626     \return        value of type uint32_t at (*ptr)
627  */
__LDREXW(volatile uint32_t * addr)628 __attribute__((always_inline)) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr)
629 {
630     uint32_t result;
631 
632    __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
633    return(result);
634 }
635 
636 
637 /** \brief  STR Exclusive (8 bit)
638 
639     This function executes a exclusive STR instruction for 8 bit values.
640 
641     \param [in]  value  Value to store
642     \param [in]    ptr  Pointer to location
643     \return          0  Function succeeded
644     \return          1  Function failed
645  */
__STREXB(uint8_t value,volatile uint8_t * addr)646 __attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
647 {
648    uint32_t result;
649 
650    __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
651    return(result);
652 }
653 
654 
655 /** \brief  STR Exclusive (16 bit)
656 
657     This function executes a exclusive STR instruction for 16 bit values.
658 
659     \param [in]  value  Value to store
660     \param [in]    ptr  Pointer to location
661     \return          0  Function succeeded
662     \return          1  Function failed
663  */
__STREXH(uint16_t value,volatile uint16_t * addr)664 __attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
665 {
666    uint32_t result;
667 
668    __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
669    return(result);
670 }
671 
672 
673 /** \brief  STR Exclusive (32 bit)
674 
675     This function executes a exclusive STR instruction for 32 bit values.
676 
677     \param [in]  value  Value to store
678     \param [in]    ptr  Pointer to location
679     \return          0  Function succeeded
680     \return          1  Function failed
681  */
__STREXW(uint32_t value,volatile uint32_t * addr)682 __attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
683 {
684    uint32_t result;
685 
686    __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
687    return(result);
688 }
689 
690 
691 /** \brief  Remove the exclusive lock
692 
693     This function removes the exclusive lock which is created by LDREX.
694 
695  */
__CLREX(void)696 __attribute__((always_inline)) __STATIC_INLINE void __CLREX(void)
697 {
698   __ASM volatile ("clrex" ::: "memory");
699 }
700 
701 
702 /** \brief  Signed Saturate
703 
704     This function saturates a signed value.
705 
706     \param [in]  value  Value to be saturated
707     \param [in]    sat  Bit position to saturate to (1..32)
708     \return             Saturated value
709  */
710 #define __SSAT(ARG1,ARG2) \
711 ({                          \
712   uint32_t __RES, __ARG1 = (ARG1); \
713   __ASM ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
714   __RES; \
715  })
716 
717 
718 /** \brief  Unsigned Saturate
719 
720     This function saturates an unsigned value.
721 
722     \param [in]  value  Value to be saturated
723     \param [in]    sat  Bit position to saturate to (0..31)
724     \return             Saturated value
725  */
726 #define __USAT(ARG1,ARG2) \
727 ({                          \
728   uint32_t __RES, __ARG1 = (ARG1); \
729   __ASM ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
730   __RES; \
731  })
732 
733 
734 /** \brief  Rotate Right with Extend (32 bit)
735 
736     This function moves each bit of a bitstring right by one bit.
737     The carry input is shifted in at the left end of the bitstring.
738 
739     \param [in]    value  Value to rotate
740     \return               Rotated value
741  */
__RRX(uint32_t value)742 __attribute__((always_inline)) __STATIC_INLINE uint32_t __RRX(uint32_t value)
743 {
744   uint32_t result;
745 
746   __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
747   return(result);
748 }
749 
750 
751 /** \brief  LDRT Unprivileged (8 bit)
752 
753     This function executes a Unprivileged LDRT instruction for 8 bit value.
754 
755     \param [in]    ptr  Pointer to data
756     \return             value of type uint8_t at (*ptr)
757  */
__LDRBT(volatile uint8_t * addr)758 __attribute__((always_inline)) __STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr)
759 {
760     uint32_t result;
761 
762 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
763    __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*addr) );
764 #else
765     /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
766        accepted by assembler. So has to use following less efficient pattern.
767     */
768    __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
769 #endif
770    return ((uint8_t) result);    /* Add explicit type cast here */
771 }
772 
773 
774 /** \brief  LDRT Unprivileged (16 bit)
775 
776     This function executes a Unprivileged LDRT instruction for 16 bit values.
777 
778     \param [in]    ptr  Pointer to data
779     \return        value of type uint16_t at (*ptr)
780  */
__LDRHT(volatile uint16_t * addr)781 __attribute__((always_inline)) __STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr)
782 {
783     uint32_t result;
784 
785 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
786    __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*addr) );
787 #else
788     /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
789        accepted by assembler. So has to use following less efficient pattern.
790     */
791    __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
792 #endif
793    return ((uint16_t) result);    /* Add explicit type cast here */
794 }
795 
796 
797 /** \brief  LDRT Unprivileged (32 bit)
798 
799     This function executes a Unprivileged LDRT instruction for 32 bit values.
800 
801     \param [in]    ptr  Pointer to data
802     \return        value of type uint32_t at (*ptr)
803  */
__LDRT(volatile uint32_t * addr)804 __attribute__((always_inline)) __STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr)
805 {
806     uint32_t result;
807 
808    __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*addr) );
809    return(result);
810 }
811 
812 
813 /** \brief  STRT Unprivileged (8 bit)
814 
815     This function executes a Unprivileged STRT instruction for 8 bit values.
816 
817     \param [in]  value  Value to store
818     \param [in]    ptr  Pointer to location
819  */
__STRBT(uint8_t value,volatile uint8_t * addr)820 __attribute__((always_inline)) __STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr)
821 {
822    __ASM volatile ("strbt %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
823 }
824 
825 
826 /** \brief  STRT Unprivileged (16 bit)
827 
828     This function executes a Unprivileged STRT instruction for 16 bit values.
829 
830     \param [in]  value  Value to store
831     \param [in]    ptr  Pointer to location
832  */
__STRHT(uint16_t value,volatile uint16_t * addr)833 __attribute__((always_inline)) __STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr)
834 {
835    __ASM volatile ("strht %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
836 }
837 
838 
839 /** \brief  STRT Unprivileged (32 bit)
840 
841     This function executes a Unprivileged STRT instruction for 32 bit values.
842 
843     \param [in]  value  Value to store
844     \param [in]    ptr  Pointer to location
845  */
__STRT(uint32_t value,volatile uint32_t * addr)846 __attribute__((always_inline)) __STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr)
847 {
848    __ASM volatile ("strt %1, %0" : "=Q" (*addr) : "r" (value) );
849 }
850 
851 #endif /* (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U) */
852 
853 /*@}*/ /* end of group CMSIS_Core_InstructionInterface */
854 
855 
856 /* ###################  Compiler specific Intrinsics  ########################### */
857 /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
858   Access to dedicated SIMD instructions
859   @{
860 */
861 
862 #if (__CORTEX_M >= 0x04)  /* only for Cortex-M4 and above */
863 
__SADD8(uint32_t op1,uint32_t op2)864 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
865 {
866   uint32_t result;
867 
868   __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
869   return(result);
870 }
871 
__QADD8(uint32_t op1,uint32_t op2)872 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
873 {
874   uint32_t result;
875 
876   __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
877   return(result);
878 }
879 
__SHADD8(uint32_t op1,uint32_t op2)880 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
881 {
882   uint32_t result;
883 
884   __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
885   return(result);
886 }
887 
__UADD8(uint32_t op1,uint32_t op2)888 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
889 {
890   uint32_t result;
891 
892   __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
893   return(result);
894 }
895 
__UQADD8(uint32_t op1,uint32_t op2)896 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
897 {
898   uint32_t result;
899 
900   __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
901   return(result);
902 }
903 
__UHADD8(uint32_t op1,uint32_t op2)904 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
905 {
906   uint32_t result;
907 
908   __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
909   return(result);
910 }
911 
912 
__SSUB8(uint32_t op1,uint32_t op2)913 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
914 {
915   uint32_t result;
916 
917   __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
918   return(result);
919 }
920 
__QSUB8(uint32_t op1,uint32_t op2)921 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
922 {
923   uint32_t result;
924 
925   __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
926   return(result);
927 }
928 
__SHSUB8(uint32_t op1,uint32_t op2)929 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
930 {
931   uint32_t result;
932 
933   __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
934   return(result);
935 }
936 
__USUB8(uint32_t op1,uint32_t op2)937 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
938 {
939   uint32_t result;
940 
941   __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
942   return(result);
943 }
944 
__UQSUB8(uint32_t op1,uint32_t op2)945 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
946 {
947   uint32_t result;
948 
949   __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
950   return(result);
951 }
952 
__UHSUB8(uint32_t op1,uint32_t op2)953 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
954 {
955   uint32_t result;
956 
957   __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
958   return(result);
959 }
960 
961 
__SADD16(uint32_t op1,uint32_t op2)962 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
963 {
964   uint32_t result;
965 
966   __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
967   return(result);
968 }
969 
__QADD16(uint32_t op1,uint32_t op2)970 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
971 {
972   uint32_t result;
973 
974   __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
975   return(result);
976 }
977 
__SHADD16(uint32_t op1,uint32_t op2)978 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
979 {
980   uint32_t result;
981 
982   __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
983   return(result);
984 }
985 
__UADD16(uint32_t op1,uint32_t op2)986 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
987 {
988   uint32_t result;
989 
990   __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
991   return(result);
992 }
993 
__UQADD16(uint32_t op1,uint32_t op2)994 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
995 {
996   uint32_t result;
997 
998   __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
999   return(result);
1000 }
1001 
__UHADD16(uint32_t op1,uint32_t op2)1002 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
1003 {
1004   uint32_t result;
1005 
1006   __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1007   return(result);
1008 }
1009 
__SSUB16(uint32_t op1,uint32_t op2)1010 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
1011 {
1012   uint32_t result;
1013 
1014   __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1015   return(result);
1016 }
1017 
__QSUB16(uint32_t op1,uint32_t op2)1018 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
1019 {
1020   uint32_t result;
1021 
1022   __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1023   return(result);
1024 }
1025 
__SHSUB16(uint32_t op1,uint32_t op2)1026 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
1027 {
1028   uint32_t result;
1029 
1030   __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1031   return(result);
1032 }
1033 
__USUB16(uint32_t op1,uint32_t op2)1034 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
1035 {
1036   uint32_t result;
1037 
1038   __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1039   return(result);
1040 }
1041 
__UQSUB16(uint32_t op1,uint32_t op2)1042 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
1043 {
1044   uint32_t result;
1045 
1046   __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1047   return(result);
1048 }
1049 
__UHSUB16(uint32_t op1,uint32_t op2)1050 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
1051 {
1052   uint32_t result;
1053 
1054   __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1055   return(result);
1056 }
1057 
__SASX(uint32_t op1,uint32_t op2)1058 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
1059 {
1060   uint32_t result;
1061 
1062   __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1063   return(result);
1064 }
1065 
__QASX(uint32_t op1,uint32_t op2)1066 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
1067 {
1068   uint32_t result;
1069 
1070   __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1071   return(result);
1072 }
1073 
__SHASX(uint32_t op1,uint32_t op2)1074 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
1075 {
1076   uint32_t result;
1077 
1078   __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1079   return(result);
1080 }
1081 
__UASX(uint32_t op1,uint32_t op2)1082 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
1083 {
1084   uint32_t result;
1085 
1086   __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1087   return(result);
1088 }
1089 
__UQASX(uint32_t op1,uint32_t op2)1090 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
1091 {
1092   uint32_t result;
1093 
1094   __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1095   return(result);
1096 }
1097 
__UHASX(uint32_t op1,uint32_t op2)1098 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
1099 {
1100   uint32_t result;
1101 
1102   __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1103   return(result);
1104 }
1105 
__SSAX(uint32_t op1,uint32_t op2)1106 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
1107 {
1108   uint32_t result;
1109 
1110   __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1111   return(result);
1112 }
1113 
__QSAX(uint32_t op1,uint32_t op2)1114 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
1115 {
1116   uint32_t result;
1117 
1118   __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1119   return(result);
1120 }
1121 
__SHSAX(uint32_t op1,uint32_t op2)1122 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
1123 {
1124   uint32_t result;
1125 
1126   __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1127   return(result);
1128 }
1129 
__USAX(uint32_t op1,uint32_t op2)1130 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
1131 {
1132   uint32_t result;
1133 
1134   __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1135   return(result);
1136 }
1137 
__UQSAX(uint32_t op1,uint32_t op2)1138 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
1139 {
1140   uint32_t result;
1141 
1142   __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1143   return(result);
1144 }
1145 
__UHSAX(uint32_t op1,uint32_t op2)1146 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
1147 {
1148   uint32_t result;
1149 
1150   __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1151   return(result);
1152 }
1153 
__USAD8(uint32_t op1,uint32_t op2)1154 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
1155 {
1156   uint32_t result;
1157 
1158   __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1159   return(result);
1160 }
1161 
__USADA8(uint32_t op1,uint32_t op2,uint32_t op3)1162 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
1163 {
1164   uint32_t result;
1165 
1166   __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1167   return(result);
1168 }
1169 
1170 #define __SSAT16(ARG1,ARG2) \
1171 ({                          \
1172   uint32_t __RES, __ARG1 = (ARG1); \
1173   __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
1174   __RES; \
1175  })
1176 
1177 #define __USAT16(ARG1,ARG2) \
1178 ({                          \
1179   uint32_t __RES, __ARG1 = (ARG1); \
1180   __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
1181   __RES; \
1182  })
1183 
__UXTB16(uint32_t op1)1184 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
1185 {
1186   uint32_t result;
1187 
1188   __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
1189   return(result);
1190 }
1191 
__UXTAB16(uint32_t op1,uint32_t op2)1192 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
1193 {
1194   uint32_t result;
1195 
1196   __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1197   return(result);
1198 }
1199 
__SXTB16(uint32_t op1)1200 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
1201 {
1202   uint32_t result;
1203 
1204   __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
1205   return(result);
1206 }
1207 
__SXTAB16(uint32_t op1,uint32_t op2)1208 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
1209 {
1210   uint32_t result;
1211 
1212   __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1213   return(result);
1214 }
1215 
__SMUAD(uint32_t op1,uint32_t op2)1216 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
1217 {
1218   uint32_t result;
1219 
1220   __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1221   return(result);
1222 }
1223 
__SMUADX(uint32_t op1,uint32_t op2)1224 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
1225 {
1226   uint32_t result;
1227 
1228   __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1229   return(result);
1230 }
1231 
__SMLAD(uint32_t op1,uint32_t op2,uint32_t op3)1232 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
1233 {
1234   uint32_t result;
1235 
1236   __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1237   return(result);
1238 }
1239 
__SMLADX(uint32_t op1,uint32_t op2,uint32_t op3)1240 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
1241 {
1242   uint32_t result;
1243 
1244   __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1245   return(result);
1246 }
1247 
__SMLALD(uint32_t op1,uint32_t op2,uint64_t acc)1248 __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
1249 {
1250   union llreg_u{
1251     uint32_t w32[2];
1252     uint64_t w64;
1253   } llr;
1254   llr.w64 = acc;
1255 
1256 #ifndef __ARMEB__   // Little endian
1257   __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1258 #else               // Big endian
1259   __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1260 #endif
1261 
1262   return(llr.w64);
1263 }
1264 
__SMLALDX(uint32_t op1,uint32_t op2,uint64_t acc)1265 __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
1266 {
1267   union llreg_u{
1268     uint32_t w32[2];
1269     uint64_t w64;
1270   } llr;
1271   llr.w64 = acc;
1272 
1273 #ifndef __ARMEB__   // Little endian
1274   __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1275 #else               // Big endian
1276   __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1277 #endif
1278 
1279   return(llr.w64);
1280 }
1281 
__SMUSD(uint32_t op1,uint32_t op2)1282 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
1283 {
1284   uint32_t result;
1285 
1286   __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1287   return(result);
1288 }
1289 
__SMUSDX(uint32_t op1,uint32_t op2)1290 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
1291 {
1292   uint32_t result;
1293 
1294   __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1295   return(result);
1296 }
1297 
__SMLSD(uint32_t op1,uint32_t op2,uint32_t op3)1298 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
1299 {
1300   uint32_t result;
1301 
1302   __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1303   return(result);
1304 }
1305 
__SMLSDX(uint32_t op1,uint32_t op2,uint32_t op3)1306 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
1307 {
1308   uint32_t result;
1309 
1310   __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1311   return(result);
1312 }
1313 
__SMLSLD(uint32_t op1,uint32_t op2,uint64_t acc)1314 __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
1315 {
1316   union llreg_u{
1317     uint32_t w32[2];
1318     uint64_t w64;
1319   } llr;
1320   llr.w64 = acc;
1321 
1322 #ifndef __ARMEB__   // Little endian
1323   __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1324 #else               // Big endian
1325   __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1326 #endif
1327 
1328   return(llr.w64);
1329 }
1330 
__SMLSLDX(uint32_t op1,uint32_t op2,uint64_t acc)1331 __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
1332 {
1333   union llreg_u{
1334     uint32_t w32[2];
1335     uint64_t w64;
1336   } llr;
1337   llr.w64 = acc;
1338 
1339 #ifndef __ARMEB__   // Little endian
1340   __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1341 #else               // Big endian
1342   __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1343 #endif
1344 
1345   return(llr.w64);
1346 }
1347 
__SEL(uint32_t op1,uint32_t op2)1348 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)
1349 {
1350   uint32_t result;
1351 
1352   __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1353   return(result);
1354 }
1355 
__QADD(uint32_t op1,uint32_t op2)1356 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
1357 {
1358   uint32_t result;
1359 
1360   __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1361   return(result);
1362 }
1363 
__QSUB(uint32_t op1,uint32_t op2)1364 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
1365 {
1366   uint32_t result;
1367 
1368   __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1369   return(result);
1370 }
1371 
1372 #define __PKHBT(ARG1,ARG2,ARG3) \
1373 ({                          \
1374   uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
1375   __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
1376   __RES; \
1377  })
1378 
1379 #define __PKHTB(ARG1,ARG2,ARG3) \
1380 ({                          \
1381   uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
1382   if (ARG3 == 0) \
1383     __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2)  ); \
1384   else \
1385     __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
1386   __RES; \
1387  })
1388 
__SMMLA(int32_t op1,int32_t op2,int32_t op3)1389 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
1390 {
1391  int32_t result;
1392 
1393  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r"  (op1), "r" (op2), "r" (op3) );
1394  return(result);
1395 }
1396 
1397 #endif /* (__CORTEX_M >= 0x04) */
1398 /*@} end of group CMSIS_SIMD_intrinsics */
1399 
1400 
1401 #endif /* __CMSIS_GCC_H */
1402