/*
 * Copyright (c) 2008-2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#pragma once

#ifndef ASSEMBLY

#include <assert.h>
#include <stdbool.h>
#include <compiler.h>
#include <reg.h>
#include <arch/arm.h>

#if ARM_ISA_ARMV7M
#include <arch/arm/cm.h>
#endif

__BEGIN_CDECLS;

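/*
 * When ARM_MERGE_FIQ_IRQ is set, IRQs and FIQs are masked and unmasked as a
 * pair ("cpsid/cpsie if"), the FIQ-only helpers become no-ops, and
 * check_irq_fiq_state() asserts that the CPSR I (bit 7) and F (bit 6) masks
 * never diverge. Otherwise only the I bit is toggled here and FIQs are
 * managed through the separate arch_*_fiqs() routines.
 */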
#if ARM_MERGE_FIQ_IRQ

#define CPS_MASK_INTS "if"
#define FIQ_CHANGE(x)

static inline void check_irq_fiq_state(unsigned long state)
{
    ASSERT(((state >> 6) & 1) == ((state >> 7) & 1));
}

#else

#define CPS_MASK_INTS "i"
#define FIQ_CHANGE(x) x

static inline void check_irq_fiq_state(unsigned long state)
{
}

#endif

#if ARM_ISA_ARMV7 || (ARM_ISA_ARMV6 && !__thumb__)
#define USE_GCC_ATOMICS 0
#define ENABLE_CYCLE_COUNTER 1
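
/*
 * USE_GCC_ATOMICS selects the compiler's __atomic builtins instead of the
 * hand-rolled ldrex/strex sequences below; ENABLE_CYCLE_COUNTER gates the
 * Cortex-M DWT cycle counter read in arch_cycle_count().
 */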

// override of some routines
static inline void arch_enable_ints(void)
{
    CF;
    __asm__ volatile("cpsie " CPS_MASK_INTS);
}

static inline void arch_disable_ints(void)
{
    __asm__ volatile("cpsid " CPS_MASK_INTS);
    CF;
}

static inline bool arch_ints_disabled(void)
{
    unsigned int state;

#if ARM_ISA_ARMV7M
    __asm__ volatile("mrs %0, primask" : "=r"(state));
    state &= 0x1;
#else
    __asm__ volatile("mrs %0, cpsr" : "=r"(state));
    check_irq_fiq_state(state);
    state &= (1<<7);
#endif

    return !!state;
}

static inline void arch_enable_fiqs(void)
{
    CF;
    FIQ_CHANGE(__asm__ volatile("cpsie f"));
}

static inline void arch_disable_fiqs(void)
{
    FIQ_CHANGE(__asm__ volatile("cpsid f"));
    CF;
}

static inline bool arch_fiqs_disabled(void)
{
    unsigned int state;

    __asm__ volatile("mrs %0, cpsr" : "=r"(state));
    check_irq_fiq_state(state);
    state &= (1<<6);

    return !!state;
}

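/*
 * On Cortex-M the IPSR register holds the number of the currently active
 * exception, so a nonzero value means we are running in handler mode. Other
 * cores rely on a flag maintained by the interrupt glue instead.
 */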
static inline bool arch_in_int_handler(void)
{
#if ARM_ISA_ARMV7M
    uint32_t ipsr;
    __asm volatile ("MRS %0, ipsr" : "=r" (ipsr) );
    return (ipsr & IPSR_ISR_Msk);
#else
    /* set by the interrupt glue to track that the cpu is inside a handler */
    /* Note that this is not SMP safe */
    extern volatile bool __arm_in_handler;

    return __arm_in_handler;
#endif
}

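/*
 * The fallback atomics below are built on the ARM exclusive monitor: ldrex
 * loads the current value and marks the address for exclusive access, strex
 * performs the store only if nothing else touched that address in the
 * meantime (writing 0 to the status register on success, 1 on failure), and
 * the loop retries until the store goes through.
 */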
static inline int atomic_add(volatile int *ptr, int val)
{
#if USE_GCC_ATOMICS
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
#else
    int old;
    int temp;
    int test;

    do {
        __asm__ volatile(
            "ldrex	%[old], [%[ptr]]\n"
            "adds	%[temp], %[old], %[val]\n"
            "strex	%[test], %[temp], [%[ptr]]\n"
            : [old]"=&r" (old), [temp]"=&r" (temp), [test]"=&r" (test)
            : [ptr]"r" (ptr), [val]"r" (val)
            : "memory", "cc");

    } while (test != 0);

    return old;
#endif
}

static inline int atomic_or(volatile int *ptr, int val)
{
#if USE_GCC_ATOMICS
    return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
#else
    int old;
    int temp;
    int test;

    do {
        __asm__ volatile(
            "ldrex	%[old], [%[ptr]]\n"
            "orrs	%[temp], %[old], %[val]\n"
            "strex	%[test], %[temp], [%[ptr]]\n"
            : [old]"=&r" (old), [temp]"=&r" (temp), [test]"=&r" (test)
            : [ptr]"r" (ptr), [val]"r" (val)
            : "memory", "cc");

    } while (test != 0);

    return old;
#endif
}

static inline int atomic_and(volatile int *ptr, int val)
{
#if USE_GCC_ATOMICS
    return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
#else
    int old;
    int temp;
    int test;

    do {
        __asm__ volatile(
            "ldrex	%[old], [%[ptr]]\n"
            "ands	%[temp], %[old], %[val]\n"
            "strex	%[test], %[temp], [%[ptr]]\n"
            : [old]"=&r" (old), [temp]"=&r" (temp), [test]"=&r" (test)
            : [ptr]"r" (ptr), [val]"r" (val)
            : "memory", "cc");

    } while (test != 0);

    return old;
#endif
}

static inline int atomic_swap(volatile int *ptr, int val)
{
#if USE_GCC_ATOMICS
    return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
#else
    int old;
    int test;

    do {
        __asm__ volatile(
            "ldrex	%[old], [%[ptr]]\n"
            "strex	%[test], %[val], [%[ptr]]\n"
            : [old]"=&r" (old), [test]"=&r" (test)
            : [ptr]"r" (ptr), [val]"r" (val)
            : "memory");

    } while (test != 0);

    return old;
#endif
}

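/*
 * Thumb and -M profile builds cannot use a conditionally executed strexeq
 * here (it would require an IT block), so cmpxchg branches around the store
 * instead; the ARM-mode path keeps the shorter strexeq form.
 */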
static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval)
{
    int old;
    int test;

    do {
        __asm__ volatile(
            "ldrex	%[old], [%[ptr]]\n"
            "mov	%[test], #0\n"
            "teq	%[old], %[oldval]\n"
#if (ARM_ISA_ARMV7M || __thumb__)
            "bne	0f\n"
            "strex	%[test], %[newval], [%[ptr]]\n"
            "0:\n"
#else
            "strexeq %[test], %[newval], [%[ptr]]\n"
#endif
            : [old]"=&r" (old), [test]"=&r" (test)
            : [ptr]"r" (ptr), [oldval]"Ir" (oldval), [newval]"r" (newval)
            : "cc");

    } while (test != 0);

    return old;
}
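
/*
 * Usage sketch (illustrative only, not part of this header): a crude
 * test-and-set lock built on the primitives above, assuming a caller-owned
 * 'int lock' initialized to 0:
 *
 *   while (atomic_cmpxchg(&lock, 0, 1) != 0)
 *       ;                       // spin until we observed 0 and stored 1
 *   // ... critical section ...
 *   atomic_swap(&lock, 0);      // release
 */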
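/*
 * Cortex-M reads the DWT cycle counter (DWT_CYCCNT at 0xE0001004); ARMv7-A/R
 * reads PMCCNTR, the PMU cycle counter, via CP15 c9,c13,0. Both assume the
 * counter has been enabled elsewhere during platform init.
 */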
static inline uint32_t arch_cycle_count(void)
{
#if ARM_ISA_ARMV7M
#if ENABLE_CYCLE_COUNTER
#define DWT_CYCCNT (0xE0001004)
    return *REG32(DWT_CYCCNT);
#else
    return 0;
#endif
#elif ARM_ISA_ARMV7
    uint32_t count;
    __asm__ volatile("mrc		p15, 0, %0, c9, c13, 0"
                     : "=r" (count)
                    );
    return count;
#else
//#warning no arch_cycle_count implementation
    return 0;
#endif
}

#if WITH_SMP && ARM_ISA_ARMV7
extern uint arm_curr_cpu_num(void);
static inline uint arch_curr_cpu_num(void)
{
    return arm_curr_cpu_num();
}
#else
static inline uint arch_curr_cpu_num(void)
{
    return 0;
}
#endif

/* defined in kernel/thread.h */

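/*
 * On A/R-profile cores the current thread pointer lives in TPIDRPRW, the
 * privileged-only software thread ID register, so each CPU keeps its own
 * value with no global state. M-profile builds fall back to a single global
 * pointer since they run a single CPU.
 */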
#if !ARM_ISA_ARMV7M
/* use the cpu local thread context pointer to store current_thread */
static inline struct thread *get_current_thread(void)
{
    return (struct thread *)arm_read_tpidrprw();
}

static inline void set_current_thread(struct thread *t)
{
    arm_write_tpidrprw((uint32_t)t);
}
#else // ARM_ISA_ARMV7M

/* use a global pointer to store the current_thread */
extern struct thread *_current_thread;

static inline struct thread *get_current_thread(void)
{
    return _current_thread;
}

static inline void set_current_thread(struct thread *t)
{
    _current_thread = t;
}

#endif // !ARM_ISA_ARMV7M

#elif ARM_ISA_ARMV6M // cortex-m0 cortex-m0+

static inline void arch_enable_fiqs(void)
{
    CF;
    __asm__ volatile("cpsie f");
}

static inline void arch_disable_fiqs(void)
{
    __asm__ volatile("cpsid f");
    CF;
}

static inline bool arch_fiqs_disabled(void)
{
    unsigned int state;

    __asm__ volatile("mrs %0, cpsr" : "=r"(state));
    state &= (1<<6);

    return !!state;
}

static inline void arch_enable_ints(void)
{
    CF;
    __asm__ volatile("cpsie i");
}

static inline void arch_disable_ints(void)
{
    __asm__ volatile("cpsid i");
    CF;
}

static inline bool arch_ints_disabled(void)
{
    unsigned int state;

    __asm__ volatile("mrs %0, primask" : "=r"(state));
    state &= 0x1;
    return !!state;
}

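/*
 * ARMv6-M (Cortex-M0/M0+) has no ldrex/strex, so these atomics briefly mask
 * interrupts around a plain read-modify-write. That is only safe on a
 * uniprocessor, which is all this profile supports here.
 */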
static inline int atomic_add(volatile int *ptr, int val)
{
    int temp;
    bool state;

    state = arch_ints_disabled();
    arch_disable_ints();
    temp = *ptr;
    *ptr = temp + val;
    if (!state)
        arch_enable_ints();
    return temp;
}

static inline int atomic_and(volatile int *ptr, int val)
{
    int temp;
    bool state;

    state = arch_ints_disabled();
    arch_disable_ints();
    temp = *ptr;
    *ptr = temp & val;
    if (!state)
        arch_enable_ints();
    return temp;
}

static inline int atomic_or(volatile int *ptr, int val)
{
    int temp;
    bool state;

    state = arch_ints_disabled();
    arch_disable_ints();
    temp = *ptr;
    *ptr = temp | val;
    if (!state)
        arch_enable_ints();
    return temp;
}

static inline int atomic_swap(volatile int *ptr, int val)
{
    int temp;
    bool state;

    state = arch_ints_disabled();
    arch_disable_ints();
    temp = *ptr;
    *ptr = val;
    if (!state)
        arch_enable_ints();
    return temp;
}

static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval)
{
    int temp;
    bool state;

    state = arch_ints_disabled();
    arch_disable_ints();
    temp = *ptr;
    if (temp == oldval) {
        *ptr = newval;
    }
    if (!state)
        arch_enable_ints();
    return temp;
}

static inline uint32_t arch_cycle_count(void)
{
    return 0;
}

static inline uint arch_curr_cpu_num(void)
{
    return 0;
}

/* use a global pointer to store the current_thread */
extern struct thread *_current_thread;

static inline struct thread *get_current_thread(void)
{
    return _current_thread;
}

static inline void set_current_thread(struct thread *t)
{
    _current_thread = t;
}

#else // pre-armv6 || (armv6 & thumb)

/* for pre-armv6 the bodies of these are too big to inline, call an assembly stub version */
void _arch_enable_ints(void);
void _arch_disable_ints(void);

int _atomic_add(volatile int *ptr, int val);
int _atomic_and(volatile int *ptr, int val);
int _atomic_or(volatile int *ptr, int val);
int _atomic_swap(volatile int *ptr, int val);
int _atomic_cmpxchg(volatile int *ptr, int oldval, int newval);

uint32_t _arch_cycle_count(void);

static inline int atomic_add(volatile int *ptr, int val) { return _atomic_add(ptr, val); }
static inline int atomic_and(volatile int *ptr, int val) { return _atomic_and(ptr, val); }
static inline int atomic_or(volatile int *ptr, int val) { return _atomic_or(ptr, val); }
static inline int atomic_swap(volatile int *ptr, int val) { return _atomic_swap(ptr, val); }
static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) { return _atomic_cmpxchg(ptr, oldval, newval); }

static inline void arch_enable_ints(void) { _arch_enable_ints(); }
static inline void arch_disable_ints(void) { _arch_disable_ints(); }

static inline uint32_t arch_cycle_count(void) { return _arch_cycle_count(); }

#endif

/**
 * arch_extract_return_addr - strip any memory tags from a return address in LR
 *
 * Return: lr is returned without modification; 32-bit ARM does not tag
 * return addresses.
 */
static inline uintptr_t arch_extract_return_addr(uintptr_t lr) {
    return lr;
}

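/*
 * mb/wmb/rmb are full system barriers (DSB). The smp_* variants only need to
 * order accesses between CPUs, so they use the lighter DMB when built with
 * SMP and collapse to a compiler-only fence (CF) on uniprocessor builds.
 */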
#define mb()        DSB
#define wmb()       DSB
#define rmb()       DSB

#ifdef WITH_SMP
#define smp_mb()    DMB
#define smp_wmb()   DMB
#define smp_rmb()   DMB
#else
#define smp_mb()    CF
#define smp_wmb()   CF
#define smp_rmb()   CF
#endif

__END_CDECLS;

#endif // ASSEMBLY