xref: /aosp_15_r20/external/llvm-libc/src/__support/CPP/atomic.h (revision 71db0c75aadcf003ffe3238005f61d7618a3fead)
//===-- A simple equivalent of std::atomic ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H
#define LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H

#include "src/__support/macros/attributes.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/properties/architectures.h"

#include "type_traits.h"

namespace LIBC_NAMESPACE_DECL {
namespace cpp {

enum class MemoryOrder : int {
  RELAXED = __ATOMIC_RELAXED,
  CONSUME = __ATOMIC_CONSUME,
  ACQUIRE = __ATOMIC_ACQUIRE,
  RELEASE = __ATOMIC_RELEASE,
  ACQ_REL = __ATOMIC_ACQ_REL,
  SEQ_CST = __ATOMIC_SEQ_CST
};

// These are a clang extension; see the clang documentation for more
// information:
// https://clang.llvm.org/docs/LanguageExtensions.html#scoped-atomic-builtins.
enum class MemoryScope : int {
#if defined(__MEMORY_SCOPE_SYSTEM) && defined(__MEMORY_SCOPE_DEVICE)
  SYSTEM = __MEMORY_SCOPE_SYSTEM,
  DEVICE = __MEMORY_SCOPE_DEVICE,
#else
  SYSTEM = 0,
  DEVICE = 0,
#endif
};

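// Illustrative usage sketch (not part of the upstream header): on a target
// that provides the scoped builtins, a load that only needs to be ordered
// with respect to other threads on the same GPU device could be written as:
//
//   cpp::Atomic<uint32_t> flag;
//   uint32_t v = flag.load(cpp::MemoryOrder::ACQUIRE, cpp::MemoryScope::DEVICE);
//
// On targets without the scoped builtins, the scope argument is ignored and
// the plain __atomic builtins are used, as the member functions below show.
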
template <typename T> struct Atomic {
  // For now, we will restrict to only arithmetic types.
  static_assert(is_arithmetic_v<T>, "Only arithmetic types can be atomic.");

private:
  // The value stored should be appropriately aligned so that
  // hardware instructions used to perform atomic operations work
  // correctly.
  static constexpr int ALIGNMENT = sizeof(T) > alignof(T) ? sizeof(T)
                                                          : alignof(T);

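  // Note (not part of the upstream header): for most arithmetic types
  // sizeof(T) == alignof(T), but not always. On 32-bit x86, for example, the
  // System V ABI aligns long long (size 8) to only 4 bytes, while lock-free
  // atomic instructions generally expect a naturally aligned operand. Taking
  // the larger of sizeof(T) and alignof(T) keeps e.g. Atomic<long long>
  // naturally aligned.
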
public:
  using value_type = T;

  // We keep the internal value public so that its address can be taken.
  // This is useful in places like the Linux futex operations, where
  // we need pointers to the memory of the atomic values. Load and store
  // operations should be performed using the atomic methods, however.
  alignas(ALIGNMENT) value_type val;
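  // Illustrative sketch (not part of the upstream header): a futex wait would
  // pass the address of the payload directly to the kernel, along the lines
  // of (the wrapper name below is hypothetical):
  //
  //   cpp::Atomic<uint32_t> futex_word;
  //   futex_syscall(&futex_word.val, FUTEX_WAIT, /*expected=*/0, ...);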

  constexpr Atomic() = default;

  // Initializes the value without using atomic operations.
  constexpr Atomic(value_type v) : val(v) {}

  Atomic(const Atomic &) = delete;
  Atomic &operator=(const Atomic &) = delete;

  // Atomic load.
  operator T() { return __atomic_load_n(&val, int(MemoryOrder::SEQ_CST)); }

  T load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
         [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_load_n)
    return __scoped_atomic_load_n(&val, int(mem_ord), (int)(mem_scope));
#else
    return __atomic_load_n(&val, int(mem_ord));
#endif
  }

  // Atomic store.
  T operator=(T rhs) {
    __atomic_store_n(&val, rhs, int(MemoryOrder::SEQ_CST));
    return rhs;
  }

  void store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_store_n)
    __scoped_atomic_store_n(&val, rhs, int(mem_ord), (int)(mem_scope));
#else
    __atomic_store_n(&val, rhs, int(mem_ord));
#endif
  }

  // Atomic compare exchange.
  bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange_n(&val, &expected, desired, false,
                                       int(mem_ord), int(mem_ord));
  }

  // Atomic compare exchange (separate success and failure memory orders).
  bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder success_order,
      MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange_n(&val, &expected, desired, false,
                                       static_cast<int>(success_order),
                                       static_cast<int>(failure_order));
  }

  // Atomic compare exchange (weak version).
  bool compare_exchange_weak(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange_n(&val, &expected, desired, true,
                                       static_cast<int>(mem_ord),
                                       static_cast<int>(mem_ord));
  }

  // Atomic compare exchange (weak version with separate success and failure
  // memory orders).
  bool compare_exchange_weak(
      T &expected, T desired, MemoryOrder success_order,
      MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange_n(&val, &expected, desired, true,
                                       static_cast<int>(success_order),
                                       static_cast<int>(failure_order));
  }
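
  // Illustrative sketch (not part of the upstream header): the weak form may
  // fail spuriously even when the value matches, so it is normally used in a
  // retry loop. For a hypothetical Atomic<T> `a` and limit `MAX`, a saturating
  // increment could look like:
  //
  //   T old = a.load(MemoryOrder::RELAXED);
  //   while (old != MAX &&
  //          !a.compare_exchange_weak(old, old + 1, MemoryOrder::ACQ_REL,
  //                                   MemoryOrder::RELAXED)) {
  //     // On failure `old` is refreshed with the current value; try again.
  //   }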

  T exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_exchange_n)
    return __scoped_atomic_exchange_n(&val, desired, int(mem_ord),
                                      (int)(mem_scope));
#else
    return __atomic_exchange_n(&val, desired, int(mem_ord));
#endif
  }

  T fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_fetch_add)
    return __scoped_atomic_fetch_add(&val, increment, int(mem_ord),
                                     (int)(mem_scope));
#else
    return __atomic_fetch_add(&val, increment, int(mem_ord));
#endif
  }

  T fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_fetch_or)
    return __scoped_atomic_fetch_or(&val, mask, int(mem_ord), (int)(mem_scope));
#else
    return __atomic_fetch_or(&val, mask, int(mem_ord));
#endif
  }

  T fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_fetch_and)
    return __scoped_atomic_fetch_and(&val, mask, int(mem_ord),
                                     (int)(mem_scope));
#else
    return __atomic_fetch_and(&val, mask, int(mem_ord));
#endif
  }

  T fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_fetch_sub)
    return __scoped_atomic_fetch_sub(&val, decrement, int(mem_ord),
                                     (int)(mem_scope));
#else
    return __atomic_fetch_sub(&val, decrement, int(mem_ord));
#endif
  }

  // Set the value without using an atomic operation. This is useful
  // in initializing atomic values without a constructor.
  void set(T rhs) { val = rhs; }
};
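
// Illustrative usage sketch (not part of the upstream header): a simple event
// counter built on Atomic<T>. The name `counter` below is hypothetical.
//
//   cpp::Atomic<unsigned> counter(0);
//   counter.fetch_add(1, cpp::MemoryOrder::RELAXED); // bump the count
//   unsigned seen = counter.load();                  // defaults to SEQ_CST
//   counter.store(0);                                // reset, SEQ_CST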

// Issue a thread fence with the given memory ordering.
LIBC_INLINE void atomic_thread_fence([[maybe_unused]] MemoryOrder mem_ord) {
// The NVPTX backend currently does not support atomic thread fences, so we
// use a full system fence instead.
#ifdef LIBC_TARGET_ARCH_IS_NVPTX
  __nvvm_membar_sys();
#else
  __atomic_thread_fence(static_cast<int>(mem_ord));
#endif
}
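
// Illustrative sketch (not part of the upstream header): a release fence
// before a relaxed store pairs with an acquire fence after a relaxed load of
// the same atomic. The names `data`, `ready`, and `use` are hypothetical.
//
//   // Producer thread:
//   data = 42;                                   // plain non-atomic write
//   atomic_thread_fence(MemoryOrder::RELEASE);
//   ready.store(1, MemoryOrder::RELAXED);
//
//   // Consumer thread:
//   while (ready.load(MemoryOrder::RELAXED) == 0)
//     ;
//   atomic_thread_fence(MemoryOrder::ACQUIRE);
//   use(data);                                   // guaranteed to observe 42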

// Establishes memory synchronization ordering of non-atomic and relaxed atomic
// accesses, as instructed by mem_ord, between a thread and a signal handler
// executed on the same thread. This is equivalent to atomic_thread_fence,
// except that no hardware instructions for memory ordering are issued; only
// reordering of the instructions by the compiler is suppressed as mem_ord
// instructs.
LIBC_INLINE void atomic_signal_fence([[maybe_unused]] MemoryOrder mem_ord) {
#if __has_builtin(__atomic_signal_fence)
  __atomic_signal_fence(static_cast<int>(mem_ord));
#else
  // If the builtin is not available, use inline asm as a full compiler
  // barrier.
  asm volatile("" ::: "memory");
#endif
}
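
// Illustrative sketch (not part of the upstream header): the typical use is
// ordering plain accesses against a signal handler that runs on the same
// thread. The names `data`, `ready`, `compute`, and `use` are hypothetical.
//
//   // In normal code on some thread:
//   data = compute();                            // plain write
//   atomic_signal_fence(MemoryOrder::RELEASE);   // compiler barrier only
//   ready.store(1, MemoryOrder::RELAXED);
//
//   // In a signal handler invoked on the same thread:
//   if (ready.load(MemoryOrder::RELAXED) != 0) {
//     atomic_signal_fence(MemoryOrder::ACQUIRE);
//     use(data);                                 // sees the completed write
//   }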

} // namespace cpp
} // namespace LIBC_NAMESPACE_DECL

#endif // LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H