1// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5//go:build arm
6
7package atomic
8
9import (
10	"internal/cpu"
11	"unsafe"
12)
13
const (
	// offsetARMHasV7Atomics is the byte offset of the HasV7Atomics
	// feature flag within cpu.ARM. NOTE(review): presumably consumed by
	// assembly in this package to pick a fast path — verify against the
	// .s files.
	offsetARMHasV7Atomics = unsafe.Offsetof(cpu.ARM.HasV7Atomics)
)
17
18// Export some functions via linkname to assembly in sync/atomic.
19//
20//go:linkname Xchg
21//go:linkname Xchguintptr
22//go:linkname Xadd
23
// spinlock is a minimal test-and-set lock built on the 32-bit Cas.
// The zero value is unlocked (v == 0); v == 1 means the lock is held.
type spinlock struct {
	v uint32
}
27
28//go:nosplit
29func (l *spinlock) lock() {
30	for {
31		if Cas(&l.v, 0, 1) {
32			return
33		}
34	}
35}
36
// unlock releases the lock by atomically clearing l.v.
//
//go:nosplit
func (l *spinlock) unlock() {
	Store(&l.v, 0)
}
41
// locktab is a fixed pool of spinlocks used to serialize the emulated
// 64-bit atomic operations below. Each entry is padded out to a full
// cache line so adjacent locks do not false-share.
var locktab [57]struct {
	l   spinlock
	pad [cpu.CacheLinePadSize - unsafe.Sizeof(spinlock{})]byte
}
46
47func addrLock(addr *uint64) *spinlock {
48	return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
49}
50
51// Atomic add and return new value.
52//
53//go:nosplit
54func Xadd(val *uint32, delta int32) uint32 {
55	for {
56		oval := *val
57		nval := oval + uint32(delta)
58		if Cas(val, oval, nval) {
59			return nval
60		}
61	}
62}
63
// Xadduintptr atomically adds delta to *ptr and returns the new value.
// Body-less declaration: implemented outside Go (assembly).
//
//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
66
67//go:nosplit
68func Xchg(addr *uint32, v uint32) uint32 {
69	for {
70		old := *addr
71		if Cas(addr, old, v) {
72			return old
73		}
74	}
75}
76
77//go:nosplit
78func Xchguintptr(addr *uintptr, v uintptr) uintptr {
79	return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
80}
81
// StorepNoWB atomically stores the pointer v at *addr, without a write
// barrier. Body-less declaration: implemented outside Go (assembly).
// Not noescape -- it installs a pointer to addr.
func StorepNoWB(addr unsafe.Pointer, v unsafe.Pointer)

// Store atomically stores v at *addr. Implemented outside Go (assembly).
//
//go:noescape
func Store(addr *uint32, v uint32)

// StoreRel is a store with release ordering. Implemented outside Go
// (assembly).
//
//go:noescape
func StoreRel(addr *uint32, v uint32)

// StoreReluintptr is the uintptr form of StoreRel. Implemented outside
// Go (assembly).
//
//go:noescape
func StoreReluintptr(addr *uintptr, v uintptr)
93
// goCas64 is a software compare-and-swap for uint64, serialized by a
// per-address spinlock. NOTE(review): presumably the fallback used when
// native 64-bit atomics are unavailable (see offsetARMHasV7Atomics) —
// verify against the assembly.
//
//go:nosplit
func goCas64(addr *uint64, old, new uint64) bool {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var ok bool
	addrLock(addr).lock()
	if *addr == old {
		*addr = new
		ok = true
	}
	addrLock(addr).unlock()
	return ok
}
109
// goXadd64 is a software atomic add for uint64, serialized by a
// per-address spinlock; it returns the new value. The alignment check
// and the nil-fault read are deliberately done before taking the lock.
//
//go:nosplit
func goXadd64(addr *uint64, delta int64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr + uint64(delta)
	*addr = r
	addrLock(addr).unlock()
	return r
}
123
// goXchg64 is a software atomic exchange for uint64, serialized by a
// per-address spinlock; it returns the old value. The alignment check
// and the nil-fault read are deliberately done before taking the lock.
//
//go:nosplit
func goXchg64(addr *uint64, v uint64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr
	*addr = v
	addrLock(addr).unlock()
	return r
}
137
// goLoad64 is a software atomic load for uint64, serialized by a
// per-address spinlock. The alignment check and the nil-fault read are
// deliberately done before taking the lock.
//
//go:nosplit
func goLoad64(addr *uint64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr
	addrLock(addr).unlock()
	return r
}
150
// goStore64 is a software atomic store for uint64, serialized by a
// per-address spinlock. The alignment check and the nil-fault read are
// deliberately done before taking the lock.
//
//go:nosplit
func goStore64(addr *uint64, v uint64) {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	addrLock(addr).lock()
	*addr = v
	addrLock(addr).unlock()
}
161
162//go:nosplit
163func Or8(addr *uint8, v uint8) {
164	// Align down to 4 bytes and use 32-bit CAS.
165	uaddr := uintptr(unsafe.Pointer(addr))
166	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
167	word := uint32(v) << ((uaddr & 3) * 8) // little endian
168	for {
169		old := *addr32
170		if Cas(addr32, old, old|word) {
171			return
172		}
173	}
174}
175
176//go:nosplit
177func And8(addr *uint8, v uint8) {
178	// Align down to 4 bytes and use 32-bit CAS.
179	uaddr := uintptr(unsafe.Pointer(addr))
180	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
181	word := uint32(v) << ((uaddr & 3) * 8)    // little endian
182	mask := uint32(0xFF) << ((uaddr & 3) * 8) // little endian
183	word |= ^mask
184	for {
185		old := *addr32
186		if Cas(addr32, old, old&word) {
187			return
188		}
189	}
190}
191
192//go:nosplit
193func Or(addr *uint32, v uint32) {
194	for {
195		old := *addr
196		if Cas(addr, old, old|v) {
197			return
198		}
199	}
200}
201
202//go:nosplit
203func And(addr *uint32, v uint32) {
204	for {
205		old := *addr
206		if Cas(addr, old, old&v) {
207			return
208		}
209	}
210}
211
// The declarations below are body-less: each is implemented outside Go
// (assembly).

// armcas is a raw 32-bit compare-and-swap primitive.
//
//go:nosplit
func armcas(ptr *uint32, old, new uint32) bool

// Load atomically loads *addr.
//
//go:noescape
func Load(addr *uint32) uint32

// Loadp atomically loads the pointer at addr.
// NO go:noescape annotation; *addr escapes if result escapes (#31525)
func Loadp(addr unsafe.Pointer) unsafe.Pointer

// Load8 atomically loads *addr.
//
//go:noescape
func Load8(addr *uint8) uint8

// LoadAcq is a load with acquire ordering.
//
//go:noescape
func LoadAcq(addr *uint32) uint32

// LoadAcquintptr is the uintptr form of LoadAcq.
//
//go:noescape
func LoadAcquintptr(ptr *uintptr) uintptr

// Cas64 is a 64-bit compare-and-swap; it reports whether the swap
// happened.
//
//go:noescape
func Cas64(addr *uint64, old, new uint64) bool

// CasRel is a compare-and-swap with release ordering.
//
//go:noescape
func CasRel(addr *uint32, old, new uint32) bool

// Xadd64 atomically adds delta to *addr and returns the new value.
//
//go:noescape
func Xadd64(addr *uint64, delta int64) uint64

// Xchg64 atomically exchanges *addr with v and returns the old value.
//
//go:noescape
func Xchg64(addr *uint64, v uint64) uint64

// Load64 atomically loads *addr.
//
//go:noescape
func Load64(addr *uint64) uint64

// Store8 atomically stores v at *addr.
//
//go:noescape
func Store8(addr *uint8, v uint8)

// Store64 atomically stores v at *addr.
//
//go:noescape
func Store64(addr *uint64, v uint64)
250