// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"internal/goarch"
	"internal/runtime/atomic"
	"reflect"
	. "runtime"
	"runtime/debug"
	"testing"
	"time"
	"unsafe"
)

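// The types below span the cases the arena allocator treats differently:
// small and medium objects with and without pointers, sizes that tile an
// arena chunk evenly and sizes that don't, and "large" objects bigger than
// a single arena chunk.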
type smallScalar struct {
	X uintptr
}
type smallPointer struct {
	X *smallPointer
}
type smallPointerMix struct {
	A *smallPointer
	B byte
	C *smallPointer
	D [11]byte
}
type mediumScalarEven [8192]byte
type mediumScalarOdd [3321]byte
type mediumPointerEven [1024]*smallPointer
type mediumPointerOdd [1023]*smallPointer

type largeScalar [UserArenaChunkBytes + 1]byte
type largePointer [UserArenaChunkBytes/unsafe.Sizeof(&smallPointer{}) + 1]*smallPointer

func TestUserArena(t *testing.T) {
	// Set GOMAXPROCS to 2 so we don't run too many of these
	// tests in parallel.
	defer GOMAXPROCS(GOMAXPROCS(2))

	// Start a subtest so that we can clean up after any parallel tests within.
	t.Run("Alloc", func(t *testing.T) {
		ss := &smallScalar{5}
		runSubTestUserArenaNew(t, ss, true)

		sp := &smallPointer{new(smallPointer)}
		runSubTestUserArenaNew(t, sp, true)

		spm := &smallPointerMix{sp, 5, nil, [11]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}}
		runSubTestUserArenaNew(t, spm, true)

		mse := new(mediumScalarEven)
		for i := range mse {
			mse[i] = 121
		}
		runSubTestUserArenaNew(t, mse, true)

		mso := new(mediumScalarOdd)
		for i := range mso {
			mso[i] = 122
		}
		runSubTestUserArenaNew(t, mso, true)

		mpe := new(mediumPointerEven)
		for i := range mpe {
			mpe[i] = sp
		}
		runSubTestUserArenaNew(t, mpe, true)

		mpo := new(mediumPointerOdd)
		for i := range mpo {
			mpo[i] = sp
		}
		runSubTestUserArenaNew(t, mpo, true)

		ls := new(largeScalar)
		for i := range ls {
			ls[i] = 123
		}
		// Not in parallel because we don't want to hold this large allocation live.
		runSubTestUserArenaNew(t, ls, false)

		lp := new(largePointer)
		for i := range lp {
			lp[i] = sp
		}
		// Not in parallel because we don't want to hold this large allocation live.
		runSubTestUserArenaNew(t, lp, false)

		sss := make([]smallScalar, 25)
		for i := range sss {
			sss[i] = smallScalar{12}
		}
		runSubTestUserArenaSlice(t, sss, true)

		mpos := make([]mediumPointerOdd, 5)
		for i := range mpos {
			mpos[i] = *mpo
		}
		runSubTestUserArenaSlice(t, mpos, true)

		sps := make([]smallPointer, UserArenaChunkBytes/unsafe.Sizeof(smallPointer{})+1)
		for i := range sps {
			sps[i] = *sp
		}
		// Not in parallel because we don't want to hold this large allocation live.
		runSubTestUserArenaSlice(t, sps, false)

		// Test zero-sized types.
		t.Run("struct{}", func(t *testing.T) {
			arena := NewUserArena()
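			// The test-only UserArena.New API takes a *any holding a
			// typed nil pointer and replaces it with a pointer to an
			// arena-allocated value of that type.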
			var x any
			x = (*struct{})(nil)
			arena.New(&x)
			if v := unsafe.Pointer(x.(*struct{})); v != ZeroBase {
				t.Errorf("expected zero-sized type to be allocated as zerobase: got %x, want %x", v, ZeroBase)
			}
			arena.Free()
		})
		t.Run("[]struct{}", func(t *testing.T) {
			arena := NewUserArena()
			var sl []struct{}
			arena.Slice(&sl, 10)
			if v := unsafe.Pointer(&sl[0]); v != ZeroBase {
				t.Errorf("expected zero-sized type to be allocated as zerobase: got %x, want %x", v, ZeroBase)
			}
			arena.Free()
		})
		t.Run("[]int (cap 0)", func(t *testing.T) {
			arena := NewUserArena()
			var sl []int
			arena.Slice(&sl, 0)
			if len(sl) != 0 {
				t.Errorf("expected requested zero-sized slice to still have zero length: got %d, want 0", len(sl))
			}
			arena.Free()
		})
	})

	// Run a GC cycle to get any arenas off the quarantine list.
	GC()

	if n := GlobalWaitingArenaChunks(); n != 0 {
		t.Errorf("expected zero waiting arena chunks, found %d", n)
	}
}
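
// Note: these tests drive arenas through the runtime's test-only hooks
// (NewUserArena, UserArenaClone, and friends). The public, experimental
// arena package (behind GOEXPERIMENT=arenas) wraps the same machinery; a
// minimal sketch of the user-facing equivalent of the pattern above:
//
//	a := arena.NewArena()
//	s := arena.New[smallScalar](a) // cf. arena.New(&x) in these tests
//	sl := arena.MakeSlice[int](a, 10, 10)
//	a.Free()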

func runSubTestUserArenaNew[S comparable](t *testing.T, value *S, parallel bool) {
	t.Run(reflect.TypeOf(value).Elem().Name(), func(t *testing.T) {
		if parallel {
			t.Parallel()
		}

		// Allocate and write data, enough to exhaust the arena.
		//
		// This is an underestimate, likely leaving some space in the arena. That's a good thing,
		// because it gives us coverage of boundary cases.
		n := int(UserArenaChunkBytes / unsafe.Sizeof(*value))
		if n == 0 {
			n = 1
		}

		// Create a new arena and do a bunch of operations on it.
		arena := NewUserArena()

		arenaValues := make([]*S, 0, n)
		for j := 0; j < n; j++ {
			var x any
			x = (*S)(nil)
			arena.New(&x)
			s := x.(*S)
			*s = *value
			arenaValues = append(arenaValues, s)
		}
		// Check integrity of allocated data.
		for _, s := range arenaValues {
			if *s != *value {
				t.Errorf("failed integrity check: got %#v, want %#v", *s, *value)
			}
		}

		// Release the arena.
		arena.Free()
	})
}

func runSubTestUserArenaSlice[S comparable](t *testing.T, value []S, parallel bool) {
	t.Run("[]"+reflect.TypeOf(value).Elem().Name(), func(t *testing.T) {
		if parallel {
			t.Parallel()
		}

		// Allocate and write data, enough to exhaust the arena.
		//
		// This is an underestimate, likely leaving some space in the arena. That's a good thing,
		// because it gives us coverage of boundary cases.
		n := int(UserArenaChunkBytes / (unsafe.Sizeof(*new(S)) * uintptr(cap(value))))
		if n == 0 {
			n = 1
		}

		// Create a new arena and do a bunch of operations on it.
		arena := NewUserArena()

		arenaValues := make([][]S, 0, n)
		for j := 0; j < n; j++ {
			var sl []S
			arena.Slice(&sl, cap(value))
			copy(sl, value)
			arenaValues = append(arenaValues, sl)
		}
		// Check integrity of allocated data.
		for _, sl := range arenaValues {
			for i := range sl {
				got := sl[i]
				want := value[i]
				if got != want {
					t.Errorf("failed integrity check: got %#v, want %#v at index %d", got, want, i)
				}
			}
		}

		// Release the arena.
		arena.Free()
	})
}

func TestUserArenaLiveness(t *testing.T) {
	t.Run("Free", func(t *testing.T) {
		testUserArenaLiveness(t, false)
	})
	t.Run("Finalizer", func(t *testing.T) {
		testUserArenaLiveness(t, true)
	})
}

func testUserArenaLiveness(t *testing.T, useArenaFinalizer bool) {
	// Disable the GC so that there's zero chance we try doing anything arena related *during*
	// a mark phase, since otherwise a bunch of arenas could end up on the fault list.
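	// (SetGCPercent returns the previous setting, so this defer restores it
	// when the test returns.)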
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	// Defensively ensure that any full arena chunks leftover from previous tests have been cleared.
	GC()
	GC()

	arena := NewUserArena()

	// Allocate a few pointer-ful but uninitialized objects so that later we can
	// place a reference to a heap object at a more interesting location.
	for i := 0; i < 3; i++ {
		var x any
		x = (*mediumPointerOdd)(nil)
		arena.New(&x)
	}

	var x any
	x = (*smallPointerMix)(nil)
	arena.New(&x)
	v := x.(*smallPointerMix)

	var safeToFinalize atomic.Bool
	var finalized atomic.Bool
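	// Point v.C, which lives in the arena, at a heap object with a finalizer.
	// As long as the arena is reachable, the GC must treat v.C as a live
	// reference, so the finalizer must not run early.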
	v.C = new(smallPointer)
	SetFinalizer(v.C, func(_ *smallPointer) {
		if !safeToFinalize.Load() {
			t.Error("finalized arena-referenced object unexpectedly")
		}
		finalized.Store(true)
	})

	// Make sure it stays alive.
	GC()
	GC()

	// In order to ensure the object can be freed, we now need to make sure to use
	// the entire arena. Exhaust the rest of the arena.

	for i := 0; i < int(UserArenaChunkBytes/unsafe.Sizeof(mediumScalarEven{})); i++ {
		var x any
		x = (*mediumScalarEven)(nil)
		arena.New(&x)
	}

	// Make sure it stays alive again.
	GC()
	GC()

	v = nil

	safeToFinalize.Store(true)
	if useArenaFinalizer {
		arena = nil

		// Try to queue the arena finalizer.
		GC()
		GC()

		// In order for the finalizer we actually want to run to execute,
		// we need to make sure this one runs first.
		if !BlockUntilEmptyFinalizerQueue(int64(2 * time.Second)) {
			t.Fatal("finalizer queue was never emptied")
		}
	} else {
		// Free the arena explicitly.
		arena.Free()
	}

	// Try to queue the object's finalizer that we set earlier.
	GC()
	GC()

	if !BlockUntilEmptyFinalizerQueue(int64(2 * time.Second)) {
		t.Fatal("finalizer queue was never emptied")
	}
	if !finalized.Load() {
		t.Error("expected arena-referenced object to be finalized")
	}
}

func TestUserArenaClearsPointerBits(t *testing.T) {
	// This is a regression test for a serious issue wherein if pointer bits
	// aren't properly cleared, it's possible to allocate scalar data down
	// into a previously pointer-ful area, causing misinterpretation by the GC.

	// Create a large object, grab a pointer into it, and free it.
	x := new([8 << 20]byte)
	xp := uintptr(unsafe.Pointer(&x[124]))
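	// xp is a uintptr, not a pointer, so by itself it is invisible to the
	// GC and should not keep x alive.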
	var finalized atomic.Bool
	SetFinalizer(x, func(_ *[8 << 20]byte) {
		finalized.Store(true)
	})

	// Write three chunks' worth of pointer data. Three gives us a
	// high likelihood that when we write two later, we'll get the behavior
	// we want.
	a := NewUserArena()
	for i := 0; i < int(UserArenaChunkBytes/goarch.PtrSize*3); i++ {
		var x any
		x = (*smallPointer)(nil)
		a.New(&x)
	}
	a.Free()

	// Recycle the arena chunks.
	GC()
	GC()

	a = NewUserArena()
	for i := 0; i < int(UserArenaChunkBytes/goarch.PtrSize*2); i++ {
		var x any
		x = (*smallScalar)(nil)
		a.New(&x)
		v := x.(*smallScalar)
		// Write a pointer that should not keep x alive.
		*v = smallScalar{xp}
	}
	KeepAlive(x)
	x = nil

	// Try to free x.
	GC()
	GC()

	if !BlockUntilEmptyFinalizerQueue(int64(2 * time.Second)) {
		t.Fatal("finalizer queue was never emptied")
	}
	if !finalized.Load() {
		t.Fatal("heap allocation kept alive through non-pointer reference")
	}

	// Clean up the arena.
	a.Free()
	GC()
	GC()
}

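// The Clone tests below rely on UserArenaClone copying arena-backed
// pointers, slices, and strings to the heap, while returning values that
// are not arena-backed unchanged.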
func TestUserArenaCloneString(t *testing.T) {
	a := NewUserArena()

	// A static string (not on heap or arena)
	var s = "abcdefghij"

	// Create a byte slice in the arena, initialize it with s
	var b []byte
	a.Slice(&b, len(s))
	copy(b, s)

	// Create a string using the same memory as the byte slice, hence in
	// the arena. This could be an arena API, but it hasn't really been needed
	// yet.
	as := unsafe.String(&b[0], len(b))

	// Clone should make a copy of as, since it is in the arena.
	asCopy := UserArenaClone(as)
	if unsafe.StringData(as) == unsafe.StringData(asCopy) {
		t.Error("Clone did not make a copy")
	}

	// Clone should make a copy of subAs, since subAs is just part of as and so is in the arena.
	subAs := as[1:3]
	subAsCopy := UserArenaClone(subAs)
	if unsafe.StringData(subAs) == unsafe.StringData(subAsCopy) {
		t.Error("Clone did not make a copy")
	}
	if len(subAs) != len(subAsCopy) {
		t.Errorf("Clone made an incorrect copy (bad length): %d -> %d", len(subAs), len(subAsCopy))
	} else {
		for i := range subAs {
			if subAs[i] != subAsCopy[i] {
				t.Errorf("Clone made an incorrect copy (data at index %d): %d -> %d", i, subAs[i], subAsCopy[i])
			}
		}
	}

	// Clone should not make a copy of doubleAs, since doubleAs will be on the heap.
	doubleAs := as + as
	doubleAsCopy := UserArenaClone(doubleAs)
	if unsafe.StringData(doubleAs) != unsafe.StringData(doubleAsCopy) {
		t.Error("Clone should not have made a copy")
	}

	// Clone should not make a copy of s, since s is a static string.
	sCopy := UserArenaClone(s)
	if unsafe.StringData(s) != unsafe.StringData(sCopy) {
		t.Error("Clone should not have made a copy")
	}

	a.Free()
}

func TestUserArenaClonePointer(t *testing.T) {
	a := NewUserArena()

	// Clone should not make a copy of a heap-allocated smallScalar.
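	// (Escape is a test-only hook that forces its argument to escape to the
	// heap, so x is guaranteed to be heap-allocated.)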
	x := Escape(new(smallScalar))
	xCopy := UserArenaClone(x)
	if unsafe.Pointer(x) != unsafe.Pointer(xCopy) {
		t.Errorf("Clone should not have made a copy: %#v -> %#v", x, xCopy)
	}

	// Clone should make a copy of an arena-allocated smallScalar.
	var i any
	i = (*smallScalar)(nil)
	a.New(&i)
	xArena := i.(*smallScalar)
	xArenaCopy := UserArenaClone(xArena)
	if unsafe.Pointer(xArena) == unsafe.Pointer(xArenaCopy) {
		t.Errorf("Clone should have made a copy: %#v -> %#v", xArena, xArenaCopy)
	}
	if *xArena != *xArenaCopy {
		t.Errorf("Clone made an incorrect copy: %#v -> %#v", *xArena, *xArenaCopy)
	}

	a.Free()
}

func TestUserArenaCloneSlice(t *testing.T) {
	a := NewUserArena()

	// A static string (not on heap or arena)
	var s = "klmnopqrstuv"

	// Create a byte slice in the arena, initialize it with s
	var b []byte
	a.Slice(&b, len(s))
	copy(b, s)

	// Clone should make a copy of b, since it is in the arena.
	bCopy := UserArenaClone(b)
	if unsafe.Pointer(&b[0]) == unsafe.Pointer(&bCopy[0]) {
		t.Errorf("Clone did not make a copy: %#v -> %#v", b, bCopy)
	}
	if len(b) != len(bCopy) {
		t.Errorf("Clone made an incorrect copy (bad length): %d -> %d", len(b), len(bCopy))
	} else {
		for i := range b {
			if b[i] != bCopy[i] {
				t.Errorf("Clone made an incorrect copy (data at index %d): %d -> %d", i, b[i], bCopy[i])
			}
		}
	}

	// Clone should make a copy of bSub, since bSub is just part of b and so is in the arena.
	bSub := b[1:3]
	bSubCopy := UserArenaClone(bSub)
	if unsafe.Pointer(&bSub[0]) == unsafe.Pointer(&bSubCopy[0]) {
		t.Errorf("Clone did not make a copy: %#v -> %#v", bSub, bSubCopy)
	}
	if len(bSub) != len(bSubCopy) {
		t.Errorf("Clone made an incorrect copy (bad length): %d -> %d", len(bSub), len(bSubCopy))
	} else {
		for i := range bSub {
			if bSub[i] != bSubCopy[i] {
				t.Errorf("Clone made an incorrect copy (data at index %d): %d -> %d", i, bSub[i], bSubCopy[i])
			}
		}
	}

	// Clone should not make a copy of bNotArena, since it will not be in an arena.
	bNotArena := make([]byte, len(s))
	copy(bNotArena, s)
	bNotArenaCopy := UserArenaClone(bNotArena)
	if unsafe.Pointer(&bNotArena[0]) != unsafe.Pointer(&bNotArenaCopy[0]) {
		t.Error("Clone should not have made a copy")
	}

	a.Free()
}

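// Clone is only expected to handle pointers, slices, and strings; passing
// any other kind of value, like the plain struct here, should panic.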
func TestUserArenaClonePanic(t *testing.T) {
	var s string
	func() {
		x := smallScalar{2}
		defer func() {
			if v := recover(); v != nil {
				s = v.(string)
			}
		}()
		UserArenaClone(x)
	}()
	if s == "" {
		t.Errorf("expected panic from Clone")
	}
}