// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"flag"
	"fmt"
	"internal/race"
	"internal/testenv"
	"os"
	"os/exec"
	"reflect"
	"runtime"
	. "runtime"
	"strings"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

var testMemStatsCount int

func TestMemStats(t *testing.T) {
	testMemStatsCount++

	// Make sure there's at least one forced GC.
	GC()

	// Test that MemStats has sane values.
	st := new(MemStats)
	ReadMemStats(st)

	nz := func(x any) error {
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	le := func(thresh float64) func(any) error {
		return func(x any) error {
			// These sanity tests aren't necessarily valid
			// with high -test.count values, so only run
			// them once.
			if testMemStatsCount > 1 {
				return nil
			}

			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	eq := func(x any) func(any) error {
		return func(y any) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}
	// Of the uint fields, HeapReleased, HeapIdle can be 0.
	// PauseTotalNs can be 0 if timer resolution is poor.
	fields := map[string][]func(any) error{
		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups": {eq(uint64(0))}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC": {nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize": nil,
	}

	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}

	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// We have all pauses, so this should be exact.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}
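
// readMemStatsSketch is a minimal usage sketch, not exercised by any test:
// it shows how application code might poll the same statistics that
// TestMemStats validates above. The function name and the particular
// fields printed are illustrative choices, not a recommendation.
func readMemStatsSketch() {
	var st runtime.MemStats
	runtime.ReadMemStats(&st)
	fmt.Printf("HeapAlloc=%d HeapSys=%d NumGC=%d\n", st.HeapAlloc, st.HeapSys, st.NumGC)
}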

func TestStringConcatenationAllocs(t *testing.T) {
	n := testing.AllocsPerRun(1e3, func() {
		b := make([]byte, 10)
		for i := 0; i < 10; i++ {
			b[i] = byte(i) + '0'
		}
		s := "foo" + string(b)
		if want := "foo0123456789"; s != want {
			t.Fatalf("want %v, got %v", want, s)
		}
	})
	// Only string concatenation allocates.
	if n != 1 {
		t.Fatalf("want 1 allocation, got %v", n)
	}
}

func TestTinyAlloc(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	const N = 16
	var v [N]unsafe.Pointer
	for i := range v {
		v[i] = unsafe.Pointer(new(byte))
	}

	chunks := make(map[uintptr]bool, N)
	for _, p := range v {
		chunks[uintptr(p)&^7] = true
	}

	if len(chunks) == N {
		t.Fatal("no bytes allocated within the same 8-byte chunk")
	}
}
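
// chunkBase is an illustrative helper (not used by the test) that spells out
// the masking in TestTinyAlloc: clearing the low three bits of an address
// yields the base of its 8-byte chunk, so two 1-byte allocations that the
// tiny allocator packed together report the same base.
func chunkBase(p unsafe.Pointer) uintptr {
	return uintptr(p) &^ 7
}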

type obj12 struct {
	a uint64
	b uint32
}

func TestTinyAllocIssue37262(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	// Try to cause an alignment access fault
	// by atomically accessing the first 64-bit
	// value of a tiny-allocated object.
	// See issue 37262 for details.

	// GC twice, once to reach a stable heap state
	// and again to make sure we finish the sweep phase.
	runtime.GC()
	runtime.GC()

	// Disable preemption so we stay on one P's tiny allocator and
	// nothing else allocates from it.
	runtime.Acquirem()

	// Make 1-byte allocations until we get a fresh tiny slot.
	aligned := false
	for i := 0; i < 16; i++ {
		x := runtime.Escape(new(byte))
		if uintptr(unsafe.Pointer(x))&0xf == 0xf {
			aligned = true
			break
		}
	}
	if !aligned {
		runtime.Releasem()
		t.Fatal("unable to get a fresh tiny slot")
	}

	// Create a 4-byte object so that the current
	// tiny slot is partially filled.
	runtime.Escape(new(uint32))

	// Create a 12-byte object, which fits into the
	// tiny slot. If it actually gets placed there,
	// then the field "a" will be improperly aligned
	// for atomic access on 32-bit architectures.
	// This won't be true if issue 36606 gets resolved.
	tinyObj12 := runtime.Escape(new(obj12))

	// Try to atomically access tinyObj12.a.
	atomic.StoreUint64(&tinyObj12.a, 10)

	runtime.Releasem()
}
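
// alignedForAtomic64 is an illustrative helper (the name is mine, not part of
// the runtime's API): on 32-bit architectures, sync/atomic requires 64-bit
// words to be 8-byte aligned, which is exactly the property that
// TestTinyAllocIssue37262 probes for tiny-allocated objects.
func alignedForAtomic64(p *uint64) bool {
	return uintptr(unsafe.Pointer(p))%8 == 0
}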

func TestPageCacheLeak(t *testing.T) {
	defer GOMAXPROCS(GOMAXPROCS(1))
	leaked := PageCachePagesLeaked()
	if leaked != 0 {
		t.Fatalf("found %d leaked pages in page caches", leaked)
	}
}

func TestPhysicalMemoryUtilization(t *testing.T) {
	got := runTestProg(t, "testprog", "GCPhys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestScavengedBitsCleared(t *testing.T) {
	var mismatches [128]BitsMismatch
	if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
		t.Errorf("uncleared scavenged bits")
		for _, m := range mismatches[:n] {
			t.Logf("\t@ address 0x%x", m.Base)
			t.Logf("\t|  got: %064b", m.Got)
			t.Logf("\t| want: %064b", m.Want)
		}
		t.FailNow()
	}
}

type acLink struct {
	x [1 << 20]byte
}

var arenaCollisionSink []*acLink

func TestArenaCollision(t *testing.T) {
	testenv.MustHaveExec(t)

	// Test that mheap.sysAlloc handles collisions with other
	// memory mappings.
	if os.Getenv("TEST_ARENA_COLLISION") != "1" {
		cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=^TestArenaCollision$", "-test.v"))
		cmd.Env = append(cmd.Env, "TEST_ARENA_COLLISION=1")
		out, err := cmd.CombinedOutput()
		if race.Enabled {
			// This test runs the runtime out of hint
			// addresses, so it will start mapping the
			// heap wherever it can. The race detector
			// doesn't support this, so look for the
			// expected failure.
			if want := "too many address space collisions"; !strings.Contains(string(out), want) {
				t.Fatalf("want %q, got:\n%s", want, string(out))
			}
		} else if !strings.Contains(string(out), "PASS\n") || err != nil {
			t.Fatalf("%s\n(exit status %v)", string(out), err)
		}
		return
	}
	disallowed := [][2]uintptr{}
	// Drop all but the next 3 hints. 64-bit has a lot of hints,
	// so it would take a lot of memory to go through all of them.
	KeepNArenaHints(3)
	// Consume these 3 hints and force the runtime to find some
	// fallback hints.
	for i := 0; i < 5; i++ {
		// Reserve memory at the next hint so it can't be used
		// for the heap.
		start, end, ok := MapNextArenaHint()
		if !ok {
			t.Skipf("failed to reserve memory at next arena hint [%#x, %#x)", start, end)
		}
		t.Logf("reserved [%#x, %#x)", start, end)
		disallowed = append(disallowed, [2]uintptr{start, end})
		// Allocate until the runtime tries to use the hint we
		// just mapped over.
		hint := GetNextArenaHint()
		for GetNextArenaHint() == hint {
			ac := new(acLink)
			arenaCollisionSink = append(arenaCollisionSink, ac)
			// The allocation must not have fallen into
			// one of the reserved regions.
			p := uintptr(unsafe.Pointer(ac))
			for _, d := range disallowed {
				if d[0] <= p && p < d[1] {
					t.Fatalf("allocation %#x in reserved region [%#x, %#x)", p, d[0], d[1])
				}
			}
		}
	}
}

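// In the benchmarks below, Escape forces each value to be heap-allocated and
// keeps the compiler from optimizing the allocation away, so the loop bodies
// measure the allocator itself rather than dead code.
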
func BenchmarkMalloc8(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new(int64)
		Escape(p)
	}
}

func BenchmarkMalloc16(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new([2]int64)
		Escape(p)
	}
}

func BenchmarkMallocTypeInfo8(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [8 / unsafe.Sizeof(uintptr(0))]*int
		})
		Escape(p)
	}
}

func BenchmarkMallocTypeInfo16(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [16 / unsafe.Sizeof(uintptr(0))]*int
		})
		Escape(p)
	}
}

type LargeStruct struct {
	x [16][]byte
}

func BenchmarkMallocLargeStruct(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := make([]LargeStruct, 2)
		Escape(p)
	}
}

var n = flag.Int("n", 1000, "number of goroutines")

func BenchmarkGoroutineSelect(b *testing.B) {
	quit := make(chan struct{})
	read := func(ch chan struct{}) {
		for {
			select {
			case _, ok := <-ch:
				if !ok {
					return
				}
			case <-quit:
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineBlocking(b *testing.B) {
	read := func(ch chan struct{}) {
		for {
			if _, ok := <-ch; !ok {
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineForRange(b *testing.B) {
	read := func(ch chan struct{}) {
		for range ch {
		}
	}
	benchHelper(b, *n, read)
}

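// benchHelper measures GC time while n goroutines sit blocked on channel
// receives. The timer is stopped around the per-iteration setup (waking every
// goroutine and letting each block again), so each measured interval covers a
// single GC over a fixed population of blocked goroutines.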
func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	m := make([]chan struct{}, n)
	for i := range m {
		m[i] = make(chan struct{}, 1)
		go read(m[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

	for i := 0; i < b.N; i++ {
		for _, ch := range m {
			if ch != nil {
				ch <- struct{}{}
			}
		}
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	for _, ch := range m {
		close(ch)
	}
	time.Sleep(10 * time.Millisecond)
}

func BenchmarkGoroutineIdle(b *testing.B) {
	quit := make(chan struct{})
	fn := func() {
		<-quit
	}
	for i := 0; i < *n; i++ {
		go fn()
	}

	GC()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		GC()
	}

	b.StopTimer()
	close(quit)
	time.Sleep(10 * time.Millisecond)
}