1// Copyright 2019 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package runtime_test
6
7import (
8	"internal/goos"
9	"math/rand"
10	. "runtime"
11	"testing"
12)
13
14func checkPageCache(t *testing.T, got, want PageCache) {
15	if got.Base() != want.Base() {
16		t.Errorf("bad pageCache base: got 0x%x, want 0x%x", got.Base(), want.Base())
17	}
18	if got.Cache() != want.Cache() {
19		t.Errorf("bad pageCache bits: got %016x, want %016x", got.Base(), want.Base())
20	}
21	if got.Scav() != want.Scav() {
22		t.Errorf("bad pageCache scav: got %016x, want %016x", got.Scav(), want.Scav())
23	}
24}
25
// TestPageCacheAlloc exercises pageCache.alloc via the runtime's
// exported PageCache wrapper: each named case starts from a cache with
// a given allocation and scavenge bitmap and replays a sequence of
// allocations, checking the base address and scavenged byte count
// returned for each.
func TestPageCacheAlloc(t *testing.T) {
	base := PageBase(BaseChunkIdx, 0)
	// hit describes one expected allocation result: asking for npages
	// should return the given base address and scavenged byte count.
	// A base of 0 means the allocation is expected to fail.
	type hit struct {
		npages uintptr
		base   uintptr
		scav   uintptr
	}
	tests := map[string]struct {
		cache PageCache
		hits  []hit
	}{
		// An empty cache fails every request, regardless of size.
		"Empty": {
			cache: NewPageCache(base, 0, 0),
			hits: []hit{
				{1, 0, 0},
				{2, 0, 0},
				{3, 0, 0},
				{4, 0, 0},
				{5, 0, 0},
				{11, 0, 0},
				{12, 0, 0},
				{16, 0, 0},
				{27, 0, 0},
				{32, 0, 0},
				{43, 0, 0},
				{57, 0, 0},
				{64, 0, 0},
				{121, 0, 0},
			},
		},
		// A single free (and scavenged) page in the lowest bit.
		"Lo1": {
			cache: NewPageCache(base, 0x1, 0x1),
			hits: []hit{
				{1, base, PageSize},
				{1, 0, 0},
				{10, 0, 0},
			},
		},
		// A single free page in the highest bit.
		"Hi1": {
			cache: NewPageCache(base, 0x1<<63, 0x1),
			hits: []hit{
				{1, base + 63*PageSize, 0},
				{1, 0, 0},
				{10, 0, 0},
			},
		},
		// Scattered single free pages; single-page requests succeed
		// in ascending order, multi-page requests fail.
		"Swiss1": {
			cache: NewPageCache(base, 0x20005555, 0x5505),
			hits: []hit{
				{2, 0, 0},
				{1, base, PageSize},
				{1, base + 2*PageSize, PageSize},
				{1, base + 4*PageSize, 0},
				{1, base + 6*PageSize, 0},
				{1, base + 8*PageSize, PageSize},
				{1, base + 10*PageSize, PageSize},
				{1, base + 12*PageSize, PageSize},
				{1, base + 14*PageSize, PageSize},
				{1, base + 29*PageSize, 0},
				{1, 0, 0},
				{10, 0, 0},
			},
		},
		// A 2-page run in the low bits; note the scav bits here don't
		// overlap the cache bits, so no scavenged bytes are reported.
		"Lo2": {
			cache: NewPageCache(base, 0x3, 0x2<<62),
			hits: []hit{
				{2, base, 0},
				{2, 0, 0},
				{1, 0, 0},
			},
		},
		// A 2-page run in the high bits, fully scavenged.
		"Hi2": {
			cache: NewPageCache(base, 0x3<<62, 0x3<<62),
			hits: []hit{
				{2, base + 62*PageSize, 2 * PageSize},
				{2, 0, 0},
				{1, 0, 0},
			},
		},
		// Alternating 2-page runs in the middle of the bitmap.
		"Swiss2": {
			cache: NewPageCache(base, 0x3333<<31, 0x3030<<31),
			hits: []hit{
				{2, base + 31*PageSize, 0},
				{2, base + 35*PageSize, 2 * PageSize},
				{2, base + 39*PageSize, 0},
				{2, base + 43*PageSize, 2 * PageSize},
				{2, 0, 0},
			},
		},
		// A 53-page run starting at page 10, partially scavenged.
		"Hi53": {
			cache: NewPageCache(base, ((uint64(1)<<53)-1)<<10, ((uint64(1)<<16)-1)<<10),
			hits: []hit{
				{53, base + 10*PageSize, 16 * PageSize},
				{53, 0, 0},
				{1, 0, 0},
			},
		},
		// A full cache; after a 53-page allocation, pages remain.
		"Full53": {
			cache: NewPageCache(base, ^uint64(0), ((uint64(1)<<16)-1)<<10),
			hits: []hit{
				{53, base, 16 * PageSize},
				{53, 0, 0},
				{1, base + 53*PageSize, 0},
			},
		},
		// A full cache drained by a single maximal allocation.
		"Full64": {
			cache: NewPageCache(base, ^uint64(0), ^uint64(0)),
			hits: []hit{
				{64, base, 64 * PageSize},
				{64, 0, 0},
				{1, 0, 0},
			},
		},
		// A full cache drained by a mix of sizes, interleaved with
		// requests that are too large to fit in what remains.
		"FullMixed": {
			cache: NewPageCache(base, ^uint64(0), ^uint64(0)),
			hits: []hit{
				{5, base, 5 * PageSize},
				{7, base + 5*PageSize, 7 * PageSize},
				{1, base + 12*PageSize, 1 * PageSize},
				{23, base + 13*PageSize, 23 * PageSize},
				{63, 0, 0},
				{3, base + 36*PageSize, 3 * PageSize},
				{3, base + 39*PageSize, 3 * PageSize},
				{3, base + 42*PageSize, 3 * PageSize},
				{12, base + 45*PageSize, 12 * PageSize},
				{11, 0, 0},
				{4, base + 57*PageSize, 4 * PageSize},
				{4, 0, 0},
				{6, 0, 0},
				{36, 0, 0},
				{2, base + 61*PageSize, 2 * PageSize},
				{3, 0, 0},
				{1, base + 63*PageSize, 1 * PageSize},
				{4, 0, 0},
				{2, 0, 0},
				{62, 0, 0},
				{1, 0, 0},
			},
		},
	}
	for name, test := range tests {
		test := test // capture range variable for the subtest closure
		t.Run(name, func(t *testing.T) {
			c := test.cache
			// Replay the allocation sequence; hits are order-dependent
			// since each Alloc mutates the cache.
			for i, h := range test.hits {
				b, s := c.Alloc(h.npages)
				if b != h.base {
					t.Fatalf("bad alloc base #%d: got 0x%x, want 0x%x", i, b, h.base)
				}
				if s != h.scav {
					t.Fatalf("bad alloc scav #%d: got %d, want %d", i, s, h.scav)
				}
			}
		})
	}
}
182
183func TestPageCacheFlush(t *testing.T) {
184	if GOOS == "openbsd" && testing.Short() {
185		t.Skip("skipping because virtual memory is limited; see #36210")
186	}
187	bits64ToBitRanges := func(bits uint64, base uint) []BitRange {
188		var ranges []BitRange
189		start, size := uint(0), uint(0)
190		for i := 0; i < 64; i++ {
191			if bits&(1<<i) != 0 {
192				if size == 0 {
193					start = uint(i) + base
194				}
195				size++
196			} else {
197				if size != 0 {
198					ranges = append(ranges, BitRange{start, size})
199					size = 0
200				}
201			}
202		}
203		if size != 0 {
204			ranges = append(ranges, BitRange{start, size})
205		}
206		return ranges
207	}
208	runTest := func(t *testing.T, base uint, cache, scav uint64) {
209		// Set up the before state.
210		beforeAlloc := map[ChunkIdx][]BitRange{
211			BaseChunkIdx: {{base, 64}},
212		}
213		beforeScav := map[ChunkIdx][]BitRange{
214			BaseChunkIdx: {},
215		}
216		b := NewPageAlloc(beforeAlloc, beforeScav)
217		defer FreePageAlloc(b)
218
219		// Create and flush the cache.
220		c := NewPageCache(PageBase(BaseChunkIdx, base), cache, scav)
221		c.Flush(b)
222		if !c.Empty() {
223			t.Errorf("pageCache flush did not clear cache")
224		}
225
226		// Set up the expected after state.
227		afterAlloc := map[ChunkIdx][]BitRange{
228			BaseChunkIdx: bits64ToBitRanges(^cache, base),
229		}
230		afterScav := map[ChunkIdx][]BitRange{
231			BaseChunkIdx: bits64ToBitRanges(scav, base),
232		}
233		want := NewPageAlloc(afterAlloc, afterScav)
234		defer FreePageAlloc(want)
235
236		// Check to see if it worked.
237		checkPageAlloc(t, want, b)
238	}
239
240	// Empty.
241	runTest(t, 0, 0, 0)
242
243	// Full.
244	runTest(t, 0, ^uint64(0), ^uint64(0))
245
246	// Random.
247	for i := 0; i < 100; i++ {
248		// Generate random valid base within a chunk.
249		base := uint(rand.Intn(PallocChunkPages/64)) * 64
250
251		// Generate random cache.
252		cache := rand.Uint64()
253		scav := rand.Uint64() & cache
254
255		// Run the test.
256		runTest(t, base, cache, scav)
257	}
258}
259
// TestPageAllocAllocToCache exercises pageAlloc.allocToCache: starting
// from a given allocator state, repeatedly pulling a page cache off the
// allocator should yield the expected sequence of caches and leave the
// allocator in the expected final state.
func TestPageAllocAllocToCache(t *testing.T) {
	if GOOS == "openbsd" && testing.Short() {
		t.Skip("skipping because virtual memory is limited; see #36210")
	}
	// test describes one scenario: the allocator's initial alloc/scav
	// state, the sequence of caches allocToCache should hand out, and
	// the allocator's alloc/scav state afterwards.
	type test struct {
		beforeAlloc map[ChunkIdx][]BitRange
		beforeScav  map[ChunkIdx][]BitRange
		hits        []PageCache // expected base addresses and patterns
		afterAlloc  map[ChunkIdx][]BitRange
		afterScav   map[ChunkIdx][]BitRange
	}
	tests := map[string]test{
		// Fully free chunk: caches come off in 64-page strides.
		"AllFree": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{1, 1}, {64, 64}},
			},
			hits: []PageCache{
				NewPageCache(PageBase(BaseChunkIdx, 0), ^uint64(0), 0x2),
				NewPageCache(PageBase(BaseChunkIdx, 64), ^uint64(0), ^uint64(0)),
				NewPageCache(PageBase(BaseChunkIdx, 128), ^uint64(0), 0),
				NewPageCache(PageBase(BaseChunkIdx, 192), ^uint64(0), 0),
			},
			afterAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, 256}},
			},
		},
		// Free space only at the tail of the third chunk.
		"ManyArena": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {{0, PallocChunkPages - 64}},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {},
			},
			hits: []PageCache{
				NewPageCache(PageBase(BaseChunkIdx+2, PallocChunkPages-64), ^uint64(0), 0),
			},
			afterAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {{0, PallocChunkPages}},
			},
		},
		// Free space in a chunk far from the base, with a gap between.
		"NotContiguous": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:        {{0, PallocChunkPages}},
				BaseChunkIdx + 0xff: {{0, 0}},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:        {{0, PallocChunkPages}},
				BaseChunkIdx + 0xff: {{31, 67}},
			},
			hits: []PageCache{
				NewPageCache(PageBase(BaseChunkIdx+0xff, 0), ^uint64(0), ((uint64(1)<<33)-1)<<31),
			},
			afterAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:        {{0, PallocChunkPages}},
				BaseChunkIdx + 0xff: {{0, 64}},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:        {{0, PallocChunkPages}},
				BaseChunkIdx + 0xff: {{64, 34}},
			},
		},
		// Scattered free pages; caches pick up whatever is free in
		// the 64-page window containing the first free page.
		"First": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, 32}, {33, 31}, {96, 32}},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{1, 4}, {31, 5}, {66, 2}},
			},
			hits: []PageCache{
				NewPageCache(PageBase(BaseChunkIdx, 0), 1<<32, 1<<32),
				NewPageCache(PageBase(BaseChunkIdx, 64), (uint64(1)<<32)-1, 0x3<<2),
			},
			afterAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, 128}},
			},
		},
		// No free memory at all: allocToCache returns empty caches.
		"Fail": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, PallocChunkPages}},
			},
			hits: []PageCache{
				NewPageCache(0, 0, 0),
				NewPageCache(0, 0, 0),
				NewPageCache(0, 0, 0),
			},
			afterAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, PallocChunkPages}},
			},
		},
		// Scav bits for pages already allocated must survive the
		// round trip through the cache.
		"RetainScavBits": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, 1}, {10, 2}},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, 4}, {11, 1}},
			},
			hits: []PageCache{
				NewPageCache(PageBase(BaseChunkIdx, 0), ^uint64(0x1|(0x3<<10)), 0x7<<1),
			},
			afterAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, 64}},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, 1}, {11, 1}},
			},
		},
	}
	// Disable these tests on iOS since we have a small address space.
	// See #46860.
	if PageAlloc64Bit != 0 && goos.IsIos == 0 {
		const chunkIdxBigJump = 0x100000 // chunk index offset which translates to O(TiB)

		// This test is similar to the one with the same name for
		// pageAlloc.alloc and serves the same purpose.
		// See mpagealloc_test.go for details.
		sumsPerPhysPage := ChunkIdx(PhysPageSize / PallocSumBytes)
		baseChunkIdx := BaseChunkIdx &^ (sumsPerPhysPage - 1)
		tests["DiscontiguousMappedSumBoundary"] = test{
			beforeAlloc: map[ChunkIdx][]BitRange{
				baseChunkIdx + sumsPerPhysPage - 1: {{0, PallocChunkPages - 1}},
				baseChunkIdx + chunkIdxBigJump:     {{1, PallocChunkPages - 1}},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				baseChunkIdx + sumsPerPhysPage - 1: {},
				baseChunkIdx + chunkIdxBigJump:     {},
			},
			hits: []PageCache{
				NewPageCache(PageBase(baseChunkIdx+sumsPerPhysPage-1, PallocChunkPages-64), 1<<63, 0),
				NewPageCache(PageBase(baseChunkIdx+chunkIdxBigJump, 0), 1, 0),
				NewPageCache(0, 0, 0),
			},
			afterAlloc: map[ChunkIdx][]BitRange{
				baseChunkIdx + sumsPerPhysPage - 1: {{0, PallocChunkPages}},
				baseChunkIdx + chunkIdxBigJump:     {{0, PallocChunkPages}},
			},
		}
	}
	for name, v := range tests {
		v := v // capture range variable for the subtest closure
		t.Run(name, func(t *testing.T) {
			b := NewPageAlloc(v.beforeAlloc, v.beforeScav)
			defer FreePageAlloc(b)

			// Pull caches off the allocator in order; each AllocToCache
			// mutates b, so hits are order-dependent.
			for _, expect := range v.hits {
				checkPageCache(t, b.AllocToCache(), expect)
				if t.Failed() {
					return
				}
			}
			// Verify the allocator's final state.
			want := NewPageAlloc(v.afterAlloc, v.afterScav)
			defer FreePageAlloc(want)

			checkPageAlloc(t, want, b)
		})
	}
}
425