// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2005-2007 IBM Corporation.
 * Author: David Gibson & Adam Litke
 */

/*\
 * [Description]
 *
 * This test performs mmap, munmap and write operations on hugetlb
 * file-backed mappings, both shared and private, and checks the
 * hugetlb counters (Total, Free, Reserved, Surplus) in /proc/meminfo
 * against the expected (calculated) values. If all checks are
 * successful, the test passes.
 */

#define _GNU_SOURCE
#include <unistd.h>
#include <stdio.h>
#include <sys/mount.h>
#include <limits.h>
#include <sys/param.h>
#include <sys/types.h>

#include "hugetlb.h"

#define MNTPOINT "hugetlbfs/"

static long hpage_size;
static int private_resv;

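/*
 * Two mapping slots: SL_SETUP holds a pre-existing mapping across a
 * whole test pass, while SL_TEST is the mapping exercised by each step.
 */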
#define NR_SLOTS	2
#define SL_SETUP	0
#define SL_TEST		1
static int map_fd[NR_SLOTS];
static char *map_addr[NR_SLOTS];
static unsigned long map_size[NR_SLOTS];
static unsigned int touched[NR_SLOTS];

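/*
 * Hugepage counter values recorded after the last successful
 * verification; they serve as the baseline when computing the next
 * set of expected values.
 */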
static long prev_total;
static long prev_free;
static long prev_resv;
static long prev_surp;

static void read_meminfo_huge(long *total, long *free, long *resv, long *surp)
{
	*total = SAFE_READ_MEMINFO(MEMINFO_HPAGE_TOTAL);
	*free = SAFE_READ_MEMINFO(MEMINFO_HPAGE_FREE);
	*resv = SAFE_READ_MEMINFO(MEMINFO_HPAGE_RSVD);
	*surp = SAFE_READ_MEMINFO(MEMINFO_HPAGE_SURP);
}

static int kernel_has_private_reservations(void)
{
	int fd;
	long t, f, r, s;
	long nt, nf, nr, ns;
	void *p;

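	/*
	 * Map a single private huge page and compare the counters before
	 * and after to deduce whether this kernel creates reservations
	 * for private mappings.
	 */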
	read_meminfo_huge(&t, &f, &r, &s);
	fd = tst_creat_unlinked(MNTPOINT, 0);

	p = SAFE_MMAP(NULL, hpage_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);

	read_meminfo_huge(&nt, &nf, &nr, &ns);

	SAFE_MUNMAP(p, hpage_size);
	SAFE_CLOSE(fd);

	/*
	 * There are only three valid cases:
	 * 1) If a surplus page was allocated to create the reservation,
	 *    all four pool counters increment.
	 * 2) If all counters remain the same except HugePages_Rsvd, the
	 *    reservation was created using an existing pool page.
	 * 3) If all counters remain the same, no reservation was created.
	 */
	if ((nt == t + 1) && (nf == f + 1) && (ns == s + 1) && (nr == r + 1))
		return 1;
	else if ((nt == t) && (nf == f) && (ns == s)) {
		if (nr == r + 1)
			return 1;
		else if (nr == r)
			return 0;
	}
	tst_brk(TCONF, "bad counter state - "
	      "T:%li F:%li R:%li S:%li -> T:%li F:%li R:%li S:%li",
		  t, f, r, s, nt, nf, nr, ns);
	return -1;
}

static int verify_counters(int line, char *desc, long et, long ef, long er, long es)
{
	long t, f, r, s;
	long fail = 0;

	read_meminfo_huge(&t, &f, &r, &s);

	if (t != et) {
		tst_res_(__FILE__, line, TFAIL, "While %s: Bad "MEMINFO_HPAGE_TOTAL
				" expected %li, actual %li", desc, et, t);
		fail++;
	}
	if (f != ef) {
		tst_res_(__FILE__, line, TFAIL, "While %s: Bad "MEMINFO_HPAGE_FREE
				" expected %li, actual %li", desc, ef, f);
		fail++;
	}
	if (r != er) {
		tst_res_(__FILE__, line, TFAIL, "While %s: Bad "MEMINFO_HPAGE_RSVD
				" expected %li, actual %li", desc, er, r);
		fail++;
	}
	if (s != es) {
		tst_res_(__FILE__, line, TFAIL, "While %s: Bad "MEMINFO_HPAGE_SURP
				" expected %li, actual %li", desc, es, s);
		fail++;
	}

	if (fail)
		return -1;

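	/* All counters matched: record them as the new baseline. */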
	prev_total = t;
	prev_free = f;
	prev_resv = r;
	prev_surp = s;
	return 0;
}

/* Memory operations:
 * Each of these has a predefined effect on the counters
 */
static int set_nr_hugepages_(long count, char *desc, int line)
{
	long min_size;
	long et, ef, er, es;

	SAFE_FILE_PRINTF(PATH_NR_HPAGES, "%lu", count);

	/* The code below is based on set_max_huge_pages in mm/hugetlb.c */
	es = prev_surp;
	et = prev_total;
	ef = prev_free;
	er = prev_resv;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 */
	while (es && count > et - es)
		es--;
	while (count > et - es) {
		et++;
		ef++;
	}
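	/*
	 * The pool was grown (or was already at least the requested
	 * size), so the shrink path below does not apply.
	 */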
	if (count >= et - es)
		goto out;

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 */
	min_size = MAX(count, er + et - ef);
	while (min_size < et - es) {
		ef--;
		et--;
	}
	while (count < et - es)
		es++;

out:
	return verify_counters(line, desc, et, ef, er, es);
}
#define SET_NR_HUGEPAGES(c, d) set_nr_hugepages_(c, d, __LINE__)

static int map_(int s, int hpages, int flags, char *desc, int line)
{
	long et, ef, er, es;

	map_fd[s] = tst_creat_unlinked(MNTPOINT, 0);
	map_size[s] = hpages * hpage_size;
	map_addr[s] = SAFE_MMAP(NULL, map_size[s], PROT_READ|PROT_WRITE, flags,
				map_fd[s], 0);
	touched[s] = 0;

	et = prev_total;
	ef = prev_free;
	er = prev_resv;
	es = prev_surp;
	/*
	 * When using MAP_SHARED, a reservation will be created to guarantee
	 * pages to the process.  If not enough pages are available to
	 * satisfy the reservation, surplus pages are added to the pool.
	 * NOTE: This code assumes that the whole mapping needs to be
	 * reserved and hence, will not work with partial reservations.
	 *
	 * If the kernel supports private reservations, then MAP_PRIVATE
	 * mappings behave like MAP_SHARED at mmap time.  Otherwise,
	 * no counter updates will occur.
	 */
	if ((flags & MAP_SHARED) || private_resv) {
		unsigned long shortfall = 0;

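		/*
		 * Reserved pages that cannot be satisfied from the
		 * currently unreserved free pages must be covered by
		 * freshly allocated surplus pages.
		 */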
		if (hpages + prev_resv > prev_free)
			shortfall = hpages - prev_free + prev_resv;
		et += shortfall;
		ef += shortfall;
		er += hpages;
		es += shortfall;
	}

	return verify_counters(line, desc, et, ef, er, es);
}
#define MAP(s, h, f, d) map_(s, h, f, d, __LINE__)

static int unmap_(int s, int hpages, int flags, char *desc, int line)
{
	long et, ef, er, es;
	unsigned long i;

	SAFE_MUNMAP(map_addr[s], map_size[s]);
	SAFE_CLOSE(map_fd[s]);
	map_addr[s] = NULL;
	map_size[s] = 0;

	et = prev_total;
	ef = prev_free;
	er = prev_resv;
	es = prev_surp;

	/*
	 * When a VMA is unmapped, the instantiated (touched) pages are
	 * freed.  If the pool is in a surplus state, pages are freed to the
	 * buddy allocator, otherwise they go back into the hugetlb pool.
	 * NOTE: This code assumes touched pages have only one user.
	 */
	for (i = 0; i < touched[s]; i++) {
		if (es) {
			et--;
			es--;
		} else
			ef++;
	}

	/*
	 * mmap may have created some surplus pages to accommodate a
	 * reservation.  If those pages were not touched, then they will
	 * not have been freed by the code above.  Free them here.
	 */
	if ((flags & MAP_SHARED) || private_resv) {
		int unused_surplus = MIN(hpages - touched[s], es);

		et -= unused_surplus;
		ef -= unused_surplus;
		er -= hpages - touched[s];
		es -= unused_surplus;
	}

	return verify_counters(line, desc, et, ef, er, es);
}
#define UNMAP(s, h, f, d) unmap_(s, h, f, d, __LINE__)

static int touch_(int s, int hpages, int flags, char *desc, int line)
{
	long et, ef, er, es;
	int nr;
	char *c;
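	/* Write one byte to each huge page to fault it in. */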
	for (c = map_addr[s], nr = hpages;
			hpages && c < map_addr[s] + map_size[s];
			c += hpage_size, nr--)
		*c = (char) (nr % 2);
	/*
	 * Keep track of how many pages were touched since we can't easily
	 * detect that from user space.
	 * NOTE: Calling this function more than once for a mapping may yield
	 * results you don't expect.  Be careful :)
	 */
	touched[s] = MAX(touched[s], hpages);

	/*
	 * Shared (and private, when supported) mappings consume reserved
	 * pages that were previously allocated, so they are also deducted
	 * from the free count.
	 *
	 * Unreserved private mappings may need to allocate surplus pages to
	 * satisfy the fault.  The surplus pages become part of the pool
	 * which could elevate total, free, and surplus counts.  resv is
	 * unchanged but free must be decreased.
	 */
	if (flags & MAP_SHARED || private_resv) {
		et = prev_total;
		ef = prev_free - hpages;
		er = prev_resv - hpages;
		es = prev_surp;
	} else {
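		/*
		 * Surplus pages allocated at fault time (et - prev_total)
		 * join the pool, raising the total and surplus counts; the
		 * free count drops by the touched pages, offset by the
		 * newly added surplus pages.
		 */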
		if (hpages + prev_resv > prev_free)
			et = prev_total + (hpages - prev_free + prev_resv);
		else
			et = prev_total;
		er = prev_resv;
		es = prev_surp + et - prev_total;
		ef = prev_free - hpages + et - prev_total;
	}
	return verify_counters(line, desc, et, ef, er, es);
}
#define TOUCH(s, h, f, d) touch_(s, h, f, d, __LINE__)

static int test_counters(char *desc, int base_nr)
{
	tst_res(TINFO, "%s...", desc);

	if (SET_NR_HUGEPAGES(base_nr, "initializing hugepages pool"))
		return -1;

	/* untouched, shared mmap */
	if (MAP(SL_TEST, 1, MAP_SHARED, "doing mmap shared with no touch") ||
		UNMAP(SL_TEST, 1, MAP_SHARED, "doing munmap on shared with no touch"))
		return -1;

	/* untouched, private mmap */
	if (MAP(SL_TEST, 1, MAP_PRIVATE, "doing mmap private with no touch") ||
		UNMAP(SL_TEST, 1, MAP_PRIVATE, "doing munmap private with no touch"))
		return -1;

	/* touched, shared mmap */
	if (MAP(SL_TEST, 1, MAP_SHARED, "doing mmap shared followed by touch") ||
		TOUCH(SL_TEST, 1, MAP_SHARED, "touching the addr after mmap shared") ||
		UNMAP(SL_TEST, 1, MAP_SHARED, "doing munmap shared after touch"))
		return -1;

	/* touched, private mmap */
	if (MAP(SL_TEST, 1, MAP_PRIVATE, "doing mmap private followed by touch") ||
		TOUCH(SL_TEST, 1, MAP_PRIVATE, "touching the addr after mmap private") ||
		UNMAP(SL_TEST, 1, MAP_PRIVATE, "doing munmap private after touch"))
		return -1;

	/*
	 * Explicit resizing during outstanding surplus
	 * Consume surplus when growing pool
	 */
	if (MAP(SL_TEST, 2, MAP_SHARED, "doing mmap to consume surplus") ||
		SET_NR_HUGEPAGES(MAX(base_nr, 1), "setting hugepages pool to consume surplus"))
		return -1;

	/* Add pages once surplus is consumed */
	if (SET_NR_HUGEPAGES(MAX(base_nr, 3), "adding more pages after consuming surplus"))
		return -1;

	/* Release free huge pages first */
	if (SET_NR_HUGEPAGES(MAX(base_nr, 2), "releasing free huge pages"))
		return -1;

	/* When shrinking beyond committed level, increase surplus */
	if (SET_NR_HUGEPAGES(base_nr, "increasing surplus counts"))
		return -1;

	/* Upon releasing the reservation, reduce surplus counts */
	if (UNMAP(SL_TEST, 2, MAP_SHARED, "reducing surplus counts"))
		return -1;

	tst_res(TINFO, "OK");
	return 0;
}

static void per_iteration_cleanup(void)
{
	int nr;

	prev_total = 0;
	prev_free = 0;
	prev_resv = 0;
	prev_surp = 0;
	for (nr = 0; nr < NR_SLOTS; nr++) {
		if (map_addr[nr])
			SAFE_MUNMAP(map_addr[nr], map_size[nr]);
		if (map_fd[nr] > 0)
			SAFE_CLOSE(map_fd[nr]);
	}
}

static int test_per_base_nr(int base_nr)
{
	tst_res(TINFO, "Base pool size: %i", base_nr);

	/* Run the tests with a clean slate */
	if (test_counters("Clean", base_nr))
		return -1;

	/* Now with a pre-existing untouched, shared mmap */
	if (MAP(SL_SETUP, 1, MAP_SHARED, "mmap for test having prior untouched shared mmap") ||
		test_counters("Untouched, shared", base_nr) ||
		UNMAP(SL_SETUP, 1, MAP_SHARED, "unmap after test having prior untouched shared mmap"))
		return -1;

	/* Now with a pre-existing untouched, private mmap */
	if (MAP(SL_SETUP, 1, MAP_PRIVATE, "mmap for test having prior untouched private mmap") ||
		test_counters("Untouched, private", base_nr) ||
		UNMAP(SL_SETUP, 1, MAP_PRIVATE, "unmap after test having prior untouched private mmap"))
		return -1;

	/* Now with a pre-existing touched, shared mmap */
	if (MAP(SL_SETUP, 1, MAP_SHARED, "mmap for test having prior touched shared mmap") ||
		TOUCH(SL_SETUP, 1, MAP_SHARED, "touching for test having prior touched shared mmap") ||
		test_counters("Touched, shared", base_nr) ||
		UNMAP(SL_SETUP, 1, MAP_SHARED, "unmap after test having prior touched shared mmap"))
		return -1;

	/* Now with a pre-existing touched, private mmap */
	if (MAP(SL_SETUP, 1, MAP_PRIVATE, "mmap for test having prior touched private mmap") ||
		TOUCH(SL_SETUP, 1, MAP_PRIVATE, "touching for test having prior touched private mmap") ||
		test_counters("Touched, private", base_nr) ||
		UNMAP(SL_SETUP, 1, MAP_PRIVATE,	"unmap after test having prior touched private mmap"))
		return -1;
	return 0;
}

static void run_test(void)
{
	int base_nr;

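	/* Exercise base pool sizes from 0 up to the 3 pages the test reserves */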
	for (base_nr = 0; base_nr <= 3; base_nr++) {
		if (test_per_base_nr(base_nr))
			break;
	}
	if (base_nr > 3)
		tst_res(TPASS, "Hugepage counters work as expected.");
	per_iteration_cleanup();
}

static void setup(void)
{
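	/* Hugepagesize in /proc/meminfo is reported in kB */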
	hpage_size = SAFE_READ_MEMINFO(MEMINFO_HPAGE_SIZE)*1024;
	SAFE_FILE_PRINTF(PATH_OC_HPAGES, "%lu", tst_hugepages);
	private_resv = kernel_has_private_reservations();
}

static void cleanup(void)
{
	per_iteration_cleanup();
}

static struct tst_test test = {
	.needs_root = 1,
	.mntpoint = MNTPOINT,
	.needs_hugetlbfs = 1,
	.save_restore = (const struct tst_path_val[]) {
		{PATH_OC_HPAGES, NULL},
		{PATH_NR_HPAGES, NULL},
		{}
	},
	.setup = setup,
	.cleanup = cleanup,
	.test_all = run_test,
	.hugepages = {3, TST_NEEDS},
};