// SPDX-License-Identifier: GPL-2.0-only
/*\
 *
 * [Description]
 *
 * Conversion of the third kselftest in cgroup/test_memcontrol.c.
 *
 * Original description:
 * "First, this test creates the following hierarchy:
 * A       memory.min = 50M,  memory.max = 200M
 * A/B     memory.min = 50M,  memory.current = 50M
 * A/B/C   memory.min = 75M,  memory.current = 50M
 * A/B/D   memory.min = 25M,  memory.current = 50M
 * A/B/E   memory.min = 500M, memory.current = 0
 * A/B/F   memory.min = 0,    memory.current = 50M
 *
 * Usages are pagecache, but the test keeps a running
 * process in every leaf cgroup.
 * Then it creates A/G and creates a significant
 * memory pressure in it.
 *
 * A/B    memory.current ~= 50M
 * A/B/C  memory.current ~= 33M
 * A/B/D  memory.current ~= 17M
 * A/B/E  memory.current ~= 0
 *
 * After that it tries to allocate more than there is unprotected
 * memory in A available, and checks that memory.min protects
 * pagecache even in this case."
 *
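 * The expected ~33M and ~17M figures follow from how the kernel splits a
 * parent's protection when the children overcommit it: B's effective 50M
 * is distributed in proportion to each child's usage capped by its own
 * memory.min (50M for C, 25M for D, 0 for E and F), so C keeps roughly
 * 50M * 50/75 and D roughly 50M * 25/75.
 *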
 * memory.min doesn't appear to exist on V1, so we only test on V2 like
 * the selftest. We do test on more file systems, but not tmpfs because
 * it can't evict the page cache without swap. Also we avoid filesystems
 * which allocate extra memory for buffer heads.
 *
 * The tolerances have been increased from the selftests.
 */

#define _GNU_SOURCE

#include <inttypes.h>

#include "memcontrol_common.h"

#define TMPDIR "mntdir"

static struct tst_cg_group *trunk_cg[3];
static struct tst_cg_group *leaf_cg[4];
static int fd = -1;

enum checkpoints {
	CHILD_IDLE,
	TEST_DONE,
};

enum trunk_cg {
	A,
	B,
	G
};

enum leaf_cg {
	C,
	D,
	E,
	F
};

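/*
 * Wake the page cache children still parked on the TEST_DONE checkpoint,
 * reap them, then remove the leaf and trunk cgroups in reverse order of
 * creation. Groups that were never created are skipped, so this is safe
 * to call from cleanup() at any stage of the test.
 */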
static void cleanup_sub_groups(void)
{
	size_t i;

	for (i = ARRAY_SIZE(leaf_cg); i > 0; i--) {
		if (!leaf_cg[i - 1])
			continue;

		TST_CHECKPOINT_WAKE2(TEST_DONE,
				     ARRAY_SIZE(leaf_cg) - 1);
		tst_reap_children();
		break;
	}

	for (i = ARRAY_SIZE(leaf_cg); i > 0; i--) {
		if (!leaf_cg[i - 1])
			continue;

		leaf_cg[i - 1] = tst_cg_group_rm(leaf_cg[i - 1]);
	}

	for (i = ARRAY_SIZE(trunk_cg); i > 0; i--) {
		if (!trunk_cg[i - 1])
			continue;

		trunk_cg[i - 1] = tst_cg_group_rm(trunk_cg[i - 1]);
	}
}

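/*
 * Fork a child, migrate it into @cg and have it allocate @size bytes of
 * anonymous memory. Depending on @expect_oom, either a clean exit(0) or a
 * SIGKILL from the OOM killer is reported as a pass; anything else fails.
 */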
static void alloc_anon_in_child(const struct tst_cg_group *const cg,
				const size_t size, const int expect_oom)
{
	int status;
	const pid_t pid = SAFE_FORK();

	if (!pid) {
		SAFE_CG_PRINTF(cg, "cgroup.procs", "%d", getpid());

		tst_res(TINFO, "Child %d in %s: Allocating anon: %"PRIdPTR,
			getpid(), tst_cg_group_name(cg), size);
		alloc_anon(size);
		exit(0);
	}

	SAFE_WAITPID(pid, &status, 0);

	if (expect_oom && WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL) {
		tst_res(TPASS, "Child %d killed by OOM", pid);
		return;
	}

	if (!expect_oom && WIFEXITED(status) && WEXITSTATUS(status) == 0) {
		tst_res(TPASS, "Child %d exited", pid);
		return;
	}

	tst_res(TFAIL,
		"Expected child %d to %s, but instead %s",
		pid,
		expect_oom ? "be killed" : "exit(0)",
		tst_strstatus(status));
}

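/*
 * Fork a child, migrate it into @cg and have it fill @size bytes of page
 * cache in the shared tmpfile. The child then blocks on the TEST_DONE
 * checkpoint so that every leaf cgroup keeps a running process until
 * cleanup_sub_groups() wakes it.
 */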
static void alloc_pagecache_in_child(const struct tst_cg_group *const cg,
				     const size_t size)
{
	const pid_t pid = SAFE_FORK();

	if (pid) {
		TST_CHECKPOINT_WAIT(CHILD_IDLE);
		return;
	}

	SAFE_CG_PRINTF(cg, "cgroup.procs", "%d", getpid());

	tst_res(TINFO, "Child %d in %s: Allocating pagecache: %"PRIdPTR,
		getpid(), tst_cg_group_name(cg), size);
	alloc_pagecache(fd, size);
	SAFE_FSYNC(fd);

	TST_CHECKPOINT_WAKE(CHILD_IDLE);
	TST_CHECKPOINT_WAIT(TEST_DONE);
	exit(0);
}

static void test_memcg_min(void)
{
	long c[4];
	unsigned int i;
	size_t attempts;

	fd = SAFE_OPEN(TMPDIR"/tmpfile", O_RDWR | O_CREAT, 0600);
	trunk_cg[A] = tst_cg_group_mk(tst_cg, "trunk_A");

	SAFE_CG_SCANF(trunk_cg[A], "memory.min", "%ld", c);
	if (c[0]) {
		tst_brk(TCONF,
			"memory.min already set to %ld on parent group", c[0]);
	}

	SAFE_CG_PRINT(trunk_cg[A], "cgroup.subtree_control", "+memory");

	SAFE_CG_PRINT(trunk_cg[A], "memory.max", "200M");
	SAFE_CG_PRINT(trunk_cg[A], "memory.swap.max", "0");

	trunk_cg[B] = tst_cg_group_mk(trunk_cg[A], "trunk_B");

	SAFE_CG_PRINT(trunk_cg[B], "cgroup.subtree_control", "+memory");

	trunk_cg[G] = tst_cg_group_mk(trunk_cg[A], "trunk_G");

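	/*
	 * Create the leaves C..F under B. Every leaf except E gets a child
	 * that fills 50M of page cache and then waits.
	 */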
	for (i = 0; i < ARRAY_SIZE(leaf_cg); i++) {
		leaf_cg[i] = tst_cg_group_mk(trunk_cg[B],
						 "leaf_%c", 'C' + i);

		if (i == E)
			continue;

		alloc_pagecache_in_child(leaf_cg[i], MB(50));
	}

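	/* Apply the memory.min protections described in the header comment. */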
	SAFE_CG_PRINT(trunk_cg[A], "memory.min", "50M");
	SAFE_CG_PRINT(trunk_cg[B], "memory.min", "50M");
	SAFE_CG_PRINT(leaf_cg[C], "memory.min", "75M");
	SAFE_CG_PRINT(leaf_cg[D], "memory.min", "25M");
	SAFE_CG_PRINT(leaf_cg[E], "memory.min", "500M");
	SAFE_CG_PRINT(leaf_cg[F], "memory.min", "0");

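	/*
	 * Give the charges a few seconds to settle: B's usage should come
	 * close to 150M (three leaves with 50M of page cache each).
	 */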
	for (attempts = 0; attempts < 5; attempts++) {
		SAFE_CG_SCANF(trunk_cg[B], "memory.current", "%ld", c);
		if (values_close(c[0], MB(150), 3))
			break;

		sleep(1);
	}

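	/*
	 * Create significant memory pressure in A/G. The 148M of anon memory
	 * forces reclaim of the unprotected page cache under A/B, but the
	 * allocation itself is expected to succeed without an OOM kill.
	 */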
	alloc_anon_in_child(trunk_cg[G], MB(148), 0);

	SAFE_CG_SCANF(trunk_cg[B], "memory.current", "%ld", c);
	TST_EXP_EXPR(values_close(c[0], MB(50), 5),
		     "(A/B memory.current=%ld) ~= %d", c[0], MB(50));

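	/*
	 * B's protection should have been split between C and D roughly 2:1,
	 * with the empty cgroup E staying at ~0.
	 */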
	for (i = 0; i < ARRAY_SIZE(leaf_cg); i++)
		SAFE_CG_SCANF(leaf_cg[i], "memory.current", "%ld", c + i);

	TST_EXP_EXPR(values_close(c[0], MB(33), 20),
		     "(A/B/C memory.current=%ld) ~= %d", c[0], MB(33));
	TST_EXP_EXPR(values_close(c[1], MB(17), 20),
		     "(A/B/D memory.current=%ld) ~= %d", c[1], MB(17));
	TST_EXP_EXPR(values_close(c[2], 0, 1),
		     "(A/B/E memory.current=%ld) ~= 0", c[2]);

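	/*
	 * Allocate more than the unprotected memory left in A. This child is
	 * expected to be OOM-killed, while memory.min keeps ~50M of B's page
	 * cache intact.
	 */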
	alloc_anon_in_child(trunk_cg[G], MB(170), 1);

	SAFE_CG_SCANF(trunk_cg[B], "memory.current", "%ld", c);
	TST_EXP_EXPR(values_close(c[0], MB(50), 5),
		     "(A/B memory.current=%ld) ~= %d", c[0], MB(50));

	cleanup_sub_groups();
	SAFE_CLOSE(fd);
	SAFE_UNLINK(TMPDIR"/tmpfile");
}

static void cleanup(void)
{
	cleanup_sub_groups();
	if (fd > -1)
		SAFE_CLOSE(fd);
}

static struct tst_test test = {
	.cleanup = cleanup,
	.test_all = test_memcg_min,
	.mount_device = 1,
	.mntpoint = TMPDIR,
	.all_filesystems = 1,
	.skip_filesystems = (const char *const[]){
		"exfat", "vfat", "fuse", "ntfs", "tmpfs", NULL
	},
	.forks_child = 1,
	.needs_root = 1,
	.needs_checkpoints = 1,
	.needs_cgroup_ver = TST_CG_V2,
	.needs_cgroup_ctrls = (const char *const[]){ "memory", NULL },
};