#include "test/jemalloc_test.h"

#include "jemalloc/internal/ticker.h"

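/*
 * Mocks for jemalloc's time hooks: the tests below swap nstime_monotonic and
 * nstime_update for the mock versions so that the passage (and monotonicity)
 * of time can be controlled directly.
 */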
static nstime_monotonic_t *nstime_monotonic_orig;
static nstime_update_t *nstime_update_orig;

static unsigned nupdates_mock;
static nstime_t time_mock;
static bool monotonic_mock;

static bool
check_background_thread_enabled(void) {
    bool enabled;
    size_t sz = sizeof(bool);
    int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL, 0);
    if (ret == ENOENT) {
        return false;
    }
    assert_d_eq(ret, 0, "Unexpected mallctl error");
    return enabled;
}

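/*
 * nstime_update_mock() counts its invocations in nupdates_mock and, when the
 * mock clock is marked monotonic, copies time_mock out; it returns true when
 * the mock clock is non-monotonic, mirroring nstime_update()'s failure
 * convention.
 */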
static bool
nstime_monotonic_mock(void) {
    return monotonic_mock;
}

static bool
nstime_update_mock(nstime_t *time) {
    nupdates_mock++;
    if (monotonic_mock) {
        nstime_copy(time, &time_mock);
    }
    return !monotonic_mock;
}

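/*
 * Create a fresh arena and set its dirty/muzzy decay times. The "arena.0.*"
 * names are translated to MIBs once, then the new arena's index is patched
 * into mib[1] so the settings apply to it rather than to arena 0.
 */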
static unsigned
do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
    unsigned arena_ind;
    size_t sz = sizeof(unsigned);
    assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
        0, "Unexpected mallctl() failure");
    size_t mib[3];
    size_t miblen = sizeof(mib)/sizeof(size_t);

    assert_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen),
        0, "Unexpected mallctlnametomib() failure");
    mib[1] = (size_t)arena_ind;
    assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
        (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0,
        "Unexpected mallctlbymib() failure");

    assert_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen),
        0, "Unexpected mallctlnametomib() failure");
    mib[1] = (size_t)arena_ind;
    assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
        (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0,
        "Unexpected mallctlbymib() failure");

    return arena_ind;
}

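/*
 * Thin wrappers around the "arena.<i>.destroy", "epoch", "arena.<i>.purge",
 * and "arena.<i>.decay" mallctls. Bumping the epoch refreshes the cached
 * statistics that the get_arena_*() readers below consume.
 */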
static void
do_arena_destroy(unsigned arena_ind) {
    size_t mib[3];
    size_t miblen = sizeof(mib)/sizeof(size_t);
    assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
        "Unexpected mallctlnametomib() failure");
    mib[1] = (size_t)arena_ind;
    assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
        "Unexpected mallctlbymib() failure");
}

void
do_epoch(void) {
    uint64_t epoch = 1;
    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
        0, "Unexpected mallctl() failure");
}

void
do_purge(unsigned arena_ind) {
    size_t mib[3];
    size_t miblen = sizeof(mib)/sizeof(size_t);
    assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
        "Unexpected mallctlnametomib() failure");
    mib[1] = (size_t)arena_ind;
    assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
        "Unexpected mallctlbymib() failure");
}

void
do_decay(unsigned arena_ind) {
    size_t mib[3];
    size_t miblen = sizeof(mib)/sizeof(size_t);
    assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
        "Unexpected mallctlnametomib() failure");
    mib[1] = (size_t)arena_ind;
    assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
        "Unexpected mallctlbymib() failure");
}

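/*
 * Read an arena's purge/page counters from "stats.arenas.<i>.*". The npurge
 * counters only exist when stats are compiled in, hence the expected ENOENT
 * when !config_stats.
 */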
static uint64_t
get_arena_npurge_impl(const char *mibname, unsigned arena_ind) {
    size_t mib[4];
    size_t miblen = sizeof(mib)/sizeof(size_t);
    assert_d_eq(mallctlnametomib(mibname, mib, &miblen), 0,
        "Unexpected mallctlnametomib() failure");
    mib[2] = (size_t)arena_ind;
    uint64_t npurge = 0;
    size_t sz = sizeof(npurge);
    assert_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0),
        config_stats ? 0 : ENOENT, "Unexpected mallctlbymib() failure");
    return npurge;
}

static uint64_t
get_arena_dirty_npurge(unsigned arena_ind) {
    do_epoch();
    return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind);
}

static uint64_t
get_arena_muzzy_npurge(unsigned arena_ind) {
    do_epoch();
    return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
}

static uint64_t
get_arena_npurge(unsigned arena_ind) {
    do_epoch();
    return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) +
        get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
}

static size_t
get_arena_pdirty(unsigned arena_ind) {
    do_epoch();
    size_t mib[4];
    size_t miblen = sizeof(mib)/sizeof(size_t);
    assert_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0,
        "Unexpected mallctlnametomib() failure");
    mib[2] = (size_t)arena_ind;
    size_t pdirty;
    size_t sz = sizeof(pdirty);
    assert_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0,
        "Unexpected mallctlbymib() failure");
    return pdirty;
}

static size_t
get_arena_pmuzzy(unsigned arena_ind) {
    do_epoch();
    size_t mib[4];
    size_t miblen = sizeof(mib)/sizeof(size_t);
    assert_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0,
        "Unexpected mallctlnametomib() failure");
    mib[2] = (size_t)arena_ind;
    size_t pmuzzy;
    size_t sz = sizeof(pmuzzy);
    assert_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0,
        "Unexpected mallctlbymib() failure");
    return pmuzzy;
}

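/*
 * generate_dirty() allocates and immediately frees with tcache disabled, so
 * the freed extent's pages land directly on the arena's dirty list.
 */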
static void *
do_mallocx(size_t size, int flags) {
    void *p = mallocx(size, flags);
    assert_ptr_not_null(p, "Unexpected mallocx() failure");
    return p;
}

static void
generate_dirty(unsigned arena_ind, size_t size) {
    int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
    void *p = do_mallocx(size, flags);
    dallocx(p, flags);
}

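/*
 * Verify that every allocator entry point (malloc/free, the standard aligned
 * allocation APIs, the *allocx() family, and tcache fill/flush) advances the
 * per-arena decay ticker.
 */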
TEST_BEGIN(test_decay_ticks) {
    test_skip_if(check_background_thread_enabled());
    test_skip_if(known_failure_on_android);

    ticker_t *decay_ticker;
    unsigned tick0, tick1, arena_ind;
    size_t sz, large0;
    void *p;

    sz = sizeof(size_t);
    assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
        0), 0, "Unexpected mallctl failure");

    /* Set up a manually managed arena for test. */
    arena_ind = do_arena_create(0, 0);

    /* Migrate to the new arena, and get the ticker. */
    unsigned old_arena_ind;
    size_t sz_arena_ind = sizeof(old_arena_ind);
    assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
        &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0,
        "Unexpected mallctl() failure");
    decay_ticker = decay_ticker_get(tsd_fetch(), arena_ind);
    assert_ptr_not_null(decay_ticker,
        "Unexpected failure getting decay ticker");

    /*
     * Test the standard APIs using a large size class, since we can't
     * control tcache interactions for small size classes (except by
     * completely disabling tcache for the entire test program).
     */

    /* malloc(). */
    tick0 = ticker_read(decay_ticker);
    p = malloc(large0);
    assert_ptr_not_null(p, "Unexpected malloc() failure");
    tick1 = ticker_read(decay_ticker);
    assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
    /* free(). */
    tick0 = ticker_read(decay_ticker);
    free(p);
    tick1 = ticker_read(decay_ticker);
    assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");

    /* calloc(). */
    tick0 = ticker_read(decay_ticker);
    p = calloc(1, large0);
    assert_ptr_not_null(p, "Unexpected calloc() failure");
    tick1 = ticker_read(decay_ticker);
    assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
    free(p);

    /* posix_memalign(). */
    tick0 = ticker_read(decay_ticker);
    assert_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
        "Unexpected posix_memalign() failure");
    tick1 = ticker_read(decay_ticker);
    assert_u32_ne(tick1, tick0,
        "Expected ticker to tick during posix_memalign()");
    free(p);

    /* aligned_alloc(). */
    tick0 = ticker_read(decay_ticker);
    p = aligned_alloc(sizeof(size_t), large0);
    assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
    tick1 = ticker_read(decay_ticker);
    assert_u32_ne(tick1, tick0,
        "Expected ticker to tick during aligned_alloc()");
    free(p);

    /* realloc(). */
    /* Allocate. */
    tick0 = ticker_read(decay_ticker);
    p = realloc(NULL, large0);
    assert_ptr_not_null(p, "Unexpected realloc() failure");
    tick1 = ticker_read(decay_ticker);
    assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
    /* Reallocate. */
    tick0 = ticker_read(decay_ticker);
    p = realloc(p, large0);
    assert_ptr_not_null(p, "Unexpected realloc() failure");
    tick1 = ticker_read(decay_ticker);
    assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
    /* Deallocate. */
    tick0 = ticker_read(decay_ticker);
    realloc(p, 0);
    tick1 = ticker_read(decay_ticker);
    assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");

    /*
     * Test the *allocx() APIs using large and small size classes, with
     * tcache explicitly disabled.
     */
    {
        unsigned i;
        size_t allocx_sizes[2];
        allocx_sizes[0] = large0;
        allocx_sizes[1] = 1;

        for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
            sz = allocx_sizes[i];

            /* mallocx(). */
            tick0 = ticker_read(decay_ticker);
            p = mallocx(sz, MALLOCX_TCACHE_NONE);
            assert_ptr_not_null(p, "Unexpected mallocx() failure");
            tick1 = ticker_read(decay_ticker);
            assert_u32_ne(tick1, tick0,
                "Expected ticker to tick during mallocx() (sz=%zu)",
                sz);
            /* rallocx(). */
            tick0 = ticker_read(decay_ticker);
            p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
            assert_ptr_not_null(p, "Unexpected rallocx() failure");
            tick1 = ticker_read(decay_ticker);
            assert_u32_ne(tick1, tick0,
                "Expected ticker to tick during rallocx() (sz=%zu)",
                sz);
            /* xallocx(). */
            tick0 = ticker_read(decay_ticker);
            xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
            tick1 = ticker_read(decay_ticker);
            assert_u32_ne(tick1, tick0,
                "Expected ticker to tick during xallocx() (sz=%zu)",
                sz);
            /* dallocx(). */
            tick0 = ticker_read(decay_ticker);
            dallocx(p, MALLOCX_TCACHE_NONE);
            tick1 = ticker_read(decay_ticker);
            assert_u32_ne(tick1, tick0,
                "Expected ticker to tick during dallocx() (sz=%zu)",
                sz);
            /* sdallocx(). */
            p = mallocx(sz, MALLOCX_TCACHE_NONE);
            assert_ptr_not_null(p, "Unexpected mallocx() failure");
            tick0 = ticker_read(decay_ticker);
            sdallocx(p, sz, MALLOCX_TCACHE_NONE);
            tick1 = ticker_read(decay_ticker);
            assert_u32_ne(tick1, tick0,
                "Expected ticker to tick during sdallocx() "
                "(sz=%zu)", sz);
        }
    }

    /*
     * Test tcache fill/flush interactions for large and small size classes,
     * using an explicit tcache.
     */
    unsigned tcache_ind, i;
    size_t tcache_sizes[2];
    tcache_sizes[0] = large0;
    tcache_sizes[1] = 1;

    size_t tcache_max, sz_tcache_max;
    sz_tcache_max = sizeof(tcache_max);
    assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
        &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure");

    sz = sizeof(unsigned);
    assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
        NULL, 0), 0, "Unexpected mallctl failure");

    for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
        sz = tcache_sizes[i];

        /* tcache fill. */
        tick0 = ticker_read(decay_ticker);
        p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
        assert_ptr_not_null(p, "Unexpected mallocx() failure");
        tick1 = ticker_read(decay_ticker);
        assert_u32_ne(tick1, tick0,
            "Expected ticker to tick during tcache fill "
            "(sz=%zu)", sz);
        /* tcache flush. */
        dallocx(p, MALLOCX_TCACHE(tcache_ind));
        tick0 = ticker_read(decay_ticker);
        assert_d_eq(mallctl("tcache.flush", NULL, NULL,
            (void *)&tcache_ind, sizeof(unsigned)), 0,
            "Unexpected mallctl failure");
        tick1 = ticker_read(decay_ticker);

        /* Will only tick if it's in tcache. */
        if (sz <= tcache_max) {
            assert_u32_ne(tick1, tick0,
                "Expected ticker to tick during tcache "
                "flush (sz=%zu)", sz);
        } else {
            assert_u32_eq(tick1, tick0,
                "Unexpected ticker tick during tcache "
                "flush (sz=%zu)", sz);
        }
    }
}
TEST_END

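/*
 * Drive the mock clock forward across the decay window in NINTERVALS steps
 * while churning tiny allocations, and assert that purging happened by the
 * time the deadline passes. With terminate_asap, the loop exits as soon as
 * the first purge is observed.
 */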
static void
decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt,
    uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) {
#define NINTERVALS 101
    nstime_t time, update_interval, decay_ms, deadline;

    nstime_init(&time, 0);
    nstime_update(&time);

    nstime_init2(&decay_ms, dt, 0);
    nstime_copy(&deadline, &time);
    nstime_add(&deadline, &decay_ms);

    nstime_init2(&update_interval, dt, 0);
    nstime_idivide(&update_interval, NINTERVALS);

    /*
     * Keep q's slab from being deallocated during the looping below. If a
     * cached slab were to repeatedly come and go during looping, it could
     * prevent the decay backlog ever becoming empty.
     */
    void *p = do_mallocx(1, flags);
    uint64_t dirty_npurge1, muzzy_npurge1;
    do {
        for (unsigned i = 0; i < DECAY_NTICKS_PER_UPDATE / 2;
            i++) {
            void *q = do_mallocx(1, flags);
            dallocx(q, flags);
        }
        dirty_npurge1 = get_arena_dirty_npurge(arena_ind);
        muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind);

        nstime_add(&time_mock, &update_interval);
        nstime_update(&time);
    } while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 ==
        dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) ||
        !terminate_asap));
    dallocx(p, flags);

    if (config_stats) {
        assert_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 +
            muzzy_npurge0, "Expected purging to occur");
    }
#undef NINTERVALS
}

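/*
 * The nstime hooks are swapped for the mocks below so that the loop in
 * decay_ticker_helper() can advance time far faster than wall-clock.
 */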
TEST_BEGIN(test_decay_ticker) {
    test_skip_if(check_background_thread_enabled());
#define NPS 2048
    ssize_t ddt = opt_dirty_decay_ms;
    ssize_t mdt = opt_muzzy_decay_ms;
    unsigned arena_ind = do_arena_create(ddt, mdt);
    int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
    void *ps[NPS];
    size_t large;

    /*
     * Allocate a bunch of large objects, pause the clock, deallocate every
     * other object (to fragment virtual memory), restore the clock, then
     * [md]allocx() in a tight loop while advancing time rapidly to verify
     * the ticker triggers purging.
     */

    size_t tcache_max;
    size_t sz = sizeof(size_t);
    assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz, NULL,
        0), 0, "Unexpected mallctl failure");
    large = nallocx(tcache_max + 1, flags);

    do_purge(arena_ind);
    uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind);
    uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind);

    for (unsigned i = 0; i < NPS; i++) {
        ps[i] = do_mallocx(large, flags);
    }

    nupdates_mock = 0;
    nstime_init(&time_mock, 0);
    nstime_update(&time_mock);
    monotonic_mock = true;

    nstime_monotonic_orig = nstime_monotonic;
    nstime_update_orig = nstime_update;
    nstime_monotonic = nstime_monotonic_mock;
    nstime_update = nstime_update_mock;

    for (unsigned i = 0; i < NPS; i += 2) {
        dallocx(ps[i], flags);
        unsigned nupdates0 = nupdates_mock;
        do_decay(arena_ind);
        assert_u_gt(nupdates_mock, nupdates0,
            "Expected nstime_update() to be called");
    }

    decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0,
        muzzy_npurge0, true);
    decay_ticker_helper(arena_ind, flags, false, ddt + mdt, dirty_npurge0,
        muzzy_npurge0, false);

    do_arena_destroy(arena_ind);

    nstime_monotonic = nstime_monotonic_orig;
    nstime_update = nstime_update_orig;
#undef NPS
}
TEST_END

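/*
 * With a non-monotonic clock (nstime_update_mock() reporting failure), decay
 * must not make progress: no additional purging should be observed.
 */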
TEST_BEGIN(test_decay_nonmonotonic) {
    test_skip_if(check_background_thread_enabled());
#define NPS (SMOOTHSTEP_NSTEPS + 1)
    int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
    void *ps[NPS];
    uint64_t npurge0 = 0;
    uint64_t npurge1 = 0;
    size_t sz, large0;
    unsigned i, nupdates0;

    sz = sizeof(size_t);
    assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
        0), 0, "Unexpected mallctl failure");

    assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
        "Unexpected mallctl failure");
    do_epoch();
    sz = sizeof(uint64_t);
    npurge0 = get_arena_npurge(0);

    nupdates_mock = 0;
    nstime_init(&time_mock, 0);
    nstime_update(&time_mock);
    monotonic_mock = false;

    nstime_monotonic_orig = nstime_monotonic;
    nstime_update_orig = nstime_update;
    nstime_monotonic = nstime_monotonic_mock;
    nstime_update = nstime_update_mock;

    for (i = 0; i < NPS; i++) {
        ps[i] = mallocx(large0, flags);
        assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
    }

    for (i = 0; i < NPS; i++) {
        dallocx(ps[i], flags);
        nupdates0 = nupdates_mock;
        assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
            "Unexpected arena.0.decay failure");
        assert_u_gt(nupdates_mock, nupdates0,
            "Expected nstime_update() to be called");
    }

    do_epoch();
    sz = sizeof(uint64_t);
    npurge1 = get_arena_npurge(0);

    if (config_stats) {
        assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
    }

    nstime_monotonic = nstime_monotonic_orig;
    nstime_update = nstime_update_orig;
#undef NPS
}
TEST_END

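/* An arena created with decay_ms = 0 purges as soon as pages become dirty. */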
TEST_BEGIN(test_decay_now) {
    test_skip_if(check_background_thread_enabled());

    unsigned arena_ind = do_arena_create(0, 0);
    assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
    assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
    size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
    /* Verify that dirty/muzzy pages never linger after deallocation. */
    for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
        size_t size = sizes[i];
        generate_dirty(arena_ind, size);
        assert_zu_eq(get_arena_pdirty(arena_ind), 0,
            "Unexpected dirty pages");
        assert_zu_eq(get_arena_pmuzzy(arena_ind), 0,
            "Unexpected muzzy pages");
    }
    do_arena_destroy(arena_ind);
}
TEST_END

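/* An arena created with decay_ms = -1 never purges, so dirty pages accrue. */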
TEST_BEGIN(test_decay_never) {
    test_skip_if(check_background_thread_enabled());

    unsigned arena_ind = do_arena_create(-1, -1);
    int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
    assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
    assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
    size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
    void *ptrs[sizeof(sizes)/sizeof(size_t)];
    for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
        ptrs[i] = do_mallocx(sizes[i], flags);
    }
    /* Verify that each deallocation generates additional dirty pages. */
    size_t pdirty_prev = get_arena_pdirty(arena_ind);
    size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind);
    assert_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
    assert_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");
    for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
        dallocx(ptrs[i], flags);
        size_t pdirty = get_arena_pdirty(arena_ind);
        size_t pmuzzy = get_arena_pmuzzy(arena_ind);
        assert_zu_gt(pdirty, pdirty_prev,
            "Expected dirty pages to increase.");
        assert_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
        pdirty_prev = pdirty;
    }
    do_arena_destroy(arena_ind);
}
TEST_END

int
main(void) {
    return test(
        test_decay_ticks,
        test_decay_ticker,
        test_decay_nonmonotonic,
        test_decay_now,
        test_decay_never);
}