xref: /aosp_15_r20/external/jemalloc_new/src/jemalloc.c (revision 1208bc7e437ced7eb82efac44ba17e3beba411da)
1 #define JEMALLOC_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4 
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/atomic.h"
7 #include "jemalloc/internal/ctl.h"
8 #include "jemalloc/internal/extent_dss.h"
9 #include "jemalloc/internal/extent_mmap.h"
10 #include "jemalloc/internal/jemalloc_internal_types.h"
11 #include "jemalloc/internal/log.h"
12 #include "jemalloc/internal/malloc_io.h"
13 #include "jemalloc/internal/mutex.h"
14 #include "jemalloc/internal/rtree.h"
15 #include "jemalloc/internal/size_classes.h"
16 #include "jemalloc/internal/spin.h"
17 #include "jemalloc/internal/sz.h"
18 #include "jemalloc/internal/ticker.h"
19 #include "jemalloc/internal/util.h"
20 
21 /******************************************************************************/
22 /* Data. */
23 
24 /* Runtime configuration options. */
25 const char	*je_malloc_conf
26 #ifndef _WIN32
27     JEMALLOC_ATTR(weak)
28 #endif
29     ;
30 bool	opt_abort =
31 #ifdef JEMALLOC_DEBUG
32     true
33 #else
34     false
35 #endif
36     ;
37 bool	opt_abort_conf =
38 #ifdef JEMALLOC_DEBUG
39     true
40 #else
41     false
42 #endif
43     ;
44 const char	*opt_junk =
45 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
46     "true"
47 #else
48     "false"
49 #endif
50     ;
51 bool	opt_junk_alloc =
52 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
53     true
54 #else
55     false
56 #endif
57     ;
58 bool	opt_junk_free =
59 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
60     true
61 #else
62     false
63 #endif
64     ;
65 
66 bool	opt_utrace = false;
67 bool	opt_xmalloc = false;
68 bool	opt_zero = false;
69 unsigned	opt_narenas = 0;
70 
71 unsigned	ncpus;
72 
73 /* Protects arenas initialization. */
74 malloc_mutex_t arenas_lock;
75 /*
76  * Arenas that are used to service external requests.  Not all elements of the
77  * arenas array are necessarily used; arenas are created lazily as needed.
78  *
79  * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
80  * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
81  * takes some action to create them and allocate from them.
82  *
83  * Points to an arena_t.
84  */
85 JEMALLOC_ALIGNED(CACHELINE)
86 atomic_p_t		arenas[MALLOCX_ARENA_LIMIT];
87 static atomic_u_t	narenas_total; /* Use narenas_total_*(). */
88 static arena_t		*a0; /* arenas[0]; read-only after initialization. */
89 unsigned		narenas_auto; /* Read-only after initialization. */
90 
91 typedef enum {
92 	malloc_init_uninitialized	= 3,
93 	malloc_init_a0_initialized	= 2,
94 	malloc_init_recursible		= 1,
95 	malloc_init_initialized		= 0 /* Common case --> jnz. */
96 } malloc_init_t;
97 static malloc_init_t	malloc_init_state = malloc_init_uninitialized;
98 
99 /* False should be the common case.  Set to true to trigger initialization. */
100 bool			malloc_slow = true;
101 
102 /* When malloc_slow is true, set the corresponding bits for sanity check. */
103 enum {
104 	flag_opt_junk_alloc	= (1U),
105 	flag_opt_junk_free	= (1U << 1),
106 	flag_opt_zero		= (1U << 2),
107 	flag_opt_utrace		= (1U << 3),
108 	flag_opt_xmalloc	= (1U << 4)
109 };
110 static uint8_t	malloc_slow_flags;
111 
112 #ifdef JEMALLOC_THREADED_INIT
113 /* Used to let the initializing thread recursively allocate. */
114 #  define NO_INITIALIZER	((pthread_t)0)
115 #  define INITIALIZER		pthread_self()
116 #  define IS_INITIALIZER	(malloc_initializer == pthread_self())
117 static pthread_t		malloc_initializer = NO_INITIALIZER;
118 #else
119 #  define NO_INITIALIZER	false
120 #  define INITIALIZER		true
121 #  define IS_INITIALIZER	malloc_initializer
122 static bool			malloc_initializer = NO_INITIALIZER;
123 #endif
124 
125 /* Used to avoid initialization races. */
126 #ifdef _WIN32
127 #if _WIN32_WINNT >= 0x0600
128 static malloc_mutex_t	init_lock = SRWLOCK_INIT;
129 #else
130 static malloc_mutex_t	init_lock;
131 static bool init_lock_initialized = false;
132 
JEMALLOC_ATTR(constructor)133 JEMALLOC_ATTR(constructor)
134 static void WINAPI
135 _init_init_lock(void) {
136 	/*
137 	 * If another constructor in the same binary is using mallctl to e.g.
138 	 * set up extent hooks, it may end up running before this one, and
139 	 * malloc_init_hard will crash trying to lock the uninitialized lock. So
140 	 * we force an initialization of the lock in malloc_init_hard as well.
141 	 * We don't try to care about atomicity of the accessed to the
142 	 * init_lock_initialized boolean, since it really only matters early in
143 	 * the process creation, before any separate thread normally starts
144 	 * doing anything.
145 	 */
146 	if (!init_lock_initialized) {
147 		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
148 		    malloc_mutex_rank_exclusive);
149 	}
150 	init_lock_initialized = true;
151 }
152 
153 #ifdef _MSC_VER
154 #  pragma section(".CRT$XCU", read)
155 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
156 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
157 #endif
158 #endif
159 #else
160 static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
161 #endif
162 
163 typedef struct {
164 	void	*p;	/* Input pointer (as in realloc(p, s)). */
165 	size_t	s;	/* Request size. */
166 	void	*r;	/* Result pointer. */
167 } malloc_utrace_t;
168 
169 #ifdef JEMALLOC_UTRACE
170 #  define UTRACE(a, b, c) do {						\
171 	if (unlikely(opt_utrace)) {					\
172 		int utrace_serrno = errno;				\
173 		malloc_utrace_t ut;					\
174 		ut.p = (a);						\
175 		ut.s = (b);						\
176 		ut.r = (c);						\
177 		utrace(&ut, sizeof(ut));				\
178 		errno = utrace_serrno;					\
179 	}								\
180 } while (0)
181 #else
182 #  define UTRACE(a, b, c)
183 #endif
184 
185 /* Whether encountered any invalid config options. */
186 static bool had_conf_error = false;
187 
188 /******************************************************************************/
189 /*
190  * Function prototypes for static functions that are referenced prior to
191  * definition.
192  */
193 
194 static bool	malloc_init_hard_a0(void);
195 static bool	malloc_init_hard(void);
196 
197 /******************************************************************************/
198 /*
199  * Begin miscellaneous support functions.
200  */
201 
202 bool
malloc_initialized(void)203 malloc_initialized(void) {
204 	return (malloc_init_state == malloc_init_initialized);
205 }
206 
207 JEMALLOC_ALWAYS_INLINE bool
malloc_init_a0(void)208 malloc_init_a0(void) {
209 	if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
210 		return malloc_init_hard_a0();
211 	}
212 	return false;
213 }
214 
215 JEMALLOC_ALWAYS_INLINE bool
malloc_init(void)216 malloc_init(void) {
217 	if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
218 		return true;
219 	}
220 	return false;
221 }
222 
223 /*
224  * The a0*() functions are used instead of i{d,}alloc() in situations that
225  * cannot tolerate TLS variable access.
226  */
227 
228 static void *
a0ialloc(size_t size,bool zero,bool is_internal)229 a0ialloc(size_t size, bool zero, bool is_internal) {
230 	if (unlikely(malloc_init_a0())) {
231 		return NULL;
232 	}
233 
234 	return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
235 	    is_internal, arena_get(TSDN_NULL, 0, true), true);
236 }
237 
238 static void
a0idalloc(void * ptr,bool is_internal)239 a0idalloc(void *ptr, bool is_internal) {
240 	idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
241 }
242 
243 void *
a0malloc(size_t size)244 a0malloc(size_t size) {
245 	return a0ialloc(size, false, true);
246 }
247 
248 void
a0dalloc(void * ptr)249 a0dalloc(void *ptr) {
250 	a0idalloc(ptr, true);
251 }
252 
253 /*
254  * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive
255  * situations that cannot tolerate TLS variable access (TLS allocation and very
256  * early internal data structure initialization).
257  */
258 
259 void *
bootstrap_malloc(size_t size)260 bootstrap_malloc(size_t size) {
261 	if (unlikely(size == 0)) {
262 		size = 1;
263 	}
264 
265 	return a0ialloc(size, false, false);
266 }
267 
268 void *
bootstrap_calloc(size_t num,size_t size)269 bootstrap_calloc(size_t num, size_t size) {
270 	size_t num_size;
271 
272 	num_size = num * size;
273 	if (unlikely(num_size == 0)) {
274 		assert(num == 0 || size == 0);
275 		num_size = 1;
276 	}
277 
278 	return a0ialloc(num_size, true, false);
279 }
280 
281 void
bootstrap_free(void * ptr)282 bootstrap_free(void *ptr) {
283 	if (unlikely(ptr == NULL)) {
284 		return;
285 	}
286 
287 	a0idalloc(ptr, false);
288 }
289 
290 void
arena_set(unsigned ind,arena_t * arena)291 arena_set(unsigned ind, arena_t *arena) {
292 	atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
293 }
294 
295 static void
narenas_total_set(unsigned narenas)296 narenas_total_set(unsigned narenas) {
297 	atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
298 }
299 
300 static void
narenas_total_inc(void)301 narenas_total_inc(void) {
302 	atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
303 }
304 
305 unsigned
narenas_total_get(void)306 narenas_total_get(void) {
307 	return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
308 }
309 
310 /* Create a new arena and insert it into the arenas array at index ind. */
311 static arena_t *
arena_init_locked(tsdn_t * tsdn,unsigned ind,extent_hooks_t * extent_hooks)312 arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
313 	arena_t *arena;
314 
315 	assert(ind <= narenas_total_get());
316 	if (ind >= MALLOCX_ARENA_LIMIT) {
317 		return NULL;
318 	}
319 	if (ind == narenas_total_get()) {
320 		narenas_total_inc();
321 	}
322 
323 	/*
324 	 * Another thread may have already initialized arenas[ind] if it's an
325 	 * auto arena.
326 	 */
327 	arena = arena_get(tsdn, ind, false);
328 	if (arena != NULL) {
329 		assert(ind < narenas_auto);
330 		return arena;
331 	}
332 
333 	/* Actually initialize the arena. */
334 	arena = arena_new(tsdn, ind, extent_hooks);
335 
336 	return arena;
337 }
338 
339 static void
arena_new_create_background_thread(tsdn_t * tsdn,unsigned ind)340 arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
341 	if (ind == 0) {
342 		return;
343 	}
344 	if (have_background_thread) {
345 		bool err;
346 		malloc_mutex_lock(tsdn, &background_thread_lock);
347 		err = background_thread_create(tsdn_tsd(tsdn), ind);
348 		malloc_mutex_unlock(tsdn, &background_thread_lock);
349 		if (err) {
350 			malloc_printf("<jemalloc>: error in background thread "
351 				      "creation for arena %u. Abort.\n", ind);
352 			abort();
353 		}
354 	}
355 }
356 
357 arena_t *
arena_init(tsdn_t * tsdn,unsigned ind,extent_hooks_t * extent_hooks)358 arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
359 	arena_t *arena;
360 
361 	malloc_mutex_lock(tsdn, &arenas_lock);
362 	arena = arena_init_locked(tsdn, ind, extent_hooks);
363 	malloc_mutex_unlock(tsdn, &arenas_lock);
364 
365 	arena_new_create_background_thread(tsdn, ind);
366 
367 	return arena;
368 }
369 
370 static void
arena_bind(tsd_t * tsd,unsigned ind,bool internal)371 arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
372 	arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
373 	arena_nthreads_inc(arena, internal);
374 
375 	if (internal) {
376 		tsd_iarena_set(tsd, arena);
377 	} else {
378 		tsd_arena_set(tsd, arena);
379 	}
380 }
381 
382 void
arena_migrate(tsd_t * tsd,unsigned oldind,unsigned newind)383 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
384 	arena_t *oldarena, *newarena;
385 
386 	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
387 	newarena = arena_get(tsd_tsdn(tsd), newind, false);
388 	arena_nthreads_dec(oldarena, false);
389 	arena_nthreads_inc(newarena, false);
390 	tsd_arena_set(tsd, newarena);
391 }
392 
393 static void
arena_unbind(tsd_t * tsd,unsigned ind,bool internal)394 arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
395 	arena_t *arena;
396 
397 	arena = arena_get(tsd_tsdn(tsd), ind, false);
398 	arena_nthreads_dec(arena, internal);
399 
400 	if (internal) {
401 		tsd_iarena_set(tsd, NULL);
402 	} else {
403 		tsd_arena_set(tsd, NULL);
404 	}
405 }
406 
407 arena_tdata_t *
arena_tdata_get_hard(tsd_t * tsd,unsigned ind)408 arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
409 	arena_tdata_t *tdata, *arenas_tdata_old;
410 	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
411 	unsigned narenas_tdata_old, i;
412 	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
413 	unsigned narenas_actual = narenas_total_get();
414 
415 	/*
416 	 * Dissociate old tdata array (and set up for deallocation upon return)
417 	 * if it's too small.
418 	 */
419 	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
420 		arenas_tdata_old = arenas_tdata;
421 		narenas_tdata_old = narenas_tdata;
422 		arenas_tdata = NULL;
423 		narenas_tdata = 0;
424 		tsd_arenas_tdata_set(tsd, arenas_tdata);
425 		tsd_narenas_tdata_set(tsd, narenas_tdata);
426 	} else {
427 		arenas_tdata_old = NULL;
428 		narenas_tdata_old = 0;
429 	}
430 
431 	/* Allocate tdata array if it's missing. */
432 	if (arenas_tdata == NULL) {
433 		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
434 		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
435 
436 		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
437 			*arenas_tdata_bypassp = true;
438 			arenas_tdata = (arena_tdata_t *)a0malloc(
439 			    sizeof(arena_tdata_t) * narenas_tdata);
440 			*arenas_tdata_bypassp = false;
441 		}
442 		if (arenas_tdata == NULL) {
443 			tdata = NULL;
444 			goto label_return;
445 		}
446 		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
447 		tsd_arenas_tdata_set(tsd, arenas_tdata);
448 		tsd_narenas_tdata_set(tsd, narenas_tdata);
449 	}
450 
451 	/*
452 	 * Copy to tdata array.  It's possible that the actual number of arenas
453 	 * has increased since narenas_total_get() was called above, but that
454 	 * causes no correctness issues unless two threads concurrently execute
455 	 * the arenas.create mallctl, which we trust mallctl synchronization to
456 	 * prevent.
457 	 */
458 
459 	/* Copy/initialize tickers. */
460 	for (i = 0; i < narenas_actual; i++) {
461 		if (i < narenas_tdata_old) {
462 			ticker_copy(&arenas_tdata[i].decay_ticker,
463 			    &arenas_tdata_old[i].decay_ticker);
464 		} else {
465 			ticker_init(&arenas_tdata[i].decay_ticker,
466 			    DECAY_NTICKS_PER_UPDATE);
467 		}
468 	}
469 	if (narenas_tdata > narenas_actual) {
470 		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
471 		    * (narenas_tdata - narenas_actual));
472 	}
473 
474 	/* Read the refreshed tdata array. */
475 	tdata = &arenas_tdata[ind];
476 label_return:
477 	if (arenas_tdata_old != NULL) {
478 		a0dalloc(arenas_tdata_old);
479 	}
480 	return tdata;
481 }
482 
483 /* Slow path, called only by arena_choose(). */
484 arena_t *
arena_choose_hard(tsd_t * tsd,bool internal)485 arena_choose_hard(tsd_t *tsd, bool internal) {
486 	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
487 
488 	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
489 		unsigned choose = percpu_arena_choose();
490 		ret = arena_get(tsd_tsdn(tsd), choose, true);
491 		assert(ret != NULL);
492 		arena_bind(tsd, arena_ind_get(ret), false);
493 		arena_bind(tsd, arena_ind_get(ret), true);
494 
495 		return ret;
496 	}
497 
498 	if (narenas_auto > 1) {
499 		unsigned i, j, choose[2], first_null;
500 		bool is_new_arena[2];
501 
502 		/*
503 		 * Determine binding for both non-internal and internal
504 		 * allocation.
505 		 *
506 		 *   choose[0]: For application allocation.
507 		 *   choose[1]: For internal metadata allocation.
508 		 */
509 
510 		for (j = 0; j < 2; j++) {
511 			choose[j] = 0;
512 			is_new_arena[j] = false;
513 		}
514 
515 		first_null = narenas_auto;
516 		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
517 		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
518 		for (i = 1; i < narenas_auto; i++) {
519 			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
520 				/*
521 				 * Choose the first arena that has the lowest
522 				 * number of threads assigned to it.
523 				 */
524 				for (j = 0; j < 2; j++) {
525 					if (arena_nthreads_get(arena_get(
526 					    tsd_tsdn(tsd), i, false), !!j) <
527 					    arena_nthreads_get(arena_get(
528 					    tsd_tsdn(tsd), choose[j], false),
529 					    !!j)) {
530 						choose[j] = i;
531 					}
532 				}
533 			} else if (first_null == narenas_auto) {
534 				/*
535 				 * Record the index of the first uninitialized
536 				 * arena, in case all extant arenas are in use.
537 				 *
538 				 * NB: It is possible for there to be
539 				 * discontinuities in terms of initialized
540 				 * versus uninitialized arenas, due to the
541 				 * "thread.arena" mallctl.
542 				 */
543 				first_null = i;
544 			}
545 		}
546 
547 		for (j = 0; j < 2; j++) {
548 			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
549 			    choose[j], false), !!j) == 0 || first_null ==
550 			    narenas_auto) {
551 				/*
552 				 * Use an unloaded arena, or the least loaded
553 				 * arena if all arenas are already initialized.
554 				 */
555 				if (!!j == internal) {
556 					ret = arena_get(tsd_tsdn(tsd),
557 					    choose[j], false);
558 				}
559 			} else {
560 				arena_t *arena;
561 
562 				/* Initialize a new arena. */
563 				choose[j] = first_null;
564 				arena = arena_init_locked(tsd_tsdn(tsd),
565 				    choose[j],
566 				    (extent_hooks_t *)&extent_hooks_default);
567 				if (arena == NULL) {
568 					malloc_mutex_unlock(tsd_tsdn(tsd),
569 					    &arenas_lock);
570 					return NULL;
571 				}
572 				is_new_arena[j] = true;
573 				if (!!j == internal) {
574 					ret = arena;
575 				}
576 			}
577 			arena_bind(tsd, choose[j], !!j);
578 		}
579 		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
580 
581 		for (j = 0; j < 2; j++) {
582 			if (is_new_arena[j]) {
583 				assert(choose[j] > 0);
584 				arena_new_create_background_thread(
585 				    tsd_tsdn(tsd), choose[j]);
586 			}
587 		}
588 
589 	} else {
590 		ret = arena_get(tsd_tsdn(tsd), 0, false);
591 		arena_bind(tsd, 0, false);
592 		arena_bind(tsd, 0, true);
593 	}
594 
595 	return ret;
596 }
597 
598 void
iarena_cleanup(tsd_t * tsd)599 iarena_cleanup(tsd_t *tsd) {
600 	arena_t *iarena;
601 
602 	iarena = tsd_iarena_get(tsd);
603 	if (iarena != NULL) {
604 		arena_unbind(tsd, arena_ind_get(iarena), true);
605 	}
606 }
607 
608 void
arena_cleanup(tsd_t * tsd)609 arena_cleanup(tsd_t *tsd) {
610 	arena_t *arena;
611 
612 	arena = tsd_arena_get(tsd);
613 	if (arena != NULL) {
614 		arena_unbind(tsd, arena_ind_get(arena), false);
615 	}
616 }
617 
618 void
arenas_tdata_cleanup(tsd_t * tsd)619 arenas_tdata_cleanup(tsd_t *tsd) {
620 	arena_tdata_t *arenas_tdata;
621 
622 	/* Prevent tsd->arenas_tdata from being (re)created. */
623 	*tsd_arenas_tdata_bypassp_get(tsd) = true;
624 
625 	arenas_tdata = tsd_arenas_tdata_get(tsd);
626 	if (arenas_tdata != NULL) {
627 		tsd_arenas_tdata_set(tsd, NULL);
628 		a0dalloc(arenas_tdata);
629 	}
630 }
631 
632 static void
stats_print_atexit(void)633 stats_print_atexit(void) {
634 #if defined(ANDROID_ENABLE_TCACHE)
635 	if (config_stats) {
636 		tsdn_t *tsdn;
637 		unsigned narenas, i;
638 
639 		tsdn = tsdn_fetch();
640 
641 		/*
642 		 * Merge stats from extant threads.  This is racy, since
643 		 * individual threads do not lock when recording tcache stats
644 		 * events.  As a consequence, the final stats may be slightly
645 		 * out of date by the time they are reported, if other threads
646 		 * continue to allocate.
647 		 */
648 		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
649 			arena_t *arena = arena_get(tsdn, i, false);
650 			if (arena != NULL) {
651 				tcache_t *tcache;
652 
653 				malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
654 				ql_foreach(tcache, &arena->tcache_ql, link) {
655 					tcache_stats_merge(tsdn, tcache, arena);
656 				}
657 				malloc_mutex_unlock(tsdn,
658 				    &arena->tcache_ql_mtx);
659 			}
660 		}
661 	}
662 #endif
663 	je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
664 }
665 
666 /*
667  * Ensure that we don't hold any locks upon entry to or exit from allocator
668  * code (in a "broad" sense that doesn't count a reentrant allocation as an
669  * entrance or exit).
670  */
671 JEMALLOC_ALWAYS_INLINE void
check_entry_exit_locking(tsdn_t * tsdn)672 check_entry_exit_locking(tsdn_t *tsdn) {
673 	if (!config_debug) {
674 		return;
675 	}
676 	if (tsdn_null(tsdn)) {
677 		return;
678 	}
679 	tsd_t *tsd = tsdn_tsd(tsdn);
680 	/*
681 	 * It's possible we hold locks at entry/exit if we're in a nested
682 	 * allocation.
683 	 */
684 	int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
685 	if (reentrancy_level != 0) {
686 		return;
687 	}
688 	witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
689 }
690 
691 /*
692  * End miscellaneous support functions.
693  */
694 /******************************************************************************/
695 /*
696  * Begin initialization functions.
697  */
698 
699 static char *
jemalloc_secure_getenv(const char * name)700 jemalloc_secure_getenv(const char *name) {
701 #ifdef JEMALLOC_HAVE_SECURE_GETENV
702 	return secure_getenv(name);
703 #else
704 #  ifdef JEMALLOC_HAVE_ISSETUGID
705 	if (issetugid() != 0) {
706 		return NULL;
707 	}
708 #  endif
709 	return getenv(name);
710 #endif
711 }
712 
713 static unsigned
malloc_ncpus(void)714 malloc_ncpus(void) {
715 	long result;
716 
717 #ifdef _WIN32
718 	SYSTEM_INFO si;
719 	GetSystemInfo(&si);
720 	result = si.dwNumberOfProcessors;
721 #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
722 	/*
723 	 * glibc >= 2.6 has the CPU_COUNT macro.
724 	 *
725 	 * glibc's sysconf() uses isspace().  glibc allocates for the first time
726 	 * *before* setting up the isspace tables.  Therefore we need a
727 	 * different method to get the number of CPUs.
728 	 */
729 	{
730 		cpu_set_t set;
731 
732 		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
733 		result = CPU_COUNT(&set);
734 	}
735 #else
736 	result = sysconf(_SC_NPROCESSORS_ONLN);
737 #endif
738 	return ((result == -1) ? 1 : (unsigned)result);
739 }
740 
741 static void
init_opt_stats_print_opts(const char * v,size_t vlen)742 init_opt_stats_print_opts(const char *v, size_t vlen) {
743 	size_t opts_len = strlen(opt_stats_print_opts);
744 	assert(opts_len <= stats_print_tot_num_options);
745 
746 	for (size_t i = 0; i < vlen; i++) {
747 		switch (v[i]) {
748 #define OPTION(o, v, d, s) case o: break;
749 			STATS_PRINT_OPTIONS
750 #undef OPTION
751 		default: continue;
752 		}
753 
754 		if (strchr(opt_stats_print_opts, v[i]) != NULL) {
755 			/* Ignore repeated. */
756 			continue;
757 		}
758 
759 		opt_stats_print_opts[opts_len++] = v[i];
760 		opt_stats_print_opts[opts_len] = '\0';
761 		assert(opts_len <= stats_print_tot_num_options);
762 	}
763 	assert(opts_len == strlen(opt_stats_print_opts));
764 }
765 
766 static bool
malloc_conf_next(char const ** opts_p,char const ** k_p,size_t * klen_p,char const ** v_p,size_t * vlen_p)767 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
768     char const **v_p, size_t *vlen_p) {
769 	bool accept;
770 	const char *opts = *opts_p;
771 
772 	*k_p = opts;
773 
774 	for (accept = false; !accept;) {
775 		switch (*opts) {
776 		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
777 		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
778 		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
779 		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
780 		case 'Y': case 'Z':
781 		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
782 		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
783 		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
784 		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
785 		case 'y': case 'z':
786 		case '0': case '1': case '2': case '3': case '4': case '5':
787 		case '6': case '7': case '8': case '9':
788 		case '_':
789 			opts++;
790 			break;
791 		case ':':
792 			opts++;
793 			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
794 			*v_p = opts;
795 			accept = true;
796 			break;
797 		case '\0':
798 			if (opts != *opts_p) {
799 				malloc_write("<jemalloc>: Conf string ends "
800 				    "with key\n");
801 			}
802 			return true;
803 		default:
804 			malloc_write("<jemalloc>: Malformed conf string\n");
805 			return true;
806 		}
807 	}
808 
809 	for (accept = false; !accept;) {
810 		switch (*opts) {
811 		case ',':
812 			opts++;
813 			/*
814 			 * Look ahead one character here, because the next time
815 			 * this function is called, it will assume that end of
816 			 * input has been cleanly reached if no input remains,
817 			 * but we have optimistically already consumed the
818 			 * comma if one exists.
819 			 */
820 			if (*opts == '\0') {
821 				malloc_write("<jemalloc>: Conf string ends "
822 				    "with comma\n");
823 			}
824 			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
825 			accept = true;
826 			break;
827 		case '\0':
828 			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
829 			accept = true;
830 			break;
831 		default:
832 			opts++;
833 			break;
834 		}
835 	}
836 
837 	*opts_p = opts;
838 	return false;
839 }
840 
841 static void
malloc_abort_invalid_conf(void)842 malloc_abort_invalid_conf(void) {
843 	assert(opt_abort_conf);
844 	malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
845 	    "value (see above).\n");
846 	abort();
847 }
848 
849 static void
malloc_conf_error(const char * msg,const char * k,size_t klen,const char * v,size_t vlen)850 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
851     size_t vlen) {
852 	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
853 	    (int)vlen, v);
854 	/* If abort_conf is set, error out after processing all options. */
855 	had_conf_error = true;
856 }
857 
858 static void
malloc_slow_flag_init(void)859 malloc_slow_flag_init(void) {
860 	/*
861 	 * Combine the runtime options into malloc_slow for fast path.  Called
862 	 * after processing all the options.
863 	 */
864 	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
865 	    | (opt_junk_free ? flag_opt_junk_free : 0)
866 	    | (opt_zero ? flag_opt_zero : 0)
867 	    | (opt_utrace ? flag_opt_utrace : 0)
868 	    | (opt_xmalloc ? flag_opt_xmalloc : 0);
869 
870 	malloc_slow = (malloc_slow_flags != 0);
871 }
872 
873 static void
malloc_conf_init(void)874 malloc_conf_init(void) {
875 	unsigned i;
876 	char buf[PATH_MAX + 1];
877 	const char *opts, *k, *v;
878 	size_t klen, vlen;
879 
880 #if defined(__BIONIC__)
881 	/* For Android, do not look at files nor environment variables for
882 	 * config data.
883 	 */
884 	for (i = 0; i < 2; i++) {
885 #else
886 	for (i = 0; i < 4; i++) {
887 #endif
888 		/* Get runtime configuration. */
889 		switch (i) {
890 		case 0:
891 			opts = config_malloc_conf;
892 			break;
893 		case 1:
894 			if (je_malloc_conf != NULL) {
895 				/*
896 				 * Use options that were compiled into the
897 				 * program.
898 				 */
899 				opts = je_malloc_conf;
900 			} else {
901 				/* No configuration specified. */
902 				buf[0] = '\0';
903 				opts = buf;
904 			}
905 			break;
906 		case 2: {
907 			ssize_t linklen = 0;
908 #ifndef _WIN32
909 			int saved_errno = errno;
910 			const char *linkname =
911 #  ifdef JEMALLOC_PREFIX
912 			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
913 #  else
914 			    "/etc/malloc.conf"
915 #  endif
916 			    ;
917 
918 			/*
919 			 * Try to use the contents of the "/etc/malloc.conf"
920 			 * symbolic link's name.
921 			 */
922 			linklen = readlink(linkname, buf, sizeof(buf) - 1);
923 			if (linklen == -1) {
924 				/* No configuration specified. */
925 				linklen = 0;
926 				/* Restore errno. */
927 				set_errno(saved_errno);
928 			}
929 #endif
930 			buf[linklen] = '\0';
931 			opts = buf;
932 			break;
933 		} case 3: {
934 			const char *envname =
935 #ifdef JEMALLOC_PREFIX
936 			    JEMALLOC_CPREFIX"MALLOC_CONF"
937 #else
938 			    "MALLOC_CONF"
939 #endif
940 			    ;
941 
942 			if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
943 				/*
944 				 * Do nothing; opts is already initialized to
945 				 * the value of the MALLOC_CONF environment
946 				 * variable.
947 				 */
948 			} else {
949 				/* No configuration specified. */
950 				buf[0] = '\0';
951 				opts = buf;
952 			}
953 			break;
954 		} default:
955 			not_reached();
956 			buf[0] = '\0';
957 			opts = buf;
958 		}
959 
960 		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
961 		    &vlen)) {
962 #define CONF_MATCH(n)							\
963 	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
964 #define CONF_MATCH_VALUE(n)						\
965 	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
966 #define CONF_HANDLE_BOOL(o, n)						\
967 			if (CONF_MATCH(n)) {				\
968 				if (CONF_MATCH_VALUE("true")) {		\
969 					o = true;			\
970 				} else if (CONF_MATCH_VALUE("false")) {	\
971 					o = false;			\
972 				} else {				\
973 					malloc_conf_error(		\
974 					    "Invalid conf value",	\
975 					    k, klen, v, vlen);		\
976 				}					\
977 				continue;				\
978 			}
979 #define CONF_MIN_no(um, min)	false
980 #define CONF_MIN_yes(um, min)	((um) < (min))
981 #define CONF_MAX_no(um, max)	false
982 #define CONF_MAX_yes(um, max)	((um) > (max))
983 #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip)	\
984 			if (CONF_MATCH(n)) {				\
985 				uintmax_t um;				\
986 				char *end;				\
987 									\
988 				set_errno(0);				\
989 				um = malloc_strtoumax(v, &end, 0);	\
990 				if (get_errno() != 0 || (uintptr_t)end -\
991 				    (uintptr_t)v != vlen) {		\
992 					malloc_conf_error(		\
993 					    "Invalid conf value",	\
994 					    k, klen, v, vlen);		\
995 				} else if (clip) {			\
996 					if (CONF_MIN_##check_min(um,	\
997 					    (t)(min))) {		\
998 						o = (t)(min);		\
999 					} else if (			\
1000 					    CONF_MAX_##check_max(um,	\
1001 					    (t)(max))) {		\
1002 						o = (t)(max);		\
1003 					} else {			\
1004 						o = (t)um;		\
1005 					}				\
1006 				} else {				\
1007 					if (CONF_MIN_##check_min(um,	\
1008 					    (t)(min)) ||		\
1009 					    CONF_MAX_##check_max(um,	\
1010 					    (t)(max))) {		\
1011 						malloc_conf_error(	\
1012 						    "Out-of-range "	\
1013 						    "conf value",	\
1014 						    k, klen, v, vlen);	\
1015 					} else {			\
1016 						o = (t)um;		\
1017 					}				\
1018 				}					\
1019 				continue;				\
1020 			}
1021 #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max,	\
1022     clip)								\
1023 			CONF_HANDLE_T_U(unsigned, o, n, min, max,	\
1024 			    check_min, check_max, clip)
1025 #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip)	\
1026 			CONF_HANDLE_T_U(size_t, o, n, min, max,		\
1027 			    check_min, check_max, clip)
1028 #define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
1029 			if (CONF_MATCH(n)) {				\
1030 				long l;					\
1031 				char *end;				\
1032 									\
1033 				set_errno(0);				\
1034 				l = strtol(v, &end, 0);			\
1035 				if (get_errno() != 0 || (uintptr_t)end -\
1036 				    (uintptr_t)v != vlen) {		\
1037 					malloc_conf_error(		\
1038 					    "Invalid conf value",	\
1039 					    k, klen, v, vlen);		\
1040 				} else if (l < (ssize_t)(min) || l >	\
1041 				    (ssize_t)(max)) {			\
1042 					malloc_conf_error(		\
1043 					    "Out-of-range conf value",	\
1044 					    k, klen, v, vlen);		\
1045 				} else {				\
1046 					o = l;				\
1047 				}					\
1048 				continue;				\
1049 			}
1050 #define CONF_HANDLE_CHAR_P(o, n, d)					\
1051 			if (CONF_MATCH(n)) {				\
1052 				size_t cpylen = (vlen <=		\
1053 				    sizeof(o)-1) ? vlen :		\
1054 				    sizeof(o)-1;			\
1055 				strncpy(o, v, cpylen);			\
1056 				o[cpylen] = '\0';			\
1057 				continue;				\
1058 			}
1059 
1060 			CONF_HANDLE_BOOL(opt_abort, "abort")
1061 			CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
1062 			if (strncmp("metadata_thp", k, klen) == 0) {
1063 				int i;
1064 				bool match = false;
1065 				for (i = 0; i < metadata_thp_mode_limit; i++) {
1066 					if (strncmp(metadata_thp_mode_names[i],
1067 					    v, vlen) == 0) {
1068 						opt_metadata_thp = i;
1069 						match = true;
1070 						break;
1071 					}
1072 				}
1073 				if (!match) {
1074 					malloc_conf_error("Invalid conf value",
1075 					    k, klen, v, vlen);
1076 				}
1077 				continue;
1078 			}
1079 			CONF_HANDLE_BOOL(opt_retain, "retain")
1080 			if (strncmp("dss", k, klen) == 0) {
1081 				int i;
1082 				bool match = false;
1083 				for (i = 0; i < dss_prec_limit; i++) {
1084 					if (strncmp(dss_prec_names[i], v, vlen)
1085 					    == 0) {
1086 						if (extent_dss_prec_set(i)) {
1087 							malloc_conf_error(
1088 							    "Error setting dss",
1089 							    k, klen, v, vlen);
1090 						} else {
1091 							opt_dss =
1092 							    dss_prec_names[i];
1093 							match = true;
1094 							break;
1095 						}
1096 					}
1097 				}
1098 				if (!match) {
1099 					malloc_conf_error("Invalid conf value",
1100 					    k, klen, v, vlen);
1101 				}
1102 				continue;
1103 			}
1104 			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1105 			    UINT_MAX, yes, no, false)
1106 			CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
1107 			    "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1108 			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1109 			    SSIZE_MAX);
1110 			CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
1111 			    "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1112 			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1113 			    SSIZE_MAX);
1114 			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
1115 			if (CONF_MATCH("stats_print_opts")) {
1116 				init_opt_stats_print_opts(v, vlen);
1117 				continue;
1118 			}
1119 			if (config_fill) {
1120 				if (CONF_MATCH("junk")) {
1121 					if (CONF_MATCH_VALUE("true")) {
1122 						opt_junk = "true";
1123 						opt_junk_alloc = opt_junk_free =
1124 						    true;
1125 					} else if (CONF_MATCH_VALUE("false")) {
1126 						opt_junk = "false";
1127 						opt_junk_alloc = opt_junk_free =
1128 						    false;
1129 					} else if (CONF_MATCH_VALUE("alloc")) {
1130 						opt_junk = "alloc";
1131 						opt_junk_alloc = true;
1132 						opt_junk_free = false;
1133 					} else if (CONF_MATCH_VALUE("free")) {
1134 						opt_junk = "free";
1135 						opt_junk_alloc = false;
1136 						opt_junk_free = true;
1137 					} else {
1138 						malloc_conf_error(
1139 						    "Invalid conf value", k,
1140 						    klen, v, vlen);
1141 					}
1142 					continue;
1143 				}
1144 				CONF_HANDLE_BOOL(opt_zero, "zero")
1145 			}
1146 			if (config_utrace) {
1147 				CONF_HANDLE_BOOL(opt_utrace, "utrace")
1148 			}
1149 			if (config_xmalloc) {
1150 				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
1151 			}
1152 			CONF_HANDLE_BOOL(opt_tcache, "tcache")
1153 			CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
1154 			    "lg_extent_max_active_fit", 0,
1155 			    (sizeof(size_t) << 3), yes, yes, false)
1156 			CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
1157 			    -1, (sizeof(size_t) << 3) - 1)
1158 			if (strncmp("percpu_arena", k, klen) == 0) {
1159 				bool match = false;
1160 				for (int i = percpu_arena_mode_names_base; i <
1161 				    percpu_arena_mode_names_limit; i++) {
1162 					if (strncmp(percpu_arena_mode_names[i],
1163 					    v, vlen) == 0) {
1164 						if (!have_percpu_arena) {
1165 							malloc_conf_error(
1166 							    "No getcpu support",
1167 							    k, klen, v, vlen);
1168 						}
1169 						opt_percpu_arena = i;
1170 						match = true;
1171 						break;
1172 					}
1173 				}
1174 				if (!match) {
1175 					malloc_conf_error("Invalid conf value",
1176 					    k, klen, v, vlen);
1177 				}
1178 				continue;
1179 			}
1180 			CONF_HANDLE_BOOL(opt_background_thread,
1181 			    "background_thread");
1182 			CONF_HANDLE_SIZE_T(opt_max_background_threads,
1183 					   "max_background_threads", 1,
1184 					   opt_max_background_threads, yes, yes,
1185 					   true);
1186 			if (config_prof) {
1187 				CONF_HANDLE_BOOL(opt_prof, "prof")
1188 				CONF_HANDLE_CHAR_P(opt_prof_prefix,
1189 				    "prof_prefix", "jeprof")
1190 				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
1191 				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1192 				    "prof_thread_active_init")
1193 				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1194 				    "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
1195 				    - 1, no, yes, true)
1196 				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
1197 				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1198 				    "lg_prof_interval", -1,
1199 				    (sizeof(uint64_t) << 3) - 1)
1200 				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
1201 				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
1202 				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
1203 			}
1204 			if (config_log) {
1205 				if (CONF_MATCH("log")) {
1206 					size_t cpylen = (
1207 					    vlen <= sizeof(log_var_names) ?
1208 					    vlen : sizeof(log_var_names) - 1);
1209 					strncpy(log_var_names, v, cpylen);
1210 					log_var_names[cpylen] = '\0';
1211 					continue;
1212 				}
1213 			}
1214 			if (CONF_MATCH("thp")) {
1215 				bool match = false;
1216 				for (int i = 0; i < thp_mode_names_limit; i++) {
1217 					if (strncmp(thp_mode_names[i],v, vlen)
1218 					    == 0) {
1219 						if (!have_madvise_huge) {
1220 							malloc_conf_error(
1221 							    "No THP support",
1222 							    k, klen, v, vlen);
1223 						}
1224 						opt_thp = i;
1225 						match = true;
1226 						break;
1227 					}
1228 				}
1229 				if (!match) {
1230 					malloc_conf_error("Invalid conf value",
1231 					    k, klen, v, vlen);
1232 				}
1233 				continue;
1234 			}
1235 			malloc_conf_error("Invalid conf pair", k, klen, v,
1236 			    vlen);
1237 #undef CONF_MATCH
1238 #undef CONF_MATCH_VALUE
1239 #undef CONF_HANDLE_BOOL
1240 #undef CONF_MIN_no
1241 #undef CONF_MIN_yes
1242 #undef CONF_MAX_no
1243 #undef CONF_MAX_yes
1244 #undef CONF_HANDLE_T_U
1245 #undef CONF_HANDLE_UNSIGNED
1246 #undef CONF_HANDLE_SIZE_T
1247 #undef CONF_HANDLE_SSIZE_T
1248 #undef CONF_HANDLE_CHAR_P
1249 		}
1250 		if (opt_abort_conf && had_conf_error) {
1251 			malloc_abort_invalid_conf();
1252 		}
1253 	}
1254 	atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
1255 }
1256 
1257 static bool
1258 malloc_init_hard_needed(void) {
1259 	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1260 	    malloc_init_recursible)) {
1261 		/*
1262 		 * Another thread initialized the allocator before this one
1263 		 * acquired init_lock, or this thread is the initializing
1264 		 * thread, and it is recursively allocating.
1265 		 */
1266 		return false;
1267 	}
1268 #ifdef JEMALLOC_THREADED_INIT
1269 	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1270 		/* Busy-wait until the initializing thread completes. */
1271 		spin_t spinner = SPIN_INITIALIZER;
1272 		do {
1273 			malloc_mutex_unlock(TSDN_NULL, &init_lock);
1274 			spin_adaptive(&spinner);
1275 			malloc_mutex_lock(TSDN_NULL, &init_lock);
1276 		} while (!malloc_initialized());
1277 		return false;
1278 	}
1279 #endif
1280 	return true;
1281 }
1282 
1283 static bool
1284 malloc_init_hard_a0_locked() {
1285 	malloc_initializer = INITIALIZER;
1286 
1287 	if (config_prof) {
1288 		prof_boot0();
1289 	}
1290 	malloc_conf_init();
1291 	if (opt_stats_print) {
1292 		/* Print statistics at exit. */
1293 		if (atexit(stats_print_atexit) != 0) {
1294 			malloc_write("<jemalloc>: Error in atexit()\n");
1295 			if (opt_abort) {
1296 				abort();
1297 			}
1298 		}
1299 	}
1300 	if (pages_boot()) {
1301 		return true;
1302 	}
1303 	if (base_boot(TSDN_NULL)) {
1304 		return true;
1305 	}
1306 	if (extent_boot()) {
1307 		return true;
1308 	}
1309 	if (ctl_boot()) {
1310 		return true;
1311 	}
1312 	if (config_prof) {
1313 		prof_boot1();
1314 	}
1315 	arena_boot();
1316 	if (tcache_boot(TSDN_NULL)) {
1317 		return true;
1318 	}
1319 	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
1320 	    malloc_mutex_rank_exclusive)) {
1321 		return true;
1322 	}
1323 	/*
1324 	 * Create enough scaffolding to allow recursive allocation in
1325 	 * malloc_ncpus().
1326 	 */
1327 	narenas_auto = 1;
1328 	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1329 	/*
1330 	 * Initialize one arena here.  The rest are lazily created in
1331 	 * arena_choose_hard().
1332 	 */
1333 	if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
1334 	    == NULL) {
1335 		return true;
1336 	}
1337 	a0 = arena_get(TSDN_NULL, 0, false);
1338 	malloc_init_state = malloc_init_a0_initialized;
1339 
1340 	return false;
1341 }
1342 
1343 static bool
1344 malloc_init_hard_a0(void) {
1345 	bool ret;
1346 
1347 	malloc_mutex_lock(TSDN_NULL, &init_lock);
1348 	ret = malloc_init_hard_a0_locked();
1349 	malloc_mutex_unlock(TSDN_NULL, &init_lock);
1350 	return ret;
1351 }
1352 
1353 /* Initialize data structures which may trigger recursive allocation. */
1354 static bool
1355 malloc_init_hard_recursible(void) {
1356 	malloc_init_state = malloc_init_recursible;
1357 
1358 #if defined(__BIONIC__) && defined(ANDROID_NUM_ARENAS)
1359 	/* Hardcode since this value won't be used. */
1360 	ncpus = 2;
1361 #else
1362 	ncpus = malloc_ncpus();
1363 #endif
1364 
1365 #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
1366     && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
1367     !defined(__native_client__))
1368 	/* LinuxThreads' pthread_atfork() allocates. */
1369 	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1370 	    jemalloc_postfork_child) != 0) {
1371 		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1372 		if (opt_abort) {
1373 			abort();
1374 		}
1375 		return true;
1376 	}
1377 #endif
1378 
1379 	if (background_thread_boot0()) {
1380 		return true;
1381 	}
1382 
1383 	return false;
1384 }
1385 
1386 static unsigned
1387 malloc_narenas_default(void) {
1388 #if defined(ANDROID_NUM_ARENAS)
1389 	return ANDROID_NUM_ARENAS;
1390 #else
1391 	assert(ncpus > 0);
1392 	/*
1393 	 * For SMP systems, create more than one arena per CPU by
1394 	 * default.
1395 	 */
1396 	if (ncpus > 1) {
1397 		return ncpus << 2;
1398 	} else {
1399 		return 1;
1400 	}
1401 #endif
1402 }
1403 
1404 static percpu_arena_mode_t
1405 percpu_arena_as_initialized(percpu_arena_mode_t mode) {
1406 	assert(!malloc_initialized());
1407 	assert(mode <= percpu_arena_disabled);
1408 
1409 	if (mode != percpu_arena_disabled) {
1410 		mode += percpu_arena_mode_enabled_base;
1411 	}
1412 
1413 	return mode;
1414 }
1415 
1416 static bool
1417 malloc_init_narenas(void) {
1418 	assert(ncpus > 0);
1419 
1420 	if (opt_percpu_arena != percpu_arena_disabled) {
1421 		if (!have_percpu_arena || malloc_getcpu() < 0) {
1422 			opt_percpu_arena = percpu_arena_disabled;
1423 			malloc_printf("<jemalloc>: perCPU arena getcpu() not "
1424 			    "available. Setting narenas to %u.\n", opt_narenas ?
1425 			    opt_narenas : malloc_narenas_default());
1426 			if (opt_abort) {
1427 				abort();
1428 			}
1429 		} else {
1430 			if (ncpus >= MALLOCX_ARENA_LIMIT) {
1431 				malloc_printf("<jemalloc>: narenas w/ percpu"
1432 				    "arena beyond limit (%d)\n", ncpus);
1433 				if (opt_abort) {
1434 					abort();
1435 				}
1436 				return true;
1437 			}
1438 			/* NB: opt_percpu_arena isn't fully initialized yet. */
1439 			if (percpu_arena_as_initialized(opt_percpu_arena) ==
1440 			    per_phycpu_arena && ncpus % 2 != 0) {
1441 				malloc_printf("<jemalloc>: invalid "
1442 				    "configuration -- per physical CPU arena "
1443 				    "with odd number (%u) of CPUs (no hyper "
1444 				    "threading?).\n", ncpus);
1445 				if (opt_abort)
1446 					abort();
1447 			}
1448 			unsigned n = percpu_arena_ind_limit(
1449 			    percpu_arena_as_initialized(opt_percpu_arena));
1450 			if (opt_narenas < n) {
1451 				/*
1452 				 * If narenas is specified with percpu_arena
1453 				 * enabled, actual narenas is set as the greater
1454 				 * of the two. percpu_arena_choose will be free
1455 				 * to use any of the arenas based on CPU
1456 				 * id. This is conservative (at a small cost)
1457 				 * but ensures correctness.
1458 				 *
1459 				 * If for some reason the ncpus determined at
1460 				 * boot is not the actual number (e.g. because
1461 				 * of affinity setting from numactl), reserving
1462 				 * narenas this way provides a workaround for
1463 				 * percpu_arena.
1464 				 */
1465 				opt_narenas = n;
1466 			}
1467 		}
1468 	}
1469 	if (opt_narenas == 0) {
1470 		opt_narenas = malloc_narenas_default();
1471 	}
1472 	assert(opt_narenas > 0);
1473 
1474 	narenas_auto = opt_narenas;
1475 	/*
1476 	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1477 	 */
1478 	if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
1479 		narenas_auto = MALLOCX_ARENA_LIMIT - 1;
1480 		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1481 		    narenas_auto);
1482 	}
1483 	narenas_total_set(narenas_auto);
1484 
1485 	return false;
1486 }
1487 
1488 static void
1489 malloc_init_percpu(void) {
1490 	opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
1491 }
1492 
1493 static bool
1494 malloc_init_hard_finish(void) {
1495 	if (malloc_mutex_boot()) {
1496 		return true;
1497 	}
1498 
1499 	malloc_init_state = malloc_init_initialized;
1500 	malloc_slow_flag_init();
1501 
1502 	return false;
1503 }
1504 
1505 static void
1506 malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
1507 	malloc_mutex_assert_owner(tsdn, &init_lock);
1508 	malloc_mutex_unlock(tsdn, &init_lock);
1509 	if (reentrancy_set) {
1510 		assert(!tsdn_null(tsdn));
1511 		tsd_t *tsd = tsdn_tsd(tsdn);
1512 		assert(tsd_reentrancy_level_get(tsd) > 0);
1513 		post_reentrancy(tsd);
1514 	}
1515 }
1516 
1517 static bool
1518 malloc_init_hard(void) {
1519 	tsd_t *tsd;
1520 
1521 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1522 	_init_init_lock();
1523 #endif
1524 	malloc_mutex_lock(TSDN_NULL, &init_lock);
1525 
1526 #define UNLOCK_RETURN(tsdn, ret, reentrancy)		\
1527 	malloc_init_hard_cleanup(tsdn, reentrancy);	\
1528 	return ret;
1529 
1530 	if (!malloc_init_hard_needed()) {
1531 		UNLOCK_RETURN(TSDN_NULL, false, false)
1532 	}
1533 
1534 	if (malloc_init_state != malloc_init_a0_initialized &&
1535 	    malloc_init_hard_a0_locked()) {
1536 		UNLOCK_RETURN(TSDN_NULL, true, false)
1537 	}
1538 
1539 	malloc_mutex_unlock(TSDN_NULL, &init_lock);
1540 	/* Recursive allocation relies on functional tsd. */
1541 	tsd = malloc_tsd_boot0();
1542 	if (tsd == NULL) {
1543 		return true;
1544 	}
1545 	if (malloc_init_hard_recursible()) {
1546 		return true;
1547 	}
1548 
1549 	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
1550 	/* Set reentrancy level to 1 during init. */
1551 	pre_reentrancy(tsd, NULL);
1552 	/* Initialize narenas before prof_boot2 (for allocation). */
1553 	if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
1554 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1555 	}
1556 	if (config_prof && prof_boot2(tsd)) {
1557 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1558 	}
1559 
1560 	malloc_init_percpu();
1561 
1562 	if (malloc_init_hard_finish()) {
1563 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1564 	}
1565 	post_reentrancy(tsd);
1566 	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
1567 
1568 	witness_assert_lockless(witness_tsd_tsdn(
1569 	    tsd_witness_tsdp_get_unsafe(tsd)));
1570 	malloc_tsd_boot1();
1571 	/* Update TSD after tsd_boot1. */
1572 	tsd = tsd_fetch();
1573 	if (opt_background_thread) {
1574 		assert(have_background_thread);
1575 		/*
1576 		 * Need to finish init & unlock first before creating background
1577 		 * threads (pthread_create depends on malloc).  ctl_init (which
1578 		 * sets isthreaded) needs to be called without holding any lock.
1579 		 */
1580 		background_thread_ctl_init(tsd_tsdn(tsd));
1581 
1582 		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
1583 		bool err = background_thread_create(tsd, 0);
1584 		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
1585 		if (err) {
1586 			return true;
1587 		}
1588 	}
1589 #undef UNLOCK_RETURN
1590 	return false;
1591 }
1592 
1593 /*
1594  * End initialization functions.
1595  */
1596 /******************************************************************************/
1597 /*
1598  * Begin allocation-path internal functions and data structures.
1599  */
1600 
1601 /*
1602  * Settings determined by the documented behavior of the allocation functions.
1603  */
1604 typedef struct static_opts_s static_opts_t;
1605 struct static_opts_s {
1606 	/* Whether or not allocation size may overflow. */
1607 	bool may_overflow;
1608 	/* Whether or not allocations of size 0 should be treated as size 1. */
1609 	bool bump_empty_alloc;
1610 	/*
1611 	 * Whether to assert that allocations are not of size 0 (after any
1612 	 * bumping).
1613 	 */
1614 	bool assert_nonempty_alloc;
1615 
1616 	/*
1617 	 * Whether or not to modify the 'result' argument to malloc in case of
1618 	 * error.
1619 	 */
1620 	bool null_out_result_on_error;
1621 	/* Whether to set errno when we encounter an error condition. */
1622 	bool set_errno_on_error;
1623 
1624 	/*
1625 	 * The minimum valid alignment for functions requesting aligned storage.
1626 	 */
1627 	size_t min_alignment;
1628 
1629 	/* The error string to use if we oom. */
1630 	const char *oom_string;
1631 	/* The error string to use if the passed-in alignment is invalid. */
1632 	const char *invalid_alignment_string;
1633 
1634 	/*
1635 	 * False if we're configured to skip some time-consuming operations.
1636 	 *
1637 	 * This isn't really a malloc "behavior", but it acts as a useful
1638 	 * summary of several other static (or at least, static after program
1639 	 * initialization) options.
1640 	 */
1641 	bool slow;
1642 };
1643 
1644 JEMALLOC_ALWAYS_INLINE void
1645 static_opts_init(static_opts_t *static_opts) {
1646 	static_opts->may_overflow = false;
1647 	static_opts->bump_empty_alloc = false;
1648 	static_opts->assert_nonempty_alloc = false;
1649 	static_opts->null_out_result_on_error = false;
1650 	static_opts->set_errno_on_error = false;
1651 	static_opts->min_alignment = 0;
1652 	static_opts->oom_string = "";
1653 	static_opts->invalid_alignment_string = "";
1654 	static_opts->slow = false;
1655 }
1656 
1657 /*
1658  * These correspond to the macros in jemalloc/jemalloc_macros.h.  Broadly, we
1659  * should have one constant here per magic value there.  Note however that the
1660  * representations need not be related.
1661  */
1662 #define TCACHE_IND_NONE ((unsigned)-1)
1663 #define TCACHE_IND_AUTOMATIC ((unsigned)-2)
1664 #define ARENA_IND_AUTOMATIC ((unsigned)-1)
1665 
1666 typedef struct dynamic_opts_s dynamic_opts_t;
1667 struct dynamic_opts_s {
1668 	void **result;
1669 	size_t num_items;
1670 	size_t item_size;
1671 	size_t alignment;
1672 	bool zero;
1673 	unsigned tcache_ind;
1674 	unsigned arena_ind;
1675 };
1676 
1677 JEMALLOC_ALWAYS_INLINE void
1678 dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
1679 	dynamic_opts->result = NULL;
1680 	dynamic_opts->num_items = 0;
1681 	dynamic_opts->item_size = 0;
1682 	dynamic_opts->alignment = 0;
1683 	dynamic_opts->zero = false;
1684 	dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
1685 	dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
1686 }
1687 
1688 /* ind is ignored if dopts->alignment > 0. */
1689 JEMALLOC_ALWAYS_INLINE void *
1690 imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1691     size_t size, size_t usize, szind_t ind) {
1692 	tcache_t *tcache;
1693 	arena_t *arena;
1694 
1695 	/* Fill in the tcache. */
1696 	if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
1697 		if (likely(!sopts->slow)) {
1698 			/* Getting tcache ptr unconditionally. */
1699 			tcache = tsd_tcachep_get(tsd);
1700 			assert(tcache == tcache_get(tsd));
1701 		} else {
1702 			tcache = tcache_get(tsd);
1703 		}
1704 	} else if (dopts->tcache_ind == TCACHE_IND_NONE) {
1705 		tcache = NULL;
1706 	} else {
1707 		tcache = tcaches_get(tsd, dopts->tcache_ind);
1708 	}
1709 
1710 	/* Fill in the arena. */
1711 	if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
1712 		/*
1713 		 * In case of automatic arena management, we defer arena
1714 		 * computation until as late as we can, hoping to fill the
1715 		 * allocation out of the tcache.
1716 		 */
1717 		arena = NULL;
1718 	} else {
1719 		arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
1720 	}
1721 
1722 	if (unlikely(dopts->alignment != 0)) {
1723 		return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
1724 		    dopts->zero, tcache, arena);
1725 	}
1726 
1727 	return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
1728 	    arena, sopts->slow);
1729 }
1730 
1731 JEMALLOC_ALWAYS_INLINE void *
1732 imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1733     size_t usize, szind_t ind) {
1734 	void *ret;
1735 
1736 	/*
1737 	 * For small allocations, sampling bumps the usize.  If so, we allocate
1738 	 * from the ind_large bucket.
1739 	 */
1740 	szind_t ind_large;
1741 	size_t bumped_usize = usize;
1742 
1743 	if (usize <= SMALL_MAXCLASS) {
1744 		assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
1745 		    sz_sa2u(LARGE_MINCLASS, dopts->alignment))
1746 		    == LARGE_MINCLASS);
1747 		ind_large = sz_size2index(LARGE_MINCLASS);
1748 		bumped_usize = sz_s2u(LARGE_MINCLASS);
1749 		ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
1750 		    bumped_usize, ind_large);
1751 		if (unlikely(ret == NULL)) {
1752 			return NULL;
1753 		}
1754 		arena_prof_promote(tsd_tsdn(tsd), ret, usize);
1755 	} else {
1756 		ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
1757 	}
1758 
1759 	return ret;
1760 }
1761 
1762 /*
1763  * Returns true if the allocation will overflow, and false otherwise.  Sets
1764  * *size to the product either way.
1765  */
1766 JEMALLOC_ALWAYS_INLINE bool
1767 compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
1768     size_t *size) {
1769 	/*
1770 	 * This function is just num_items * item_size, except that we may have
1771 	 * to check for overflow.
1772 	 */
1773 
1774 	if (!may_overflow) {
1775 		assert(dopts->num_items == 1);
1776 		*size = dopts->item_size;
1777 		return false;
1778 	}
1779 
1780 	/* A size_t with its high-half bits all set to 1. */
1781 	static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
1782 
1783 	*size = dopts->item_size * dopts->num_items;
1784 
1785 	if (unlikely(*size == 0)) {
1786 		return (dopts->num_items != 0 && dopts->item_size != 0);
1787 	}
1788 
1789 	/*
1790 	 * We got a non-zero size, but we don't know if we overflowed to get
1791 	 * there.  To avoid having to do a divide, we'll be clever and note that
1792 	 * if both A and B can be represented in N/2 bits, then their product
1793 	 * can be represented in N bits (without the possibility of overflow).
1794 	 */
1795 	if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
1796 		return false;
1797 	}
1798 	if (likely(*size / dopts->item_size == dopts->num_items)) {
1799 		return false;
1800 	}
1801 	return true;
1802 }
1803 
1804 JEMALLOC_ALWAYS_INLINE int
1805 imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
1806 	/* Where the actual allocated memory will live. */
1807 	void *allocation = NULL;
1808 	/* Filled in by compute_size_with_overflow below. */
1809 	size_t size = 0;
1810 	/*
1811 	 * For unaligned allocations, we need only ind.  For aligned
1812 	 * allocations, or in case of stats or profiling we need usize.
1813 	 *
1814 	 * These are actually dead stores, in that their values are reset before
1815 	 * any branch on their value is taken.  Sometimes though, it's
1816 	 * convenient to pass them as arguments before this point.  To avoid
1817 	 * undefined behavior then, we initialize them with dummy stores.
1818 	 */
1819 	szind_t ind = 0;
1820 	size_t usize = 0;
1821 
1822 	/* Reentrancy is only checked on slow path. */
1823 	int8_t reentrancy_level;
1824 
1825 	/* Compute the amount of memory the user wants. */
1826 	if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
1827 	    &size))) {
1828 		goto label_oom;
1829 	}
1830 
1831 	/* Validate the user input. */
1832 	if (sopts->bump_empty_alloc) {
1833 		if (unlikely(size == 0)) {
1834 			size = 1;
1835 		}
1836 	}
1837 
1838 	if (sopts->assert_nonempty_alloc) {
1839 		assert (size != 0);
1840 	}
1841 
1842 	if (unlikely(dopts->alignment < sopts->min_alignment
1843 	    || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
1844 		goto label_invalid_alignment;
1845 	}
1846 
1847 	/* This is the beginning of the "core" algorithm. */
1848 
1849 	if (dopts->alignment == 0) {
1850 		ind = sz_size2index(size);
1851 		if (unlikely(ind >= NSIZES)) {
1852 			goto label_oom;
1853 		}
1854 		if (config_stats || (config_prof && opt_prof)) {
1855 			usize = sz_index2size(ind);
1856 			assert(usize > 0 && usize <= LARGE_MAXCLASS);
1857 		}
1858 	} else {
1859 		usize = sz_sa2u(size, dopts->alignment);
1860 		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
1861 			goto label_oom;
1862 		}
1863 	}
1864 
1865 	check_entry_exit_locking(tsd_tsdn(tsd));
1866 
1867 	/*
1868 	 * If we need to handle reentrancy, we can do it out of a
1869 	 * known-initialized arena (i.e. arena 0).
1870 	 */
1871 	reentrancy_level = tsd_reentrancy_level_get(tsd);
1872 	if (sopts->slow && unlikely(reentrancy_level > 0)) {
1873 		/*
1874 		 * We should never specify particular arenas or tcaches from
1875 		 * within our internal allocations.
1876 		 */
1877 		assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
1878 		    dopts->tcache_ind == TCACHE_IND_NONE);
1879 		assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
1880 		dopts->tcache_ind = TCACHE_IND_NONE;
1881 		/* We know that arena 0 has already been initialized. */
1882 		dopts->arena_ind = 0;
1883 	}
1884 
1885 	/* If profiling is on, get our profiling context. */
1886 	if (config_prof && opt_prof) {
1887 		/*
1888 		 * Note that if we're going down this path, usize must have been
1889 		 * initialized in the previous if statement.
1890 		 */
1891 		prof_tctx_t *tctx = prof_alloc_prep(
1892 		    tsd, usize, prof_active_get_unlocked(), true);
1893 
1894 		alloc_ctx_t alloc_ctx;
1895 		if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
1896 			alloc_ctx.slab = (usize <= SMALL_MAXCLASS);
1897 			allocation = imalloc_no_sample(
1898 			    sopts, dopts, tsd, usize, usize, ind);
1899 		} else if ((uintptr_t)tctx > (uintptr_t)1U) {
1900 			/*
1901 			 * Note that ind might still be 0 here.  This is fine;
1902 			 * imalloc_sample ignores ind if dopts->alignment > 0.
1903 			 */
1904 			allocation = imalloc_sample(
1905 			    sopts, dopts, tsd, usize, ind);
1906 			alloc_ctx.slab = false;
1907 		} else {
1908 			allocation = NULL;
1909 		}
1910 
1911 		if (unlikely(allocation == NULL)) {
1912 			prof_alloc_rollback(tsd, tctx, true);
1913 			goto label_oom;
1914 		}
1915 		prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
1916 	} else {
1917 		/*
1918 		 * If dopts->alignment > 0, then ind is still 0, but usize was
1919 		 * computed in the previous if statement.  Down the positive
1920 		 * alignment path, imalloc_no_sample ignores ind and size
1921 		 * (relying only on usize).
1922 		 */
1923 		allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
1924 		    ind);
1925 		if (unlikely(allocation == NULL)) {
1926 			goto label_oom;
1927 		}
1928 	}
1929 
1930 	/*
1931 	 * Allocation has been done at this point.  We still have some
1932 	 * post-allocation work to do though.
1933 	 */
1934 	assert(dopts->alignment == 0
1935 	    || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
1936 
1937 	if (config_stats) {
1938 		assert(usize == isalloc(tsd_tsdn(tsd), allocation));
1939 		*tsd_thread_allocatedp_get(tsd) += usize;
1940 	}
1941 
1942 	if (sopts->slow) {
1943 		UTRACE(0, size, allocation);
1944 	}
1945 
1946 	/* Success! */
1947 	check_entry_exit_locking(tsd_tsdn(tsd));
1948 	*dopts->result = allocation;
1949 	return 0;
1950 
1951 label_oom:
1952 	if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
1953 		malloc_write(sopts->oom_string);
1954 		abort();
1955 	}
1956 
1957 	if (sopts->slow) {
1958 		UTRACE(NULL, size, NULL);
1959 	}
1960 
1961 	check_entry_exit_locking(tsd_tsdn(tsd));
1962 
1963 	if (sopts->set_errno_on_error) {
1964 		set_errno(ENOMEM);
1965 	}
1966 
1967 	if (sopts->null_out_result_on_error) {
1968 		*dopts->result = NULL;
1969 	}
1970 
1971 	return ENOMEM;
1972 
1973 	/*
1974 	 * This label is only jumped to by one goto; we move it out of line
1975 	 * anyways to avoid obscuring the non-error paths, and for symmetry with
1976 	 * the oom case.
1977 	 */
1978 label_invalid_alignment:
1979 	if (config_xmalloc && unlikely(opt_xmalloc)) {
1980 		malloc_write(sopts->invalid_alignment_string);
1981 		abort();
1982 	}
1983 
1984 	if (sopts->set_errno_on_error) {
1985 		set_errno(EINVAL);
1986 	}
1987 
1988 	if (sopts->slow) {
1989 		UTRACE(NULL, size, NULL);
1990 	}
1991 
1992 	check_entry_exit_locking(tsd_tsdn(tsd));
1993 
1994 	if (sopts->null_out_result_on_error) {
1995 		*dopts->result = NULL;
1996 	}
1997 
1998 	return EINVAL;
1999 }
2000 
2001 /* Returns the errno-style error code of the allocation. */
2002 JEMALLOC_ALWAYS_INLINE int
2003 imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
2004 	if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
2005 		if (config_xmalloc && unlikely(opt_xmalloc)) {
2006 			malloc_write(sopts->oom_string);
2007 			abort();
2008 		}
2009 		UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
2010 		set_errno(ENOMEM);
2011 		*dopts->result = NULL;
2012 
2013 		return ENOMEM;
2014 	}
2015 
2016 	/* We always need the tsd.  Let's grab it right away. */
2017 	tsd_t *tsd = tsd_fetch();
2018 	assert(tsd);
2019 	if (likely(tsd_fast(tsd))) {
2020 		/* Fast and common path. */
2021 		tsd_assert_fast(tsd);
2022 		sopts->slow = false;
2023 		return imalloc_body(sopts, dopts, tsd);
2024 	} else {
2025 		sopts->slow = true;
2026 		return imalloc_body(sopts, dopts, tsd);
2027 	}
2028 }
2029 /******************************************************************************/
2030 /*
2031  * Begin malloc(3)-compatible functions.
2032  */
2033 
2034 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2035 void JEMALLOC_NOTHROW *
2036 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2037 je_malloc(size_t size) {
2038 	void *ret;
2039 	static_opts_t sopts;
2040 	dynamic_opts_t dopts;
2041 
2042 	LOG("core.malloc.entry", "size: %zu", size);
2043 
2044 	static_opts_init(&sopts);
2045 	dynamic_opts_init(&dopts);
2046 
2047 	sopts.bump_empty_alloc = true;
2048 	sopts.null_out_result_on_error = true;
2049 	sopts.set_errno_on_error = true;
2050 	sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
2051 
2052 	dopts.result = &ret;
2053 	dopts.num_items = 1;
2054 	dopts.item_size = size;
2055 
2056 	imalloc(&sopts, &dopts);
2057 
2058 	LOG("core.malloc.exit", "result: %p", ret);
2059 
2060 	return ret;
2061 }
2062 
2063 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2064 JEMALLOC_ATTR(nonnull(1))
2065 je_posix_memalign(void **memptr, size_t alignment, size_t size) {
2066 	int ret;
2067 	static_opts_t sopts;
2068 	dynamic_opts_t dopts;
2069 
2070 	LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
2071 	    "size: %zu", memptr, alignment, size);
2072 
2073 	static_opts_init(&sopts);
2074 	dynamic_opts_init(&dopts);
2075 
2076 	sopts.bump_empty_alloc = true;
2077 	sopts.min_alignment = sizeof(void *);
2078 	sopts.oom_string =
2079 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2080 	sopts.invalid_alignment_string =
2081 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2082 
2083 	dopts.result = memptr;
2084 	dopts.num_items = 1;
2085 	dopts.item_size = size;
2086 	dopts.alignment = alignment;
2087 
2088 	ret = imalloc(&sopts, &dopts);
2089 
2090 	LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
2091 	    *memptr);
2092 
2093 	return ret;
2094 }
2095 
2096 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2097 void JEMALLOC_NOTHROW *
2098 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
2099 je_aligned_alloc(size_t alignment, size_t size) {
2100 	void *ret;
2101 
2102 	static_opts_t sopts;
2103 	dynamic_opts_t dopts;
2104 
2105 	LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
2106 	    alignment, size);
2107 
2108 	static_opts_init(&sopts);
2109 	dynamic_opts_init(&dopts);
2110 
2111 	sopts.bump_empty_alloc = true;
2112 	sopts.null_out_result_on_error = true;
2113 	sopts.set_errno_on_error = true;
2114 	sopts.min_alignment = 1;
2115 	sopts.oom_string =
2116 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2117 	sopts.invalid_alignment_string =
2118 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2119 
2120 	dopts.result = &ret;
2121 	dopts.num_items = 1;
2122 	dopts.item_size = size;
2123 	dopts.alignment = alignment;
2124 
2125 	imalloc(&sopts, &dopts);
2126 
2127 	LOG("core.aligned_alloc.exit", "result: %p", ret);
2128 
2129 	return ret;
2130 }
2131 
2132 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2133 void JEMALLOC_NOTHROW *
2134 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
2135 je_calloc(size_t num, size_t size) {
2136 	void *ret;
2137 	static_opts_t sopts;
2138 	dynamic_opts_t dopts;
2139 
2140 	LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);
2141 
2142 	static_opts_init(&sopts);
2143 	dynamic_opts_init(&dopts);
2144 
2145 	sopts.may_overflow = true;
2146 	sopts.bump_empty_alloc = true;
2147 	sopts.null_out_result_on_error = true;
2148 	sopts.set_errno_on_error = true;
2149 	sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
2150 
2151 	dopts.result = &ret;
2152 	dopts.num_items = num;
2153 	dopts.item_size = size;
2154 	dopts.zero = true;
2155 
2156 	imalloc(&sopts, &dopts);
2157 
2158 	LOG("core.calloc.exit", "result: %p", ret);
2159 
2160 	return ret;
2161 }
2162 
2163 static void *
2164 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2165     prof_tctx_t *tctx) {
2166 	void *p;
2167 
2168 	if (tctx == NULL) {
2169 		return NULL;
2170 	}
2171 	if (usize <= SMALL_MAXCLASS) {
2172 		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
2173 		if (p == NULL) {
2174 			return NULL;
2175 		}
2176 		arena_prof_promote(tsd_tsdn(tsd), p, usize);
2177 	} else {
2178 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2179 	}
2180 
2181 	return p;
2182 }
2183 
2184 JEMALLOC_ALWAYS_INLINE void *
2185 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2186    alloc_ctx_t *alloc_ctx) {
2187 	void *p;
2188 	bool prof_active;
2189 	prof_tctx_t *old_tctx, *tctx;
2190 
2191 	prof_active = prof_active_get_unlocked();
2192 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2193 	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
2194 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2195 		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
2196 	} else {
2197 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2198 	}
2199 	if (unlikely(p == NULL)) {
2200 		prof_alloc_rollback(tsd, tctx, true);
2201 		return NULL;
2202 	}
2203 	prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
2204 	    old_tctx);
2205 
2206 	return p;
2207 }
2208 
2209 JEMALLOC_ALWAYS_INLINE void
2210 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
2211 	if (!slow_path) {
2212 		tsd_assert_fast(tsd);
2213 	}
2214 	check_entry_exit_locking(tsd_tsdn(tsd));
2215 	if (tsd_reentrancy_level_get(tsd) != 0) {
2216 		assert(slow_path);
2217 	}
2218 
2219 	assert(ptr != NULL);
2220 	assert(malloc_initialized() || IS_INITIALIZER);
2221 
2222 	alloc_ctx_t alloc_ctx;
2223 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2224 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2225 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2226 	assert(alloc_ctx.szind != NSIZES);
2227 
2228 	size_t usize;
2229 	if (config_prof && opt_prof) {
2230 		usize = sz_index2size(alloc_ctx.szind);
2231 		prof_free(tsd, ptr, usize, &alloc_ctx);
2232 	} else if (config_stats) {
2233 		usize = sz_index2size(alloc_ctx.szind);
2234 	}
2235 	if (config_stats) {
2236 		*tsd_thread_deallocatedp_get(tsd) += usize;
2237 	}
2238 
2239 	if (likely(!slow_path)) {
2240 		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2241 		    false);
2242 	} else {
2243 		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2244 		    true);
2245 	}
2246 }
2247 
2248 JEMALLOC_ALWAYS_INLINE void
2249 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
2250 	if (!slow_path) {
2251 		tsd_assert_fast(tsd);
2252 	}
2253 	check_entry_exit_locking(tsd_tsdn(tsd));
2254 	if (tsd_reentrancy_level_get(tsd) != 0) {
2255 		assert(slow_path);
2256 	}
2257 
2258 	assert(ptr != NULL);
2259 	assert(malloc_initialized() || IS_INITIALIZER);
2260 
2261 	alloc_ctx_t alloc_ctx, *ctx;
2262 	if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
2263 		/*
2264 		 * When cache_oblivious is disabled and ptr is not page aligned,
2265 		 * the allocation was not sampled -- usize can be used to
2266 		 * determine szind directly.
2267 		 */
2268 		alloc_ctx.szind = sz_size2index(usize);
2269 		alloc_ctx.slab = true;
2270 		ctx = &alloc_ctx;
2271 		if (config_debug) {
2272 			alloc_ctx_t dbg_ctx;
2273 			rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2274 			rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
2275 			    rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
2276 			    &dbg_ctx.slab);
2277 			assert(dbg_ctx.szind == alloc_ctx.szind);
2278 			assert(dbg_ctx.slab == alloc_ctx.slab);
2279 		}
2280 	} else if (config_prof && opt_prof) {
2281 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2282 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2283 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2284 		assert(alloc_ctx.szind == sz_size2index(usize));
2285 		ctx = &alloc_ctx;
2286 	} else {
2287 		ctx = NULL;
2288 	}
2289 
2290 	if (config_prof && opt_prof) {
2291 		prof_free(tsd, ptr, usize, ctx);
2292 	}
2293 	if (config_stats) {
2294 		*tsd_thread_deallocatedp_get(tsd) += usize;
2295 	}
2296 
2297 	if (likely(!slow_path)) {
2298 		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
2299 	} else {
2300 		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
2301 	}
2302 }
2303 
2304 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2305 void JEMALLOC_NOTHROW *
2306 JEMALLOC_ALLOC_SIZE(2)
2307 je_realloc(void *ptr, size_t size) {
2308 	void *ret;
2309 	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
2310 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
2311 	size_t old_usize = 0;
2312 
2313 	LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
2314 
2315 	if (unlikely(size == 0)) {
2316 		if (ptr != NULL) {
2317 			/* realloc(ptr, 0) is equivalent to free(ptr). */
2318 			UTRACE(ptr, 0, 0);
2319 			tcache_t *tcache;
2320 			tsd_t *tsd = tsd_fetch();
2321 			if (tsd_reentrancy_level_get(tsd) == 0) {
2322 				tcache = tcache_get(tsd);
2323 			} else {
2324 				tcache = NULL;
2325 			}
2326 			ifree(tsd, ptr, tcache, true);
2327 
2328 			LOG("core.realloc.exit", "result: %p", NULL);
2329 			return NULL;
2330 		}
2331 		size = 1;
2332 	}
2333 
2334 	if (likely(ptr != NULL)) {
2335 		assert(malloc_initialized() || IS_INITIALIZER);
2336 		tsd_t *tsd = tsd_fetch();
2337 
2338 		check_entry_exit_locking(tsd_tsdn(tsd));
2339 
2340 		alloc_ctx_t alloc_ctx;
2341 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2342 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2343 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2344 		assert(alloc_ctx.szind != NSIZES);
2345 		old_usize = sz_index2size(alloc_ctx.szind);
2346 		assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2347 		if (config_prof && opt_prof) {
2348 			usize = sz_s2u(size);
2349 			ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
2350 			    NULL : irealloc_prof(tsd, ptr, old_usize, usize,
2351 			    &alloc_ctx);
2352 		} else {
2353 			if (config_stats) {
2354 				usize = sz_s2u(size);
2355 			}
2356 			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
2357 		}
2358 		tsdn = tsd_tsdn(tsd);
2359 	} else {
2360 		/* realloc(NULL, size) is equivalent to malloc(size). */
2361 		void *ret = je_malloc(size);
2362 		LOG("core.realloc.exit", "result: %p", ret);
2363 		return ret;
2364 	}
2365 
2366 	if (unlikely(ret == NULL)) {
2367 		if (config_xmalloc && unlikely(opt_xmalloc)) {
2368 			malloc_write("<jemalloc>: Error in realloc(): "
2369 			    "out of memory\n");
2370 			abort();
2371 		}
2372 		set_errno(ENOMEM);
2373 	}
2374 	if (config_stats && likely(ret != NULL)) {
2375 		tsd_t *tsd;
2376 
2377 		assert(usize == isalloc(tsdn, ret));
2378 		tsd = tsdn_tsd(tsdn);
2379 		*tsd_thread_allocatedp_get(tsd) += usize;
2380 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2381 	}
2382 	UTRACE(ptr, size, ret);
2383 	check_entry_exit_locking(tsdn);
2384 
2385 	LOG("core.realloc.exit", "result: %p", ret);
2386 	return ret;
2387 }
2388 
2389 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2390 je_free(void *ptr) {
2391 	LOG("core.free.entry", "ptr: %p", ptr);
2392 
2393 	UTRACE(ptr, 0, 0);
2394 	if (likely(ptr != NULL)) {
2395 		/*
2396 		 * We avoid setting up tsd fully (e.g. tcache, arena binding)
2397 		 * based on only free() calls -- other activities trigger the
2398 		 * minimal to full transition.  This is because free() may
2399 		 * happen during thread shutdown after tls deallocation: if a
2400 		 * thread never had any malloc activities until then, a
2401 		 * fully-setup tsd won't be destructed properly.
2402 		 */
2403 		tsd_t *tsd = tsd_fetch_min();
2404 		check_entry_exit_locking(tsd_tsdn(tsd));
2405 
2406 		tcache_t *tcache;
2407 		if (likely(tsd_fast(tsd))) {
2408 			tsd_assert_fast(tsd);
2409 			/* Unconditionally get tcache ptr on fast path. */
2410 			tcache = tsd_tcachep_get(tsd);
2411 			ifree(tsd, ptr, tcache, false);
2412 		} else {
2413 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2414 				tcache = tcache_get(tsd);
2415 			} else {
2416 				tcache = NULL;
2417 			}
2418 			ifree(tsd, ptr, tcache, true);
2419 		}
2420 		check_entry_exit_locking(tsd_tsdn(tsd));
2421 	}
2422 	LOG("core.free.exit", "");
2423 }
2424 
2425 /*
2426  * End malloc(3)-compatible functions.
2427  */
2428 /******************************************************************************/
2429 /*
2430  * Begin non-standard override functions.
2431  */
2432 
2433 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
2434 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2435 void JEMALLOC_NOTHROW *
2436 JEMALLOC_ATTR(malloc)
2437 je_memalign(size_t alignment, size_t size) {
2438 	void *ret;
2439 	static_opts_t sopts;
2440 	dynamic_opts_t dopts;
2441 
2442 	LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
2443 	    size);
2444 
2445 	static_opts_init(&sopts);
2446 	dynamic_opts_init(&dopts);
2447 
2448 	sopts.bump_empty_alloc = true;
2449 	sopts.min_alignment = 1;
2450 	sopts.oom_string =
2451 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2452 	sopts.invalid_alignment_string =
2453 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2454 	sopts.null_out_result_on_error = true;
2455 
2456 	dopts.result = &ret;
2457 	dopts.num_items = 1;
2458 	dopts.item_size = size;
2459 	dopts.alignment = alignment;
2460 
2461 	imalloc(&sopts, &dopts);
2462 
2463 	LOG("core.memalign.exit", "result: %p", ret);
2464 	return ret;
2465 }
2466 #endif
2467 
2468 #ifdef JEMALLOC_OVERRIDE_VALLOC
2469 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2470 void JEMALLOC_NOTHROW *
2471 JEMALLOC_ATTR(malloc)
2472 je_valloc(size_t size) {
2473 	void *ret;
2474 
2475 	static_opts_t sopts;
2476 	dynamic_opts_t dopts;
2477 
2478 	LOG("core.valloc.entry", "size: %zu\n", size);
2479 
2480 	static_opts_init(&sopts);
2481 	dynamic_opts_init(&dopts);
2482 
2483 	sopts.bump_empty_alloc = true;
2484 	sopts.null_out_result_on_error = true;
2485 	sopts.min_alignment = PAGE;
2486 	sopts.oom_string =
2487 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2488 	sopts.invalid_alignment_string =
2489 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2490 
2491 	dopts.result = &ret;
2492 	dopts.num_items = 1;
2493 	dopts.item_size = size;
2494 	dopts.alignment = PAGE;
2495 
2496 	imalloc(&sopts, &dopts);
2497 
2498 	LOG("core.valloc.exit", "result: %p\n", ret);
2499 	return ret;
2500 }
2501 #endif
2502 
2503 #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
2504 /*
2505  * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
2506  * to inconsistently reference libc's malloc(3)-compatible functions
2507  * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
2508  *
2509  * These definitions interpose hooks in glibc.  The functions are actually
2510  * passed an extra argument for the caller return address, which will be
2511  * ignored.
2512  */
2513 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
2514 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
2515 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
2516 #  ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
2517 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
2518     je_memalign;
2519 #  endif
2520 
2521 #  ifdef CPU_COUNT
2522 /*
2523  * To enable static linking with glibc, the libc specific malloc interface must
2524  * be implemented also, so none of glibc's malloc.o functions are added to the
2525  * link.
2526  */
2527 #    define ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
2528 /* To force macro expansion of je_ prefix before stringification. */
2529 #    define PREALIAS(je_fn)	ALIAS(je_fn)
2530 #    ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
2531 void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
2532 #    endif
2533 #    ifdef JEMALLOC_OVERRIDE___LIBC_FREE
2534 void __libc_free(void* ptr) PREALIAS(je_free);
2535 #    endif
2536 #    ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
2537 void *__libc_malloc(size_t size) PREALIAS(je_malloc);
2538 #    endif
2539 #    ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
2540 void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
2541 #    endif
2542 #    ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
2543 void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
2544 #    endif
2545 #    ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
2546 void *__libc_valloc(size_t size) PREALIAS(je_valloc);
2547 #    endif
2548 #    ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
2549 int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
2550 #    endif
2551 #    undef PREALIAS
2552 #    undef ALIAS
2553 #  endif
2554 #endif
2555 
2556 /*
2557  * End non-standard override functions.
2558  */
2559 /******************************************************************************/
2560 /*
2561  * Begin non-standard functions.
2562  */
2563 
2564 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2565 void JEMALLOC_NOTHROW *
2566 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2567 je_mallocx(size_t size, int flags) {
2568 	void *ret;
2569 	static_opts_t sopts;
2570 	dynamic_opts_t dopts;
2571 
2572 	LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
2573 
2574 	static_opts_init(&sopts);
2575 	dynamic_opts_init(&dopts);
2576 
2577 	sopts.assert_nonempty_alloc = true;
2578 	sopts.null_out_result_on_error = true;
2579 	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
2580 
2581 	dopts.result = &ret;
2582 	dopts.num_items = 1;
2583 	dopts.item_size = size;
2584 	if (unlikely(flags != 0)) {
2585 		if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
2586 			dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2587 		}
2588 
2589 		dopts.zero = MALLOCX_ZERO_GET(flags);
2590 
2591 		if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2592 			if ((flags & MALLOCX_TCACHE_MASK)
2593 			    == MALLOCX_TCACHE_NONE) {
2594 				dopts.tcache_ind = TCACHE_IND_NONE;
2595 			} else {
2596 				dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
2597 			}
2598 		} else {
2599 			dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
2600 		}
2601 
2602 		if ((flags & MALLOCX_ARENA_MASK) != 0)
2603 			dopts.arena_ind = MALLOCX_ARENA_GET(flags);
2604 	}
2605 
2606 	imalloc(&sopts, &dopts);
2607 
2608 	LOG("core.mallocx.exit", "result: %p", ret);
2609 	return ret;
2610 }
2611 
2612 static void *
2613 irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
2614     size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2615     prof_tctx_t *tctx) {
2616 	void *p;
2617 
2618 	if (tctx == NULL) {
2619 		return NULL;
2620 	}
2621 	if (usize <= SMALL_MAXCLASS) {
2622 		p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
2623 		    alignment, zero, tcache, arena);
2624 		if (p == NULL) {
2625 			return NULL;
2626 		}
2627 		arena_prof_promote(tsdn, p, usize);
2628 	} else {
2629 		p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
2630 		    tcache, arena);
2631 	}
2632 
2633 	return p;
2634 }
2635 
2636 JEMALLOC_ALWAYS_INLINE void *
2637 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2638     size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2639     arena_t *arena, alloc_ctx_t *alloc_ctx) {
2640 	void *p;
2641 	bool prof_active;
2642 	prof_tctx_t *old_tctx, *tctx;
2643 
2644 	prof_active = prof_active_get_unlocked();
2645 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2646 	tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
2647 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2648 		p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
2649 		    *usize, alignment, zero, tcache, arena, tctx);
2650 	} else {
2651 		p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
2652 		    zero, tcache, arena);
2653 	}
2654 	if (unlikely(p == NULL)) {
2655 		prof_alloc_rollback(tsd, tctx, false);
2656 		return NULL;
2657 	}
2658 
2659 	if (p == old_ptr && alignment != 0) {
2660 		/*
2661 		 * The allocation did not move, so it is possible that the size
2662 		 * class is smaller than would guarantee the requested
2663 		 * alignment, and that the alignment constraint was
2664 		 * serendipitously satisfied.  Additionally, old_usize may not
2665 		 * be the same as the current usize because of in-place large
2666 		 * reallocation.  Therefore, query the actual value of usize.
2667 		 */
2668 		*usize = isalloc(tsd_tsdn(tsd), p);
2669 	}
2670 	prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
2671 	    old_usize, old_tctx);
2672 
2673 	return p;
2674 }
2675 
2676 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2677 void JEMALLOC_NOTHROW *
2678 JEMALLOC_ALLOC_SIZE(2)
2679 je_rallocx(void *ptr, size_t size, int flags) {
2680 	void *p;
2681 	tsd_t *tsd;
2682 	size_t usize;
2683 	size_t old_usize;
2684 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2685 	bool zero = flags & MALLOCX_ZERO;
2686 	arena_t *arena;
2687 	tcache_t *tcache;
2688 
2689 	LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
2690 	    size, flags);
2691 
2692 
2693 	assert(ptr != NULL);
2694 	assert(size != 0);
2695 	assert(malloc_initialized() || IS_INITIALIZER);
2696 	tsd = tsd_fetch();
2697 	check_entry_exit_locking(tsd_tsdn(tsd));
2698 
2699 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2700 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2701 		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
2702 		if (unlikely(arena == NULL)) {
2703 			goto label_oom;
2704 		}
2705 	} else {
2706 		arena = NULL;
2707 	}
2708 
2709 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2710 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2711 			tcache = NULL;
2712 		} else {
2713 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2714 		}
2715 	} else {
2716 		tcache = tcache_get(tsd);
2717 	}
2718 
2719 	alloc_ctx_t alloc_ctx;
2720 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2721 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2722 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2723 	assert(alloc_ctx.szind != NSIZES);
2724 	old_usize = sz_index2size(alloc_ctx.szind);
2725 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2726 	if (config_prof && opt_prof) {
2727 		usize = (alignment == 0) ?
2728 		    sz_s2u(size) : sz_sa2u(size, alignment);
2729 		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
2730 			goto label_oom;
2731 		}
2732 		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2733 		    zero, tcache, arena, &alloc_ctx);
2734 		if (unlikely(p == NULL)) {
2735 			goto label_oom;
2736 		}
2737 	} else {
2738 		p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
2739 		    zero, tcache, arena);
2740 		if (unlikely(p == NULL)) {
2741 			goto label_oom;
2742 		}
2743 		if (config_stats) {
2744 			usize = isalloc(tsd_tsdn(tsd), p);
2745 		}
2746 	}
2747 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2748 
2749 	if (config_stats) {
2750 		*tsd_thread_allocatedp_get(tsd) += usize;
2751 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2752 	}
2753 	UTRACE(ptr, size, p);
2754 	check_entry_exit_locking(tsd_tsdn(tsd));
2755 
2756 	LOG("core.rallocx.exit", "result: %p", p);
2757 	return p;
2758 label_oom:
2759 	if (config_xmalloc && unlikely(opt_xmalloc)) {
2760 		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2761 		abort();
2762 	}
2763 	UTRACE(ptr, size, 0);
2764 	check_entry_exit_locking(tsd_tsdn(tsd));
2765 
2766 	LOG("core.rallocx.exit", "result: %p", NULL);
2767 	return NULL;
2768 }
2769 
2770 JEMALLOC_ALWAYS_INLINE size_t
2771 ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2772     size_t extra, size_t alignment, bool zero) {
2773 	size_t usize;
2774 
2775 	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
2776 		return old_usize;
2777 	}
2778 	usize = isalloc(tsdn, ptr);
2779 
2780 	return usize;
2781 }
2782 
2783 static size_t
2784 ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2785     size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
2786 	size_t usize;
2787 
2788 	if (tctx == NULL) {
2789 		return old_usize;
2790 	}
2791 	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
2792 	    zero);
2793 
2794 	return usize;
2795 }
2796 
2797 JEMALLOC_ALWAYS_INLINE size_t
2798 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2799     size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
2800 	size_t usize_max, usize;
2801 	bool prof_active;
2802 	prof_tctx_t *old_tctx, *tctx;
2803 
2804 	prof_active = prof_active_get_unlocked();
2805 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
2806 	/*
2807 	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2808 	 * Therefore, compute its maximum possible value and use that in
2809 	 * prof_alloc_prep() to decide whether to capture a backtrace.
2810 	 * prof_realloc() will use the actual usize to decide whether to sample.
2811 	 */
2812 	if (alignment == 0) {
2813 		usize_max = sz_s2u(size+extra);
2814 		assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
2815 	} else {
2816 		usize_max = sz_sa2u(size+extra, alignment);
2817 		if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
2818 			/*
2819 			 * usize_max is out of range, and chances are that
2820 			 * allocation will fail, but use the maximum possible
2821 			 * value and carry on with prof_alloc_prep(), just in
2822 			 * case allocation succeeds.
2823 			 */
2824 			usize_max = LARGE_MAXCLASS;
2825 		}
2826 	}
2827 	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2828 
2829 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2830 		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
2831 		    size, extra, alignment, zero, tctx);
2832 	} else {
2833 		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2834 		    extra, alignment, zero);
2835 	}
2836 	if (usize == old_usize) {
2837 		prof_alloc_rollback(tsd, tctx, false);
2838 		return usize;
2839 	}
2840 	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2841 	    old_tctx);
2842 
2843 	return usize;
2844 }
2845 
2846 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2847 je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
2848 	tsd_t *tsd;
2849 	size_t usize, old_usize;
2850 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2851 	bool zero = flags & MALLOCX_ZERO;
2852 
2853 	LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
2854 	    "flags: %d", ptr, size, extra, flags);
2855 
2856 	assert(ptr != NULL);
2857 	assert(size != 0);
2858 	assert(SIZE_T_MAX - size >= extra);
2859 	assert(malloc_initialized() || IS_INITIALIZER);
2860 	tsd = tsd_fetch();
2861 	check_entry_exit_locking(tsd_tsdn(tsd));
2862 
2863 	alloc_ctx_t alloc_ctx;
2864 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2865 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2866 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2867 	assert(alloc_ctx.szind != NSIZES);
2868 	old_usize = sz_index2size(alloc_ctx.szind);
2869 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2870 	/*
2871 	 * The API explicitly absolves itself of protecting against (size +
2872 	 * extra) numerical overflow, but we may need to clamp extra to avoid
2873 	 * exceeding LARGE_MAXCLASS.
2874 	 *
2875 	 * Ordinarily, size limit checking is handled deeper down, but here we
2876 	 * have to check as part of (size + extra) clamping, since we need the
2877 	 * clamped value in the above helper functions.
2878 	 */
2879 	if (unlikely(size > LARGE_MAXCLASS)) {
2880 		usize = old_usize;
2881 		goto label_not_resized;
2882 	}
2883 	if (unlikely(LARGE_MAXCLASS - size < extra)) {
2884 		extra = LARGE_MAXCLASS - size;
2885 	}
2886 
2887 	if (config_prof && opt_prof) {
2888 		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2889 		    alignment, zero, &alloc_ctx);
2890 	} else {
2891 		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2892 		    extra, alignment, zero);
2893 	}
2894 	if (unlikely(usize == old_usize)) {
2895 		goto label_not_resized;
2896 	}
2897 
2898 	if (config_stats) {
2899 		*tsd_thread_allocatedp_get(tsd) += usize;
2900 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2901 	}
2902 label_not_resized:
2903 	UTRACE(ptr, size, ptr);
2904 	check_entry_exit_locking(tsd_tsdn(tsd));
2905 
2906 	LOG("core.xallocx.exit", "result: %zu", usize);
2907 	return usize;
2908 }
2909 
2910 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2911 JEMALLOC_ATTR(pure)
2912 je_sallocx(const void *ptr, UNUSED int flags) {
2913 	size_t usize;
2914 	tsdn_t *tsdn;
2915 
2916 	LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2917 
2918 	assert(malloc_initialized() || IS_INITIALIZER);
2919 	assert(ptr != NULL);
2920 
2921 	tsdn = tsdn_fetch();
2922 	check_entry_exit_locking(tsdn);
2923 
2924 	if (config_debug || force_ivsalloc) {
2925 		usize = ivsalloc(tsdn, ptr);
2926 		assert(force_ivsalloc || usize != 0);
2927 	} else {
2928 		usize = isalloc(tsdn, ptr);
2929 	}
2930 
2931 	check_entry_exit_locking(tsdn);
2932 
2933 	LOG("core.sallocx.exit", "result: %zu", usize);
2934 	return usize;
2935 }
2936 
2937 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2938 je_dallocx(void *ptr, int flags) {
2939 	LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2940 
2941 	assert(ptr != NULL);
2942 	assert(malloc_initialized() || IS_INITIALIZER);
2943 
2944 	tsd_t *tsd = tsd_fetch();
2945 	bool fast = tsd_fast(tsd);
2946 	check_entry_exit_locking(tsd_tsdn(tsd));
2947 
2948 	tcache_t *tcache;
2949 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2950 		/* Not allowed to be reentrant and specify a custom tcache. */
2951 		assert(tsd_reentrancy_level_get(tsd) == 0);
2952 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2953 			tcache = NULL;
2954 		} else {
2955 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2956 		}
2957 	} else {
2958 		if (likely(fast)) {
2959 			tcache = tsd_tcachep_get(tsd);
2960 			assert(tcache == tcache_get(tsd));
2961 		} else {
2962 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2963 				tcache = tcache_get(tsd);
2964 			}  else {
2965 				tcache = NULL;
2966 			}
2967 		}
2968 	}
2969 
2970 	UTRACE(ptr, 0, 0);
2971 	if (likely(fast)) {
2972 		tsd_assert_fast(tsd);
2973 		ifree(tsd, ptr, tcache, false);
2974 	} else {
2975 		ifree(tsd, ptr, tcache, true);
2976 	}
2977 	check_entry_exit_locking(tsd_tsdn(tsd));
2978 
2979 	LOG("core.dallocx.exit", "");
2980 }
2981 
2982 JEMALLOC_ALWAYS_INLINE size_t
2983 inallocx(tsdn_t *tsdn, size_t size, int flags) {
2984 	check_entry_exit_locking(tsdn);
2985 
2986 	size_t usize;
2987 	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
2988 		usize = sz_s2u(size);
2989 	} else {
2990 		usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2991 	}
2992 	check_entry_exit_locking(tsdn);
2993 	return usize;
2994 }
2995 
2996 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2997 je_sdallocx(void *ptr, size_t size, int flags) {
2998 	assert(ptr != NULL);
2999 	assert(malloc_initialized() || IS_INITIALIZER);
3000 
3001 	LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
3002 	    size, flags);
3003 
3004 	tsd_t *tsd = tsd_fetch();
3005 	bool fast = tsd_fast(tsd);
3006 	size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
3007 	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
3008 	check_entry_exit_locking(tsd_tsdn(tsd));
3009 
3010 	tcache_t *tcache;
3011 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
3012 		/* Not allowed to be reentrant and specify a custom tcache. */
3013 		assert(tsd_reentrancy_level_get(tsd) == 0);
3014 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
3015 			tcache = NULL;
3016 		} else {
3017 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
3018 		}
3019 	} else {
3020 		if (likely(fast)) {
3021 			tcache = tsd_tcachep_get(tsd);
3022 			assert(tcache == tcache_get(tsd));
3023 		} else {
3024 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
3025 				tcache = tcache_get(tsd);
3026 			} else {
3027 				tcache = NULL;
3028 			}
3029 		}
3030 	}
3031 
3032 	UTRACE(ptr, 0, 0);
3033 	if (likely(fast)) {
3034 		tsd_assert_fast(tsd);
3035 		isfree(tsd, ptr, usize, tcache, false);
3036 	} else {
3037 		isfree(tsd, ptr, usize, tcache, true);
3038 	}
3039 	check_entry_exit_locking(tsd_tsdn(tsd));
3040 
3041 	LOG("core.sdallocx.exit", "");
3042 }
3043 
3044 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3045 JEMALLOC_ATTR(pure)
3046 je_nallocx(size_t size, int flags) {
3047 	size_t usize;
3048 	tsdn_t *tsdn;
3049 
3050 	assert(size != 0);
3051 
3052 	if (unlikely(malloc_init())) {
3053 		LOG("core.nallocx.exit", "result: %zu", ZU(0));
3054 		return 0;
3055 	}
3056 
3057 	tsdn = tsdn_fetch();
3058 	check_entry_exit_locking(tsdn);
3059 
3060 	usize = inallocx(tsdn, size, flags);
3061 	if (unlikely(usize > LARGE_MAXCLASS)) {
3062 		LOG("core.nallocx.exit", "result: %zu", ZU(0));
3063 		return 0;
3064 	}
3065 
3066 	check_entry_exit_locking(tsdn);
3067 	LOG("core.nallocx.exit", "result: %zu", usize);
3068 	return usize;
3069 }
3070 
3071 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3072 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
3073     size_t newlen) {
3074 	int ret;
3075 	tsd_t *tsd;
3076 
3077 	LOG("core.mallctl.entry", "name: %s", name);
3078 
3079 	if (unlikely(malloc_init())) {
3080 		LOG("core.mallctl.exit", "result: %d", EAGAIN);
3081 		return EAGAIN;
3082 	}
3083 
3084 	tsd = tsd_fetch();
3085 	check_entry_exit_locking(tsd_tsdn(tsd));
3086 	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
3087 	check_entry_exit_locking(tsd_tsdn(tsd));
3088 
3089 	LOG("core.mallctl.exit", "result: %d", ret);
3090 	return ret;
3091 }
3092 
3093 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3094 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
3095 	int ret;
3096 
3097 	LOG("core.mallctlnametomib.entry", "name: %s", name);
3098 
3099 	if (unlikely(malloc_init())) {
3100 		LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
3101 		return EAGAIN;
3102 	}
3103 
3104 	tsd_t *tsd = tsd_fetch();
3105 	check_entry_exit_locking(tsd_tsdn(tsd));
3106 	ret = ctl_nametomib(tsd, name, mibp, miblenp);
3107 	check_entry_exit_locking(tsd_tsdn(tsd));
3108 
3109 	LOG("core.mallctlnametomib.exit", "result: %d", ret);
3110 	return ret;
3111 }
3112 
3113 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3114 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
3115   void *newp, size_t newlen) {
3116 	int ret;
3117 	tsd_t *tsd;
3118 
3119 	LOG("core.mallctlbymib.entry", "");
3120 
3121 	if (unlikely(malloc_init())) {
3122 		LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
3123 		return EAGAIN;
3124 	}
3125 
3126 	tsd = tsd_fetch();
3127 	check_entry_exit_locking(tsd_tsdn(tsd));
3128 	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
3129 	check_entry_exit_locking(tsd_tsdn(tsd));
3130 	LOG("core.mallctlbymib.exit", "result: %d", ret);
3131 	return ret;
3132 }
3133 
3134 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3135 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
3136     const char *opts) {
3137 	tsdn_t *tsdn;
3138 
3139 	LOG("core.malloc_stats_print.entry", "");
3140 
3141 	tsdn = tsdn_fetch();
3142 	check_entry_exit_locking(tsdn);
3143 	stats_print(write_cb, cbopaque, opts);
3144 	check_entry_exit_locking(tsdn);
3145 	LOG("core.malloc_stats_print.exit", "");
3146 }
3147 
3148 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3149 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
3150 	size_t ret;
3151 	tsdn_t *tsdn;
3152 
3153 	LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
3154 
3155 	assert(malloc_initialized() || IS_INITIALIZER);
3156 
3157 	tsdn = tsdn_fetch();
3158 	check_entry_exit_locking(tsdn);
3159 
3160 	if (unlikely(ptr == NULL)) {
3161 		ret = 0;
3162 	} else {
3163 		if (config_debug || force_ivsalloc) {
3164 			ret = ivsalloc(tsdn, ptr);
3165 			assert(force_ivsalloc || ret != 0);
3166 		} else {
3167 			ret = isalloc(tsdn, ptr);
3168 		}
3169 	}
3170 
3171 	check_entry_exit_locking(tsdn);
3172 	LOG("core.malloc_usable_size.exit", "result: %zu", ret);
3173 	return ret;
3174 }
3175 
3176 /*
3177  * End non-standard functions.
3178  */
3179 /******************************************************************************/
3180 /*
3181  * The following functions are used by threading libraries for protection of
3182  * malloc during fork().
3183  */
3184 
3185 /*
3186  * If an application creates a thread before doing any allocation in the main
3187  * thread, then calls fork(2) in the main thread followed by memory allocation
3188  * in the child process, a race can occur that results in deadlock within the
3189  * child: the main thread may have forked while the created thread had
3190  * partially initialized the allocator.  Ordinarily jemalloc prevents
3191  * fork/malloc races via the following functions it registers during
3192  * initialization using pthread_atfork(), but of course that does no good if
3193  * the allocator isn't fully initialized at fork time.  The following library
3194  * constructor is a partial solution to this problem.  It may still be possible
3195  * to trigger the deadlock described above, but doing so would involve forking
3196  * via a library constructor that runs before jemalloc's runs.
3197  */
3198 #ifndef JEMALLOC_JET
3199 JEMALLOC_ATTR(constructor)
3200 static void
3201 jemalloc_constructor(void) {
3202 	malloc_init();
3203 }
3204 #endif
3205 
3206 #ifndef JEMALLOC_MUTEX_INIT_CB
3207 void
3208 jemalloc_prefork(void)
3209 #else
3210 JEMALLOC_EXPORT void
3211 _malloc_prefork(void)
3212 #endif
3213 {
3214 	tsd_t *tsd;
3215 	unsigned i, j, narenas;
3216 	arena_t *arena;
3217 
3218 #ifdef JEMALLOC_MUTEX_INIT_CB
3219 	if (!malloc_initialized()) {
3220 		return;
3221 	}
3222 #endif
3223 	assert(malloc_initialized());
3224 
3225 	tsd = tsd_fetch();
3226 
3227 	narenas = narenas_total_get();
3228 
3229 	witness_prefork(tsd_witness_tsdp_get(tsd));
3230 	/* Acquire all mutexes in a safe order. */
3231 	ctl_prefork(tsd_tsdn(tsd));
3232 	tcache_prefork(tsd_tsdn(tsd));
3233 	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
3234 	if (have_background_thread) {
3235 		background_thread_prefork0(tsd_tsdn(tsd));
3236 	}
3237 	prof_prefork0(tsd_tsdn(tsd));
3238 	if (have_background_thread) {
3239 		background_thread_prefork1(tsd_tsdn(tsd));
3240 	}
3241 	/* Break arena prefork into stages to preserve lock order. */
3242 	for (i = 0; i < 8; i++) {
3243 		for (j = 0; j < narenas; j++) {
3244 			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
3245 			    NULL) {
3246 				switch (i) {
3247 				case 0:
3248 					arena_prefork0(tsd_tsdn(tsd), arena);
3249 					break;
3250 				case 1:
3251 					arena_prefork1(tsd_tsdn(tsd), arena);
3252 					break;
3253 				case 2:
3254 					arena_prefork2(tsd_tsdn(tsd), arena);
3255 					break;
3256 				case 3:
3257 					arena_prefork3(tsd_tsdn(tsd), arena);
3258 					break;
3259 				case 4:
3260 					arena_prefork4(tsd_tsdn(tsd), arena);
3261 					break;
3262 				case 5:
3263 					arena_prefork5(tsd_tsdn(tsd), arena);
3264 					break;
3265 				case 6:
3266 					arena_prefork6(tsd_tsdn(tsd), arena);
3267 					break;
3268 				case 7:
3269 					arena_prefork7(tsd_tsdn(tsd), arena);
3270 					break;
3271 				default: not_reached();
3272 				}
3273 			}
3274 		}
3275 	}
3276 	prof_prefork1(tsd_tsdn(tsd));
3277 }
3278 
3279 #ifndef JEMALLOC_MUTEX_INIT_CB
3280 void
3281 jemalloc_postfork_parent(void)
3282 #else
3283 JEMALLOC_EXPORT void
3284 _malloc_postfork(void)
3285 #endif
3286 {
3287 	tsd_t *tsd;
3288 	unsigned i, narenas;
3289 
3290 #ifdef JEMALLOC_MUTEX_INIT_CB
3291 	if (!malloc_initialized()) {
3292 		return;
3293 	}
3294 #endif
3295 	assert(malloc_initialized());
3296 
3297 	tsd = tsd_fetch();
3298 
3299 	witness_postfork_parent(tsd_witness_tsdp_get(tsd));
3300 	/* Release all mutexes, now that fork() has completed. */
3301 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3302 		arena_t *arena;
3303 
3304 		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3305 			arena_postfork_parent(tsd_tsdn(tsd), arena);
3306 		}
3307 	}
3308 	prof_postfork_parent(tsd_tsdn(tsd));
3309 	if (have_background_thread) {
3310 		background_thread_postfork_parent(tsd_tsdn(tsd));
3311 	}
3312 	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
3313 	tcache_postfork_parent(tsd_tsdn(tsd));
3314 	ctl_postfork_parent(tsd_tsdn(tsd));
3315 }
3316 
3317 void
3318 jemalloc_postfork_child(void) {
3319 	tsd_t *tsd;
3320 	unsigned i, narenas;
3321 
3322 	assert(malloc_initialized());
3323 
3324 	tsd = tsd_fetch();
3325 
3326 	witness_postfork_child(tsd_witness_tsdp_get(tsd));
3327 	extent_postfork_child(tsd_tsdn(tsd));
3328 	/* Release all mutexes, now that fork() has completed. */
3329 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3330 		arena_t *arena;
3331 
3332 		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3333 			arena_postfork_child(tsd_tsdn(tsd), arena);
3334 		}
3335 	}
3336 	prof_postfork_child(tsd_tsdn(tsd));
3337 	if (have_background_thread) {
3338 		background_thread_postfork_child(tsd_tsdn(tsd));
3339 	}
3340 	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
3341 	tcache_postfork_child(tsd_tsdn(tsd));
3342 	ctl_postfork_child(tsd_tsdn(tsd));
3343 }
3344 
3345 /******************************************************************************/
3346 
3347 #if defined(__BIONIC__) && !defined(JEMALLOC_JET)
3348 #include "android_je_iterate.c"
3349 #include "android_je_mallinfo.c"
3350 #include "android_je_stats.c"
3351 #endif
3352