#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#include <string.h>
#include <sys/mman.h>
#include <errno.h>

#include "meta.h"

LOCK_OBJ_DEF;

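// size classes are expressed in 16-byte UNITs (see meta.h), so e.g.
// class 10 is 12 units: a 192-byte slot stride serving requests up
// to 192-IB bytes. past the first eight, classes come in runs of
// four sized so that a group of slots packs just below a power of two.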
const uint16_t size_classes[] = {
	1, 2, 3, 4, 5, 6, 7, 8,
	9, 10, 12, 15,
	18, 20, 25, 31,
	36, 42, 50, 63,
	72, 84, 102, 127,
	146, 170, 204, 255,
	292, 340, 409, 511,
	584, 682, 818, 1023,
	1169, 1364, 1637, 2047,
	2340, 2730, 3276, 4095,
	4680, 5460, 6552, 8191,
};

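// slot counts for the small size classes; rows are indexed by class.
// alloc_group steps to a later (smaller) column while 4*cnt still
// exceeds the class's current usage, so lightly-used classes start
// with smaller groups.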
static const uint8_t small_cnt_tab[][3] = {
	{ 30, 30, 30 },
	{ 31, 15, 15 },
	{ 20, 10, 10 },
	{ 31, 15, 7 },
	{ 25, 12, 6 },
	{ 21, 10, 5 },
	{ 18, 8, 4 },
	{ 31, 15, 7 },
	{ 28, 14, 6 },
};

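// slot counts for medium size classes, indexed by sc&3; cnt*size
// lands just below a power of two, e.g. for classes 12-15 (18, 20,
// 25, 31 units): 28*18==504, 24*20==480, 20*25==500, 32*31==992 units.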
static const uint8_t med_cnt_tab[4] = { 28, 24, 20, 32 };

struct malloc_context ctx = { 0 };

struct meta *alloc_meta(void)
{
	struct meta *m;
	unsigned char *p;
	if (!ctx.init_done) {
#ifndef PAGESIZE
		ctx.pagesize = get_page_size();
#endif
		ctx.secret = get_random_secret();
		ctx.init_done = 1;
	}
	size_t pagesize = PGSZ;
	if (pagesize < 4096) pagesize = 4096;
	if ((m = dequeue_head(&ctx.free_meta_head))) return m;
	if (!ctx.avail_meta_count) {
		int need_unprotect = 1;
		if (!ctx.avail_meta_area_count && ctx.brk!=-1) {
			uintptr_t new = ctx.brk + pagesize;
			int need_guard = 0;
			if (!ctx.brk) {
				need_guard = 1;
				ctx.brk = brk(0);
				// some ancient kernels returned _ebss
				// instead of next page as initial brk.
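				// -x & (pagesize-1) rounds x up to the
				// next page boundary, e.g. (with a
				// 4096-byte page) 0x40a123 -> 0x40b000.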
				ctx.brk += -ctx.brk & (pagesize-1);
				new = ctx.brk + 2*pagesize;
			}
			if (brk(new) != new) {
				ctx.brk = -1;
			} else {
				if (need_guard) mmap((void *)ctx.brk, pagesize,
					PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
				ctx.brk = new;
				ctx.avail_meta_areas = (void *)(new - pagesize);
				ctx.avail_meta_area_count = pagesize>>12;
				need_unprotect = 0;
			}
		}
		if (!ctx.avail_meta_area_count) {
			size_t n = 2UL << ctx.meta_alloc_shift;
			p = mmap(0, n*pagesize, PROT_NONE,
				MAP_PRIVATE|MAP_ANON, -1, 0);
			if (p==MAP_FAILED) return 0;
			ctx.avail_meta_areas = p + pagesize;
			ctx.avail_meta_area_count = (n-1)*(pagesize>>12);
			ctx.meta_alloc_shift++;
		}
		p = ctx.avail_meta_areas;
		if ((uintptr_t)p & (pagesize-1)) need_unprotect = 0;
		if (need_unprotect)
			if (mprotect(p, pagesize, PROT_READ|PROT_WRITE)
			    && errno != ENOSYS)
				return 0;
		ctx.avail_meta_area_count--;
		ctx.avail_meta_areas = p + 4096;
		if (ctx.meta_area_tail) {
			ctx.meta_area_tail->next = (void *)p;
		} else {
			ctx.meta_area_head = (void *)p;
		}
		ctx.meta_area_tail = (void *)p;
		ctx.meta_area_tail->check = ctx.secret;
		ctx.avail_meta_count = ctx.meta_area_tail->nslots
			= (4096-sizeof(struct meta_area))/sizeof *m;
		ctx.avail_meta = ctx.meta_area_tail->slots;
	}
	ctx.avail_meta_count--;
	m = ctx.avail_meta++;
	m->prev = m->next = 0;
	return m;
}

static uint32_t try_avail(struct meta **pm)
{
	struct meta *m = *pm;
	uint32_t first;
	if (!m) return 0;
	uint32_t mask = m->avail_mask;
	if (!mask) {
		if (!m) return 0;
		if (!m->freed_mask) {
			dequeue(pm, m);
			m = *pm;
			if (!m) return 0;
		} else {
			m = m->next;
			*pm = m;
		}

		mask = m->freed_mask;

		// skip fully-free group unless it's the only one
		// or it's a permanently non-freeable group
		if (mask == (2u<<m->last_idx)-1 && m->freeable) {
			m = m->next;
			*pm = m;
			mask = m->freed_mask;
		}

		// activate more slots in a not-fully-active group
		// if needed, but only as a last resort. prefer using
		// any other group with free slots. this avoids
		// touching & dirtying as-yet-unused pages.
		if (!(mask & ((2u<<m->mem->active_idx)-1))) {
			if (m->next != m) {
				m = m->next;
				*pm = m;
			} else {
				int cnt = m->mem->active_idx + 2;
				int size = size_classes[m->sizeclass]*UNIT;
				int span = UNIT + size*cnt;
				// activate up to next 4k boundary
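				// span^(span+size-1) has a bit >= 4096
				// set exactly when the next slot's first
				// and last bytes fall in different 4k
				// pages; e.g. span=4000, size=96 gives
				// 4000^4095 == 95 (same page, continue),
				// and the first slot straddling a page
				// boundary ends the loop.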
				while ((span^(span+size-1)) < 4096) {
					cnt++;
					span += size;
				}
				if (cnt > m->last_idx+1)
					cnt = m->last_idx+1;
				m->mem->active_idx = cnt-1;
			}
		}
		mask = activate_group(m);
		assert(mask);
		decay_bounces(m->sizeclass);
	}
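	// mask & -mask isolates the lowest set bit, i.e. the first
	// free slot; e.g. mask 0b11010 yields 0b10 (slot 1).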
	first = mask&-mask;
	m->avail_mask = mask-first;
	return first;
}

static int alloc_slot(int, size_t);

static struct meta *alloc_group(int sc, size_t req)
{
	size_t size = UNIT*size_classes[sc];
	int i = 0, cnt;
	unsigned char *p;
	struct meta *m = alloc_meta();
	if (!m) return 0;
	size_t usage = ctx.usage_by_class[sc];
	size_t pagesize = PGSZ;
	int active_idx;
	if (sc < 9) {
		while (i<2 && 4*small_cnt_tab[sc][i] > usage)
			i++;
		cnt = small_cnt_tab[sc][i];
	} else {
		// lookup max number of slots fitting in power-of-two size
		// from a table, along with number of factors of two we
		// can divide out without a remainder or reaching 1.
		cnt = med_cnt_tab[sc&3];

		// reduce cnt to avoid excessive eager allocation.
		while (!(cnt&1) && 4*cnt > usage)
			cnt >>= 1;

		// data structures don't support groups whose slot offsets
		// in units don't fit in 16 bits.
		while (size*cnt >= 65536*UNIT)
			cnt >>= 1;
	}

	// If we selected a count of 1 above but it's not sufficient to use
	// mmap, increase to 2. Then it might be; if not it will nest.
	if (cnt==1 && size*cnt+UNIT <= pagesize/2) cnt = 2;

	// All choices of size*cnt are "just below" a power of two, so anything
	// larger than half the page size should be allocated as whole pages.
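	// e.g. class 15 (31 units == 496 bytes) at cnt 32 needs
	// 32*496+16 == 15888 bytes, more than half of a 4096-byte
	// page, so it takes this whole-page path.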
	if (size*cnt+UNIT > pagesize/2) {
		// check/update bounce counter to start/increase retention
		// of freed maps, and inhibit use of low-count, odd-size
		// small mappings and single-slot groups if activated.
		int nosmall = is_bouncing(sc);
		account_bounce(sc);
		step_seq();

		// since the following count reduction opportunities have
		// an absolute memory usage cost, don't overdo them. count
		// coarse usage as part of usage.
		if (!(sc&1) && sc<32) usage += ctx.usage_by_class[sc+1];

		// try to drop to a lower count if the one found above
		// increases usage by more than 25%. these reduced counts
		// roughly fill an integral number of pages, just not a
		// power of two, limiting amount of unusable space.
		if (4*cnt > usage && !nosmall) {
			if (0);
			else if ((sc&3)==1 && size*cnt>8*pagesize) cnt = 2;
			else if ((sc&3)==2 && size*cnt>4*pagesize) cnt = 3;
			else if ((sc&3)==0 && size*cnt>8*pagesize) cnt = 3;
			else if ((sc&3)==0 && size*cnt>2*pagesize) cnt = 5;
		}
		size_t needed = size*cnt + UNIT;
		needed += -needed & (pagesize-1);

		// produce an individually-mmapped allocation if usage is low,
		// bounce counter hasn't triggered, and either it saves memory
		// or it avoids eager slot allocation without wasting too much.
		if (!nosmall && cnt<=7) {
			req += IB + UNIT;
			req += -req & (pagesize-1);
			if (req<size+UNIT || (req>=4*pagesize && 2*cnt>usage)) {
				cnt = 1;
				needed = req;
			}
		}

		p = mmap(0, needed, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
		if (p==MAP_FAILED) {
			free_meta(m);
			return 0;
		}
		m->maplen = needed>>12;
		ctx.mmap_counter++;
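		// initially activate only slots in the first 4k of the
		// map: (4096-UNIT)/size whole slots fit after the 16-byte
		// group header, e.g. 8 slots of 496 bytes (active_idx 7).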
		active_idx = (4096-UNIT)/size-1;
		if (active_idx > cnt-1) active_idx = cnt-1;
		if (active_idx < 0) active_idx = 0;
	} else {
		int j = size_to_class(UNIT+cnt*size-IB);
		int idx = alloc_slot(j, UNIT+cnt*size-IB);
		if (idx < 0) {
			free_meta(m);
			return 0;
		}
		struct meta *g = ctx.active[j];
		p = enframe(g, idx, UNIT*size_classes[j]-IB, ctx.mmap_counter);
		m->maplen = 0;
		p[-3] = (p[-3]&31) | (6<<5);
		for (int i=0; i<=cnt; i++)
			p[UNIT+i*size-4] = 0;
		active_idx = cnt-1;
	}
	ctx.usage_by_class[sc] += cnt;
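	// (2u<<k)-1 sets bits 0..k; e.g. cnt 7 with active_idx 4
	// gives avail_mask 0b0011111 and freed_mask 0b1100000, so
	// the two not-yet-activated slots are parked as "freed".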
	m->avail_mask = (2u<<active_idx)-1;
	m->freed_mask = (2u<<(cnt-1))-1 - m->avail_mask;
	m->mem = (void *)p;
	m->mem->meta = m;
	m->mem->active_idx = active_idx;
	m->last_idx = cnt-1;
	m->freeable = 1;
	m->sizeclass = sc;
	return m;
}

static int alloc_slot(int sc, size_t req)
{
	uint32_t first = try_avail(&ctx.active[sc]);
	if (first) return a_ctz_32(first);

	struct meta *g = alloc_group(sc, req);
	if (!g) return -1;

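	// a fresh group's avail_mask always has bit 0 set, so the
	// decrement claims exactly slot 0.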
	g->avail_mask--;
	queue(&ctx.active[sc], g);
	return 0;
}

void *malloc(size_t n)
{
	if (size_overflows(n)) return 0;
	struct meta *g;
	uint32_t mask, first;
	int sc;
	int idx;
	int ctr;

	if (n >= MMAP_THRESHOLD) {
		size_t needed = n + IB + UNIT;
		void *p = mmap(0, needed, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
		if (p==MAP_FAILED) return 0;
		wrlock();
		step_seq();
		g = alloc_meta();
		if (!g) {
			unlock();
			munmap(p, needed);
			return 0;
		}
		g->mem = p;
		g->mem->meta = g;
		g->last_idx = 0;
		g->freeable = 1;
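		// sizeclass 63 marks an individually-mmapped allocation
		// with no size-class group backing it.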
		g->sizeclass = 63;
		g->maplen = (needed+4095)/4096;
		g->avail_mask = g->freed_mask = 0;
		// use a global counter to cycle offset in
		// individually-mmapped allocations.
		ctx.mmap_counter++;
		idx = 0;
		goto success;
	}

	sc = size_to_class(n);

	rdlock();
	g = ctx.active[sc];

	// use coarse size classes initially when there are not yet
	// any groups of desired size. this allows counts of 2 or 3
	// to be allocated at first rather than having to start with
	// 7 or 5, the min counts for even size classes.
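	// e.g. a first class-8 (9-unit) request can be served from a
	// class-9 (10-unit) group of count 3 (24 halved to 3), rather
	// than forcing a class-8 group of count 6, small_cnt_tab's
	// minimum for that row.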
	if (!g && sc>=4 && sc<32 && sc!=6 && !(sc&1) && !ctx.usage_by_class[sc]) {
		size_t usage = ctx.usage_by_class[sc|1];
		// if a new group may be allocated, count it toward
		// usage in deciding if we can use coarse class.
		if (!ctx.active[sc|1] || (!ctx.active[sc|1]->avail_mask
		    && !ctx.active[sc|1]->freed_mask))
			usage += 3;
		if (usage <= 12)
			sc |= 1;
		g = ctx.active[sc];
	}

	for (;;) {
		mask = g ? g->avail_mask : 0;
		first = mask&-mask;
		if (!first) break;
		if (RDLOCK_IS_EXCLUSIVE || !MT)
			g->avail_mask = mask-first;
		else if (a_cas(&g->avail_mask, mask, mask-first)!=mask)
			continue;
		idx = a_ctz_32(first);
		goto success;
	}
	upgradelock();

	idx = alloc_slot(sc, n);
	if (idx < 0) {
		unlock();
		return 0;
	}
	g = ctx.active[sc];

success:
	ctr = ctx.mmap_counter;
	unlock();
	return enframe(g, idx, n, ctr);
}

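// reports whether a freshly allocated slot is known to be all-zero
// (its memory comes straight from fresh anonymous mmap pages);
// calloc uses this to skip its memset.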
int is_allzero(void *p)
{
	struct meta *g = get_meta(p);
	return g->sizeclass >= 48 ||
		get_stride(g) < UNIT*size_classes[g->sizeclass];
}