1 /*
2   Default header file for malloc-2.8.x, written by Doug Lea
3   and released to the public domain, as explained at
4   http://creativecommons.org/publicdomain/zero/1.0/
5 
6   This header is for ANSI C/C++ only.  You can set any of
7   the following #defines before including:
8 
9   * If USE_DL_PREFIX is defined, it is assumed that malloc.c
10     was also compiled with this option, so all routines
11     have names starting with "dl".
12 
13   * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
14     file will be #included AFTER <malloc.h>. This is needed only if
15     your system defines a struct mallinfo that is incompatible with the
16     standard one declared here.  Otherwise, you can include this file
17     INSTEAD of your system system <malloc.h>.  At least on ANSI, all
18     declarations should be compatible with system versions
19 
20   * If MSPACES is defined, declarations for mspace versions are included.
21 */
22 
23 #ifndef MALLOC_280_H
24 #define MALLOC_280_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
30 #include <stddef.h>   /* for size_t */
31 
32 #if LK
33 #define USE_DL_PREFIX 1
34 #endif
35 
36 #ifndef ONLY_MSPACES
37 #define ONLY_MSPACES 0     /* define to a value */
38 #elif ONLY_MSPACES != 0
39 #define ONLY_MSPACES 1
40 #endif  /* ONLY_MSPACES */
41 #ifndef NO_MALLINFO
42 #define NO_MALLINFO 0
43 #endif  /* NO_MALLINFO */
44 
45 #ifndef MSPACES
46 #if ONLY_MSPACES
47 #define MSPACES 1
48 #else   /* ONLY_MSPACES */
49 #define MSPACES 0
50 #endif  /* ONLY_MSPACES */
51 #endif  /* MSPACES */
52 
53 #if !ONLY_MSPACES
54 
55 #ifndef USE_DL_PREFIX
56 #define dlcalloc               calloc
57 #define dlfree                 free
58 #define dlmalloc               malloc
59 #define dlmemalign             memalign
60 #define dlposix_memalign       posix_memalign
61 #define dlrealloc              realloc
62 #define dlvalloc               valloc
63 #define dlpvalloc              pvalloc
64 #define dlmallinfo             mallinfo
65 #define dlmallopt              mallopt
66 #define dlmalloc_trim          malloc_trim
67 #define dlmalloc_stats         malloc_stats
68 #define dlmalloc_usable_size   malloc_usable_size
69 #define dlmalloc_footprint     malloc_footprint
70 #define dlmalloc_max_footprint malloc_max_footprint
71 #define dlmalloc_footprint_limit malloc_footprint_limit
72 #define dlmalloc_set_footprint_limit malloc_set_footprint_limit
73 #define dlmalloc_inspect_all   malloc_inspect_all
74 #define dlindependent_calloc   independent_calloc
75 #define dlindependent_comalloc independent_comalloc
76 #define dlbulk_free            bulk_free
77 #endif /* USE_DL_PREFIX */
78 
79 #if !NO_MALLINFO
80 #ifndef HAVE_USR_INCLUDE_MALLOC_H
81 #ifndef _MALLOC_H
82 #ifndef MALLINFO_FIELD_TYPE
83 #define MALLINFO_FIELD_TYPE size_t
84 #endif /* MALLINFO_FIELD_TYPE */
85 #ifndef STRUCT_MALLINFO_DECLARED
86 #define STRUCT_MALLINFO_DECLARED 1
87 struct mallinfo {
88   MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
89   MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
90   MALLINFO_FIELD_TYPE smblks;   /* always 0 */
91   MALLINFO_FIELD_TYPE hblks;    /* always 0 */
92   MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
93   MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
94   MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
95   MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
96   MALLINFO_FIELD_TYPE fordblks; /* total free space */
97   MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
98 };
99 #endif /* STRUCT_MALLINFO_DECLARED */
100 #endif  /* _MALLOC_H */
101 #endif  /* HAVE_USR_INCLUDE_MALLOC_H */
102 #endif  /* !NO_MALLINFO */
103 
104 /*
105   malloc(size_t n)
106   Returns a pointer to a newly allocated chunk of at least n bytes, or
107   null if no space is available, in which case errno is set to ENOMEM
108   on ANSI C systems.
109 
110   If n is zero, malloc returns a minimum-sized chunk. (The minimum
111   size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
112   systems.)  Note that size_t is an unsigned type, so calls with
113   arguments that would be negative if signed are interpreted as
114   requests for huge amounts of space, which will often fail. The
115   maximum supported value of n differs across systems, but is in all
116   cases less than the maximum representable value of a size_t.
117 */
118 void* dlmalloc(size_t);
119 
120 /*
121   free(void* p)
122   Releases the chunk of memory pointed to by p, that had been previously
123   allocated using malloc or a related routine such as realloc.
124   It has no effect if p is null. If p was not malloced or already
125   freed, free(p) will by default cuase the current program to abort.
126 */
127 void  dlfree(void*);
128 
129 /*
130   calloc(size_t n_elements, size_t element_size);
131   Returns a pointer to n_elements * element_size bytes, with all locations
132   set to zero.
133 */
134 void* dlcalloc(size_t, size_t);
135 
136 /*
137   realloc(void* p, size_t n)
138   Returns a pointer to a chunk of size n that contains the same data
139   as does chunk p up to the minimum of (n, p's size) bytes, or null
140   if no space is available.
141 
142   The returned pointer may or may not be the same as p. The algorithm
143   prefers extending p in most cases when possible, otherwise it
144   employs the equivalent of a malloc-copy-free sequence.
145 
146   If p is null, realloc is equivalent to malloc.
147 
148   If space is not available, realloc returns null, errno is set (if on
149   ANSI) and p is NOT freed.
150 
151   if n is for fewer bytes than already held by p, the newly unused
152   space is lopped off and freed if possible.  realloc with a size
153   argument of zero (re)allocates a minimum-sized chunk.
154 
155   The old unix realloc convention of allowing the last-free'd chunk
156   to be used as an argument to realloc is not supported.
157 */
158 void* dlrealloc(void*, size_t);
159 
160 /*
161   realloc_in_place(void* p, size_t n)
162   Resizes the space allocated for p to size n, only if this can be
163   done without moving p (i.e., only if there is adjacent space
164   available if n is greater than p's current allocated size, or n is
165   less than or equal to p's size). This may be used instead of plain
166   realloc if an alternative allocation strategy is needed upon failure
167   to expand space; for example, reallocation of a buffer that must be
168   memory-aligned or cleared. You can use realloc_in_place to trigger
169   these alternatives only when needed.
170 
171   Returns p if successful; otherwise null.
172 */
173 void* dlrealloc_in_place(void*, size_t);
174 
175 /*
176   memalign(size_t alignment, size_t n);
177   Returns a pointer to a newly allocated chunk of n bytes, aligned
178   in accord with the alignment argument.
179 
180   The alignment argument should be a power of two. If the argument is
181   not a power of two, the nearest greater power is used.
182   8-byte alignment is guaranteed by normal malloc calls, so don't
183   bother calling memalign with an argument of 8 or less.
184 
185   Overreliance on memalign is a sure way to fragment space.
186 */
187 void* dlmemalign(size_t, size_t);
188 
189 /*
190   int posix_memalign(void** pp, size_t alignment, size_t n);
191   Allocates a chunk of n bytes, aligned in accord with the alignment
192   argument. Differs from memalign only in that it (1) assigns the
193   allocated memory to *pp rather than returning it, (2) fails and
194   returns EINVAL if the alignment is not a power of two (3) fails and
195   returns ENOMEM if memory cannot be allocated.
196 */
197 int dlposix_memalign(void**, size_t, size_t);
198 
199 /*
200   valloc(size_t n);
201   Equivalent to memalign(pagesize, n), where pagesize is the page
202   size of the system. If the pagesize is unknown, 4096 is used.
203 */
204 void* dlvalloc(size_t);
205 
206 /*
207   mallopt(int parameter_number, int parameter_value)
208   Sets tunable parameters The format is to provide a
209   (parameter-number, parameter-value) pair.  mallopt then sets the
210   corresponding parameter to the argument value if it can (i.e., so
211   long as the value is meaningful), and returns 1 if successful else
212   0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
213   normally defined in malloc.h.  None of these are use in this malloc,
214   so setting them has no effect. But this malloc also supports other
215   options in mallopt:
216 
217   Symbol            param #  default    allowed param values
218   M_TRIM_THRESHOLD     -1   2*1024*1024   any   (-1U disables trimming)
219   M_GRANULARITY        -2     page size   any power of 2 >= page size
220   M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
221 */
222 int dlmallopt(int, int);
223 
224 #define M_TRIM_THRESHOLD     (-1)
225 #define M_GRANULARITY        (-2)
226 #define M_MMAP_THRESHOLD     (-3)
227 
228 
229 /*
230   malloc_footprint();
231   Returns the number of bytes obtained from the system.  The total
232   number of bytes allocated by malloc, realloc etc., is less than this
233   value. Unlike mallinfo, this function returns only a precomputed
234   result, so can be called frequently to monitor memory consumption.
235   Even if locks are otherwise defined, this function does not use them,
236   so results might not be up to date.
237 */
238 size_t dlmalloc_footprint(void);
239 
240 /*
241   malloc_max_footprint();
242   Returns the maximum number of bytes obtained from the system. This
243   value will be greater than current footprint if deallocated space
244   has been reclaimed by the system. The peak number of bytes allocated
245   by malloc, realloc etc., is less than this value. Unlike mallinfo,
246   this function returns only a precomputed result, so can be called
247   frequently to monitor memory consumption.  Even if locks are
248   otherwise defined, this function does not use them, so results might
249   not be up to date.
250 */
251 size_t dlmalloc_max_footprint(void);
252 
253 /*
254   malloc_footprint_limit();
255   Returns the number of bytes that the heap is allowed to obtain from
256   the system, returning the last value returned by
257   malloc_set_footprint_limit, or the maximum size_t value if
258   never set. The returned value reflects a permission. There is no
259   guarantee that this number of bytes can actually be obtained from
260   the system.
261 */
262 size_t dlmalloc_footprint_limit(void);
263 
264 /*
265   malloc_set_footprint_limit();
266   Sets the maximum number of bytes to obtain from the system, causing
267   failure returns from malloc and related functions upon attempts to
268   exceed this value. The argument value may be subject to page
269   rounding to an enforceable limit; this actual value is returned.
270   Using an argument of the maximum possible size_t effectively
271   disables checks. If the argument is less than or equal to the
272   current malloc_footprint, then all future allocations that require
273   additional system memory will fail. However, invocation cannot
274   retroactively deallocate existing used memory.
275 */
276 size_t dlmalloc_set_footprint_limit(size_t bytes);
277 
278 /*
279   malloc_inspect_all(void(*handler)(void *start,
280                                     void *end,
281                                     size_t used_bytes,
282                                     void* callback_arg),
283                       void* arg);
284   Traverses the heap and calls the given handler for each managed
285   region, skipping all bytes that are (or may be) used for bookkeeping
286   purposes.  Traversal does not include include chunks that have been
287   directly memory mapped. Each reported region begins at the start
288   address, and continues up to but not including the end address.  The
289   first used_bytes of the region contain allocated data. If
290   used_bytes is zero, the region is unallocated. The handler is
291   invoked with the given callback argument. If locks are defined, they
292   are held during the entire traversal. It is a bad idea to invoke
293   other malloc functions from within the handler.
294 
295   For example, to count the number of in-use chunks with size greater
296   than 1000, you could write:
297   static int count = 0;
298   void count_chunks(void* start, void* end, size_t used, void* arg) {
299     if (used >= 1000) ++count;
300   }
301   then:
302     malloc_inspect_all(count_chunks, NULL);
303 
304   malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
305 */
306 void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
307                            void* arg);
308 
309 #if !NO_MALLINFO
310 /*
311   mallinfo()
312   Returns (by copy) a struct containing various summary statistics:
313 
314   arena:     current total non-mmapped bytes allocated from system
315   ordblks:   the number of free chunks
316   smblks:    always zero.
317   hblks:     current number of mmapped regions
318   hblkhd:    total bytes held in mmapped regions
319   usmblks:   the maximum total allocated space. This will be greater
320                 than current total if trimming has occurred.
321   fsmblks:   always zero
322   uordblks:  current total allocated space (normal or mmapped)
323   fordblks:  total free space
324   keepcost:  the maximum number of bytes that could ideally be released
325                back to system via malloc_trim. ("ideally" means that
326                it ignores page restrictions etc.)
327 
328   Because these fields are ints, but internal bookkeeping may
329   be kept as longs, the reported values may wrap around zero and
330   thus be inaccurate.
331 */
332 
333 struct mallinfo dlmallinfo(void);
334 #endif  /* NO_MALLINFO */
335 
336 /*
337   independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
338 
339   independent_calloc is similar to calloc, but instead of returning a
340   single cleared space, it returns an array of pointers to n_elements
341   independent elements that can hold contents of size elem_size, each
342   of which starts out cleared, and can be independently freed,
343   realloc'ed etc. The elements are guaranteed to be adjacently
344   allocated (this is not guaranteed to occur with multiple callocs or
345   mallocs), which may also improve cache locality in some
346   applications.
347 
348   The "chunks" argument is optional (i.e., may be null, which is
349   probably the most typical usage). If it is null, the returned array
350   is itself dynamically allocated and should also be freed when it is
351   no longer needed. Otherwise, the chunks array must be of at least
352   n_elements in length. It is filled in with the pointers to the
353   chunks.
354 
355   In either case, independent_calloc returns this pointer array, or
356   null if the allocation failed.  If n_elements is zero and "chunks"
357   is null, it returns a chunk representing an array with zero elements
358   (which should be freed if not wanted).
359 
360   Each element must be freed when it is no longer needed. This can be
361   done all at once using bulk_free.
362 
363   independent_calloc simplifies and speeds up implementations of many
364   kinds of pools.  It may also be useful when constructing large data
365   structures that initially have a fixed number of fixed-sized nodes,
366   but the number is not known at compile time, and some of the nodes
367   may later need to be freed. For example:
368 
369   struct Node { int item; struct Node* next; };
370 
371   struct Node* build_list() {
372     struct Node** pool;
373     int n = read_number_of_nodes_needed();
374     if (n <= 0) return 0;
375     pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0);
376     if (pool == 0) die();
377     // organize into a linked list...
378     struct Node* first = pool[0];
379     for (i = 0; i < n-1; ++i)
380       pool[i]->next = pool[i+1];
381     free(pool);     // Can now free the array (or not, if it is needed later)
382     return first;
383   }
384 */
385 void** dlindependent_calloc(size_t, size_t, void**);
386 
387 /*
388   independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
389 
390   independent_comalloc allocates, all at once, a set of n_elements
391   chunks with sizes indicated in the "sizes" array.    It returns
392   an array of pointers to these elements, each of which can be
393   independently freed, realloc'ed etc. The elements are guaranteed to
394   be adjacently allocated (this is not guaranteed to occur with
395   multiple callocs or mallocs), which may also improve cache locality
396   in some applications.
397 
398   The "chunks" argument is optional (i.e., may be null). If it is null
399   the returned array is itself dynamically allocated and should also
400   be freed when it is no longer needed. Otherwise, the chunks array
401   must be of at least n_elements in length. It is filled in with the
402   pointers to the chunks.
403 
404   In either case, independent_comalloc returns this pointer array, or
405   null if the allocation failed.  If n_elements is zero and chunks is
406   null, it returns a chunk representing an array with zero elements
407   (which should be freed if not wanted).
408 
409   Each element must be freed when it is no longer needed. This can be
410   done all at once using bulk_free.
411 
412   independent_comallac differs from independent_calloc in that each
413   element may have a different size, and also that it does not
414   automatically clear elements.
415 
416   independent_comalloc can be used to speed up allocation in cases
417   where several structs or objects must always be allocated at the
418   same time.  For example:
419 
420   struct Head { ... }
421   struct Foot { ... }
422 
423   void send_message(char* msg) {
424     int msglen = strlen(msg);
425     size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
426     void* chunks[3];
427     if (independent_comalloc(3, sizes, chunks) == 0)
428       die();
429     struct Head* head = (struct Head*)(chunks[0]);
430     char*        body = (char*)(chunks[1]);
431     struct Foot* foot = (struct Foot*)(chunks[2]);
432     // ...
433   }
434 
435   In general though, independent_comalloc is worth using only for
436   larger values of n_elements. For small values, you probably won't
437   detect enough difference from series of malloc calls to bother.
438 
439   Overuse of independent_comalloc can increase overall memory usage,
440   since it cannot reuse existing noncontiguous small chunks that
441   might be available for some of the elements.
442 */
443 void** dlindependent_comalloc(size_t, size_t*, void**);
444 
445 /*
446   bulk_free(void* array[], size_t n_elements)
447   Frees and clears (sets to null) each non-null pointer in the given
448   array.  This is likely to be faster than freeing them one-by-one.
449   If footers are used, pointers that have been allocated in different
450   mspaces are not freed or cleared, and the count of all such pointers
451   is returned.  For large arrays of pointers with poor locality, it
452   may be worthwhile to sort this array before calling bulk_free.
453 */
454 size_t  dlbulk_free(void**, size_t n_elements);
455 
456 /*
457   pvalloc(size_t n);
458   Equivalent to valloc(minimum-page-that-holds(n)), that is,
459   round up n to nearest pagesize.
460  */
461 void*  dlpvalloc(size_t);
462 
463 /*
464   malloc_trim(size_t pad);
465 
466   If possible, gives memory back to the system (via negative arguments
467   to sbrk) if there is unused memory at the `high' end of the malloc
468   pool or in unused MMAP segments. You can call this after freeing
469   large blocks of memory to potentially reduce the system-level memory
470   requirements of a program. However, it cannot guarantee to reduce
471   memory. Under some allocation patterns, some large free blocks of
472   memory will be locked between two used chunks, so they cannot be
473   given back to the system.
474 
475   The `pad' argument to malloc_trim represents the amount of free
476   trailing space to leave untrimmed. If this argument is zero, only
477   the minimum amount of memory to maintain internal data structures
478   will be left. Non-zero arguments can be supplied to maintain enough
479   trailing space to service future expected allocations without having
480   to re-obtain memory from the system.
481 
482   Malloc_trim returns 1 if it actually released any memory, else 0.
483 */
484 int  dlmalloc_trim(size_t);
485 
486 /*
487   malloc_stats();
488   Prints on stderr the amount of space obtained from the system (both
489   via sbrk and mmap), the maximum amount (which may be more than
490   current if malloc_trim and/or munmap got called), and the current
491   number of bytes allocated via malloc (or realloc, etc) but not yet
492   freed. Note that this is the number of bytes allocated, not the
493   number requested. It will be larger than the number requested
494   because of alignment and bookkeeping overhead. Because it includes
495   alignment wastage as being in use, this figure may be greater than
496   zero even when no user-level chunks are allocated.
497 
498   The reported current and maximum system memory can be inaccurate if
499   a program makes other calls to system memory allocation functions
500   (normally sbrk) outside of malloc.
501 
502   malloc_stats prints only the most commonly interesting statistics.
503   More information can be obtained by calling mallinfo.
504 
505   malloc_stats is not compiled if NO_MALLOC_STATS is defined.
506 */
507 void  dlmalloc_stats(void);
508 
509 #endif /* !ONLY_MSPACES */
510 
511 /*
512   malloc_usable_size(void* p);
513 
514   Returns the number of bytes you can actually use in
515   an allocated chunk, which may be more than you requested (although
516   often not) due to alignment and minimum size constraints.
517   You can use this many bytes without worrying about
518   overwriting other allocated objects. This is not a particularly great
519   programming practice. malloc_usable_size can be more useful in
520   debugging and assertions, for example:
521 
522   p = malloc(n);
523   assert(malloc_usable_size(p) >= 256);
524 */
525 size_t dlmalloc_usable_size(const void*);
526 
527 #if MSPACES
528 
529 /*
530   mspace is an opaque type representing an independent
531   region of space that supports mspace_malloc, etc.
532 */
533 typedef void* mspace;
534 
535 /*
536   create_mspace creates and returns a new independent space with the
537   given initial capacity, or, if 0, the default granularity size.  It
538   returns null if there is no system memory available to create the
539   space.  If argument locked is non-zero, the space uses a separate
540   lock to control access. The capacity of the space will grow
541   dynamically as needed to service mspace_malloc requests.  You can
542   control the sizes of incremental increases of this space by
543   compiling with a different DEFAULT_GRANULARITY or dynamically
544   setting with mallopt(M_GRANULARITY, value).
545 */
546 mspace create_mspace(size_t capacity, int locked);
547 
548 /*
549   destroy_mspace destroys the given space, and attempts to return all
550   of its memory back to the system, returning the total number of
551   bytes freed. After destruction, the results of access to all memory
552   used by the space become undefined.
553 */
554 size_t destroy_mspace(mspace msp);
555 
556 /*
557   create_mspace_with_base uses the memory supplied as the initial base
558   of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
559   space is used for bookkeeping, so the capacity must be at least this
560   large. (Otherwise 0 is returned.) When this initial space is
561   exhausted, additional memory will be obtained from the system.
562   Destroying this space will deallocate all additionally allocated
563   space (if possible) but not the initial base.
564 */
565 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
566 
567 /*
568   mspace_track_large_chunks controls whether requests for large chunks
569   are allocated in their own untracked mmapped regions, separate from
570   others in this mspace. By default large chunks are not tracked,
571   which reduces fragmentation. However, such chunks are not
572   necessarily released to the system upon destroy_mspace.  Enabling
573   tracking by setting to true may increase fragmentation, but avoids
574   leakage when relying on destroy_mspace to release all memory
575   allocated using this space.  The function returns the previous
576   setting.
577 */
578 int mspace_track_large_chunks(mspace msp, int enable);
579 
580 #if !NO_MALLINFO
581 /*
582   mspace_mallinfo behaves as mallinfo, but reports properties of
583   the given space.
584 */
585 struct mallinfo mspace_mallinfo(mspace msp);
586 #endif /* NO_MALLINFO */
587 
588 /*
589   An alias for mallopt.
590 */
591 int mspace_mallopt(int, int);
592 
593 /*
594   The following operate identically to their malloc counterparts
595   but operate only for the given mspace argument
596 */
597 void* mspace_malloc(mspace msp, size_t bytes);
598 void mspace_free(mspace msp, void* mem);
599 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
600 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
601 void* mspace_realloc_in_place(mspace msp, void* mem, size_t newsize);
602 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
603 void** mspace_independent_calloc(mspace msp, size_t n_elements,
604                                  size_t elem_size, void* chunks[]);
605 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
606                                    size_t sizes[], void* chunks[]);
607 size_t mspace_bulk_free(mspace msp, void**, size_t n_elements);
608 size_t mspace_usable_size(const void* mem);
609 void mspace_malloc_stats(mspace msp);
610 int mspace_trim(mspace msp, size_t pad);
611 size_t mspace_footprint(mspace msp);
612 size_t mspace_max_footprint(mspace msp);
613 size_t mspace_footprint_limit(mspace msp);
614 size_t mspace_set_footprint_limit(mspace msp, size_t bytes);
615 void mspace_inspect_all(mspace msp,
616                         void(*handler)(void *, void *, size_t, void*),
617                         void* arg);
618 #endif  /* MSPACES */
619 
620 #ifdef __cplusplus
621 };  /* end of extern "C" */
622 #endif
623 
624 #endif /* MALLOC_280_H */
625