// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// kernel\eka\include\dla.h
//
// Uses malloc (aka dlmalloc) written by Doug Lea, version 2.8.4
//

#ifndef __DLA__
#define __DLA__

#define DEFAULT_TRIM_THRESHOLD ((size_t)4U * (size_t)1024U)

#define MSPACES 0
#define HAVE_MORECORE 1
#define MORECORE_CONTIGUOUS 1
#define HAVE_MMAP 0
#define HAVE_MREMAP 0
#define DEFAULT_GRANULARITY (4096U)
#define FOOTERS 0
#define USE_LOCKS 0
#define INSECURE 1
#define NO_MALLINFO 0

#define LACKS_SYS_TYPES_H
#ifndef LACKS_SYS_TYPES_H
#include <sys/types.h> /* For size_t */
#else
#ifndef _SIZE_T_DECLARED
typedef unsigned int size_t;
#define _SIZE_T_DECLARED
#endif
#endif /* LACKS_SYS_TYPES_H */

/* The maximum possible size_t value has all bits set */
#define MAX_SIZE_T (~(size_t)0)

#ifndef ONLY_MSPACES
#define ONLY_MSPACES 0
#endif /* ONLY_MSPACES */

#ifndef MSPACES
#if ONLY_MSPACES
#define MSPACES 1
#else /* ONLY_MSPACES */
#define MSPACES 0
#endif /* ONLY_MSPACES */
#endif /* MSPACES */

//#ifndef MALLOC_ALIGNMENT
// #define MALLOC_ALIGNMENT ((size_t)8U)
//#endif /* MALLOC_ALIGNMENT */

#ifndef FOOTERS
#define FOOTERS 0
#endif /* FOOTERS */

#ifndef ABORT
// #define ABORT abort()
// #define ABORT User::Invariant() // redefined so euser isn't dependent on oe
#define ABORT HEAP_PANIC(ETHeapBadCellAddress)
#endif /* ABORT */

#ifndef PROCEED_ON_ERROR
#define PROCEED_ON_ERROR 0
#endif /* PROCEED_ON_ERROR */

#ifndef USE_LOCKS
#define USE_LOCKS 0
#endif /* USE_LOCKS */

#ifndef INSECURE
#define INSECURE 0
#endif /* INSECURE */

#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif /* HAVE_MMAP */

#ifndef MMAP_CLEARS
#define MMAP_CLEARS 1
#endif /* MMAP_CLEARS */

#ifndef HAVE_MREMAP
#ifdef linux
#define HAVE_MREMAP 1
#else /* linux */
#define HAVE_MREMAP 0
#endif /* linux */
#endif /* HAVE_MREMAP */

#ifndef MALLOC_FAILURE_ACTION
//#define MALLOC_FAILURE_ACTION errno = ENOMEM;
#define MALLOC_FAILURE_ACTION ;
#endif /* MALLOC_FAILURE_ACTION */

#ifndef HAVE_MORECORE
#if ONLY_MSPACES
#define HAVE_MORECORE 1 /*AMOD: has changed */
#else /* ONLY_MSPACES */
#define HAVE_MORECORE 1
#endif /* ONLY_MSPACES */
#endif /* HAVE_MORECORE */

#if !HAVE_MORECORE
#define MORECORE_CONTIGUOUS 0
#else /* !HAVE_MORECORE */
#ifndef MORECORE
#define MORECORE DLAdjust
#endif /* MORECORE */
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 0
#endif /* MORECORE_CONTIGUOUS */
#endif /* !HAVE_MORECORE */

#ifndef DEFAULT_GRANULARITY
#if MORECORE_CONTIGUOUS
#define DEFAULT_GRANULARITY 4096 /* 0 means to compute in init_mparams */
#else /* MORECORE_CONTIGUOUS */
#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
#endif /* MORECORE_CONTIGUOUS */
#endif /* DEFAULT_GRANULARITY */

#ifndef DEFAULT_TRIM_THRESHOLD
#ifndef MORECORE_CANNOT_TRIM
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#else /* MORECORE_CANNOT_TRIM */
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
#endif /* MORECORE_CANNOT_TRIM */
#endif /* DEFAULT_TRIM_THRESHOLD */

#ifndef DEFAULT_MMAP_THRESHOLD
#if HAVE_MMAP
#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
#else /* HAVE_MMAP */
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
#endif /* HAVE_MMAP */
#endif /* DEFAULT_MMAP_THRESHOLD */

#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 0
#endif /* USE_BUILTIN_FFS */

#ifndef USE_DEV_RANDOM
#define USE_DEV_RANDOM 0
#endif /* USE_DEV_RANDOM */

#ifndef NO_MALLINFO
#define NO_MALLINFO 0
#endif /* NO_MALLINFO */
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif /* MALLINFO_FIELD_TYPE */

/*
  mallopt tuning options. SVID/XPG defines four standard parameter
  numbers for mallopt, normally defined in malloc.h. None of these
  are used in this malloc, so setting them has no effect. But this
  malloc does support the following options.
*/

#define M_TRIM_THRESHOLD (-1)
#define M_GRANULARITY (-2)
#define M_MMAP_THRESHOLD (-3)

#if !NO_MALLINFO
/*
  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing usage properties and
  statistics. It should work on any system that has a
  /usr/include/malloc.h defining struct mallinfo. The main
  declaration needed is the mallinfo struct that is returned (by-copy)
  by mallinfo(). The mallinfo struct contains a bunch of fields that
  are not even meaningful in this version of malloc. These fields
  are instead filled by mallinfo() with other numbers that might be of
  interest.

  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  /usr/include/malloc.h file that includes a declaration of struct
  mallinfo. If so, it is included; else a compliant version is
  declared below. These must be precisely the same for mallinfo() to
  work. The original SVID version of this struct, defined on most
  systems with mallinfo, declares all fields as ints. But some others
  define as unsigned long. If your system defines the fields using a
  type of different width than listed here, you MUST #include your
  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
*/

/* #define HAVE_USR_INCLUDE_MALLOC_H */

#ifdef HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#else /* HAVE_USR_INCLUDE_MALLOC_H */

struct mallinfo {
    MALLINFO_FIELD_TYPE iArena;     /* non-mmapped space allocated from system */
    MALLINFO_FIELD_TYPE iOrdblks;   /* number of free chunks */
    MALLINFO_FIELD_TYPE iSmblks;    /* always 0 */
    MALLINFO_FIELD_TYPE iHblks;     /* always 0 */
    MALLINFO_FIELD_TYPE iHblkhd;    /* space in mmapped regions */
    MALLINFO_FIELD_TYPE iUsmblks;   /* maximum total allocated space */
    MALLINFO_FIELD_TYPE iFsmblks;   /* always 0 */
    MALLINFO_FIELD_TYPE iUordblks;  /* total allocated space */
    MALLINFO_FIELD_TYPE iFordblks;  /* total free space */
    MALLINFO_FIELD_TYPE iKeepcost;  /* releasable (via malloc_trim) space */
    MALLINFO_FIELD_TYPE iCellCount; /* number of chunks allocated */
    };
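
/*
  Usage sketch (illustrative only, not part of the original header). The
  stats entry point itself lives outside this file, so the mallinfo() call
  below is an assumption based on stock dlmalloc; the two fields ports most
  often act on are iUordblks and iKeepcost:

    struct mallinfo info = mallinfo();  // assumed entry point
    size_t inUse = info.iUordblks;      // total allocated space
    size_t slack = info.iKeepcost;      // releasable via malloc_trim
*/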

#endif /* HAVE_USR_INCLUDE_MALLOC_H */
#endif /* NO_MALLINFO */

#if MSPACES
typedef void* mspace;
#endif /* MSPACES */

#if 0

#include <stdio.h> /* for printing in malloc_stats */

#ifndef LACKS_ERRNO_H
#include <errno.h> /* for MALLOC_FAILURE_ACTION */
#endif /* LACKS_ERRNO_H */

#if FOOTERS
#include <time.h> /* for iMagic initialization */
#endif /* FOOTERS */

#ifndef LACKS_STDLIB_H
#include <stdlib.h> /* for abort() */
#endif /* LACKS_STDLIB_H */

#if !defined(ASSERT)
#define ASSERT(x) __ASSERT_DEBUG(x, HEAP_PANIC(ETHeapBadCellAddress))
#endif

#ifndef LACKS_STRING_H
#include <string.h> /* for memset etc */
#endif /* LACKS_STRING_H */

#if USE_BUILTIN_FFS
#ifndef LACKS_STRINGS_H
#include <strings.h> /* for ffs */
#endif /* LACKS_STRINGS_H */
#endif /* USE_BUILTIN_FFS */

#if HAVE_MMAP
#ifndef LACKS_SYS_MMAN_H
#include <sys/mman.h> /* for mmap */
#endif /* LACKS_SYS_MMAN_H */
#ifndef LACKS_FCNTL_H
#include <fcntl.h>
#endif /* LACKS_FCNTL_H */
#endif /* HAVE_MMAP */

#if HAVE_MORECORE
#ifndef LACKS_UNISTD_H
#include <unistd.h> /* for sbrk */
extern void* sbrk(size_t);
#else /* LACKS_UNISTD_H */
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
extern void* sbrk(ptrdiff_t);
/*Amod: sbrk is not defined on WIN32; need to check on Symbian*/
#endif /* FreeBSD etc */
#endif /* LACKS_UNISTD_H */
#endif /* HAVE_MORECORE */

#endif

/*AMOD: For MALLOC_GETPAGESIZE*/
#if 0 // replaced with GET_PAGE_SIZE() defined in heap.cpp
#ifndef WIN32
#ifndef MALLOC_GETPAGESIZE
#ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
#ifndef _SC_PAGE_SIZE
#define _SC_PAGE_SIZE _SC_PAGESIZE
#endif
#endif
#ifdef _SC_PAGE_SIZE
#define MALLOC_GETPAGESIZE sysconf(_SC_PAGE_SIZE)
#else
#if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
extern size_t getpagesize();
#define MALLOC_GETPAGESIZE getpagesize()
#else
#ifdef WIN32 /* use supplied emulation of getpagesize */
#define MALLOC_GETPAGESIZE getpagesize()
#else
#ifndef LACKS_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef EXEC_PAGESIZE
#define MALLOC_GETPAGESIZE EXEC_PAGESIZE
#else
#ifdef NBPG
#ifndef CLSIZE
#define MALLOC_GETPAGESIZE NBPG
#else
#define MALLOC_GETPAGESIZE (NBPG * CLSIZE)
#endif
#else
#ifdef NBPC
#define MALLOC_GETPAGESIZE NBPC
#else
#ifdef PAGESIZE
#define MALLOC_GETPAGESIZE PAGESIZE
#else /* just guess */
#define MALLOC_GETPAGESIZE ((size_t)4096U)
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
/*AMOD: For MALLOC_GETPAGESIZE*/

/* ------------------- size_t and alignment properties -------------------- */

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE (sizeof(size_t))
#define SIZE_T_BITSIZE (sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO ((size_t)0)
#define SIZE_T_ONE ((size_t)1)
#define SIZE_T_TWO ((size_t)2)
#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)

/* True if address a has acceptable alignment */
//#define IS_ALIGNED(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
#define IS_ALIGNED(A) (((unsigned int)((A)) & (CHUNK_ALIGN_MASK)) == 0)

/* the number of bytes to offset an address to align it */
#define ALIGN_OFFSET(A)\
    ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
    ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
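
/*
  Worked example (illustrative only; assumes MALLOC_ALIGNMENT is 8, as in
  the commented-out default above): for A = 0x1004,
    (size_t)A & CHUNK_ALIGN_MASK = 0x1004 & 7 = 4, so
    ALIGN_OFFSET(A) = (8 - 4) & 7 = 4
  i.e. adding 4 bytes brings A to the 8-byte-aligned address 0x1008.
  For an already aligned A the first branch yields 0.
*/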

/* -------------------------- MMAP preliminaries ------------------------- */

/*
  If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
  checks to fail so compiler optimizer can delete code rather than
  using so many "#if"s.
*/


/* MORECORE and MMAP must return MFAIL on failure */
#define MFAIL ((void*)(MAX_SIZE_T))
#define CMFAIL ((TUint8*)(MFAIL)) /* defined for convenience */

#if !HAVE_MMAP
#define IS_MMAPPED_BIT (SIZE_T_ZERO)
#define USE_MMAP_BIT (SIZE_T_ZERO)
#define CALL_MMAP(s) MFAIL
#define CALL_MUNMAP(a, s) (-1)
#define DIRECT_MMAP(s) MFAIL
#else /* !HAVE_MMAP */
#define IS_MMAPPED_BIT (SIZE_T_ONE)
#define USE_MMAP_BIT (SIZE_T_ONE)
#ifndef WIN32
#define CALL_MUNMAP(a, s) DLUMMAP((a),(s)) /*munmap((a), (s))*/
#define MMAP_PROT (PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif /* MAP_ANON */
#ifdef MAP_ANONYMOUS
#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
#define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, (int)MMAP_FLAGS, -1, 0)
#else /* MAP_ANONYMOUS */
/*
  Nearly all versions of mmap support MAP_ANONYMOUS, so the following
  is unlikely to be needed, but is supplied just in case.
*/
#define MMAP_FLAGS (MAP_PRIVATE)
//static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
#define CALL_MMAP(s) DLMMAP(s)
/*#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
    (dev_zero_fd = open("/dev/zero", O_RDWR), \
    mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
    mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
*/
#define CALL_REMAP(a, s, d) DLREMAP((a),(s),(d))
#endif /* MAP_ANONYMOUS */
#define DIRECT_MMAP(s) CALL_MMAP(s)
#else /* WIN32 */
#define CALL_MMAP(s) win32mmap(s)
#define CALL_MUNMAP(a, s) win32munmap((a), (s))
#define DIRECT_MMAP(s) win32direct_mmap(s)
#endif /* WIN32 */
#endif /* HAVE_MMAP */

#if HAVE_MMAP && HAVE_MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
#else /* HAVE_MMAP && HAVE_MREMAP */
#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
#endif /* HAVE_MMAP && HAVE_MREMAP */

#if HAVE_MORECORE
#define CALL_MORECORE(S) SetBrk(S)
#else /* HAVE_MORECORE */
#define CALL_MORECORE(S) MFAIL
#endif /* HAVE_MORECORE */

/* mstate bit set if contiguous morecore disabled or failed */
#define USE_NONCONTIGUOUS_BIT (4U)

/* segment bit set in create_mspace_with_base */
#define EXTERN_BIT (8U)


#if USE_LOCKS
/*
  When locks are defined, there are up to two global locks:
  * If HAVE_MORECORE, iMorecoreMutex protects sequences of calls to
    MORECORE. In many cases sys_alloc requires two calls that should
    not be interleaved with calls by other threads. This does not
    protect against direct calls to MORECORE by other threads not
    using this lock, so there is still code to cope as best we can
    with interference.
  * iMagicInitMutex ensures that mparams.iMagic and other
    unique mparams values are initialized only once.
*/
#ifndef WIN32
/* By default use posix locks */
#include <pthread.h>
#define MLOCK_T pthread_mutex_t
#define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
#define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
#define RELEASE_LOCK(l) pthread_mutex_unlock(l)

#if HAVE_MORECORE
//static MLOCK_T iMorecoreMutex = PTHREAD_MUTEX_INITIALIZER;
#endif /* HAVE_MORECORE */
//static MLOCK_T iMagicInitMutex = PTHREAD_MUTEX_INITIALIZER;
#else /* WIN32 */
#define MLOCK_T long
#define INITIAL_LOCK(l) *(l)=0
#define ACQUIRE_LOCK(l) win32_acquire_lock(l)
#define RELEASE_LOCK(l) win32_release_lock(l)
#if HAVE_MORECORE
static MLOCK_T iMorecoreMutex;
#endif /* HAVE_MORECORE */
static MLOCK_T iMagicInitMutex;
#endif /* WIN32 */
#define USE_LOCK_BIT (2U)
#else /* USE_LOCKS */
#define USE_LOCK_BIT (0U)
#define INITIAL_LOCK(l)
#endif /* USE_LOCKS */

#if USE_LOCKS && HAVE_MORECORE
#define ACQUIRE_MORECORE_LOCK(M) ACQUIRE_LOCK((M->iMorecoreMutex)/*&iMorecoreMutex*/);
#define RELEASE_MORECORE_LOCK(M) RELEASE_LOCK((M->iMorecoreMutex)/*&iMorecoreMutex*/);
#else /* USE_LOCKS && HAVE_MORECORE */
#define ACQUIRE_MORECORE_LOCK(M)
#define RELEASE_MORECORE_LOCK(M)
#endif /* USE_LOCKS && HAVE_MORECORE */

#if USE_LOCKS
/*Currently not supporting this*/
#define ACQUIRE_MAGIC_INIT_LOCK(M) ACQUIRE_LOCK(((M)->iMagicInitMutex));
//AMOD: changed #define ACQUIRE_MAGIC_INIT_LOCK()
//#define RELEASE_MAGIC_INIT_LOCK()
#define RELEASE_MAGIC_INIT_LOCK(M) RELEASE_LOCK(((M)->iMagicInitMutex));
#else /* USE_LOCKS */
#define ACQUIRE_MAGIC_INIT_LOCK(M)
#define RELEASE_MAGIC_INIT_LOCK(M)
#endif /* USE_LOCKS */

/* CHUNK representation */
struct malloc_chunk {
    size_t iPrevFoot;          /* Size of previous chunk (if free). */
    size_t iHead;              /* Size and inuse bits. */
    struct malloc_chunk* iFd;  /* double links -- used only if free. */
    struct malloc_chunk* iBk;
    };

typedef struct malloc_chunk mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
typedef unsigned int bindex_t;        /* Described below */
typedef unsigned int binmap_t;        /* Described below */
typedef unsigned int flag_t;          /* The type of various bit flag sets */
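
/*
  Layout sketch (illustrative; assumes a 32-bit build where size_t is 4
  bytes): sizeof(mchunk) is 16, and an in-use chunk overlays iFd/iBk with
  user data, so the payload of chunk p starts at (TUint8*)p + 8 (see
  CHUNK2MEM below). For a free chunk the same two words instead link it
  into a doubly-linked free list, and the iPrevFoot word of the *next*
  chunk echoes this chunk's size -- the classic boundary-tag trick.
*/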


/* ------------------- Chunks sizes and alignments ----------------------- */
#define MCHUNK_SIZE (sizeof(mchunk))

//#if FOOTERS
// #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
//#else /* FOOTERS */
// #define CHUNK_OVERHEAD (SIZE_T_SIZE)
//#endif /* FOOTERS */

/* MMapped chunks need a second word of overhead ... */
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
/* ... and additional padding for fake next-chunk at foot */
#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)

/* The smallest size we can malloc is an aligned minimal chunk */
#define MIN_CHUNK_SIZE ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* conversion from malloc headers to user pointers, and back */
#define CHUNK2MEM(p) ((void*)((TUint8*)(p) + TWO_SIZE_T_SIZES))
#define MEM2CHUNK(mem) ((mchunkptr)((TUint8*)(mem) - TWO_SIZE_T_SIZES))
/* chunk associated with aligned address A */
#define ALIGN_AS_CHUNK(A) (mchunkptr)((A) + ALIGN_OFFSET(CHUNK2MEM(A)))

/* Bounds on request (not chunk) sizes. */
#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

/* pad request bytes into a usable size */
#define PAD_REQUEST(req) (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* pad request, checking for minimum (but not maximum) */
#define REQUEST2SIZE(req) (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : PAD_REQUEST(req))
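
/*
  Worked example (illustrative; assumes 4-byte size_t, 8-byte alignment,
  and CHUNK_OVERHEAD of one size_t as in stock dlmalloc without FOOTERS;
  CHUNK_OVERHEAD itself is defined outside this header):
    MIN_CHUNK_SIZE  = (16 + 7) & ~7 = 16
    MIN_REQUEST     = 16 - 4 - 1 = 11
    PAD_REQUEST(12) = (12 + 4 + 7) & ~7 = 16
    REQUEST2SIZE(5) = 16   (5 < MIN_REQUEST, so the minimum chunk is used)
  so a 12-byte request and a 5-byte request both consume one 16-byte chunk.
*/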

/* ------------------ Operations on iHead and foot fields ----------------- */

/*
  The iHead field of a chunk is or'ed with PINUSE_BIT when the previous
  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in
  use. If the chunk was obtained with mmap, the iPrevFoot field has
  IS_MMAPPED_BIT set; otherwise it holds the offset from the base of the
  mmapped region to the base of the chunk.
*/
#define PINUSE_BIT (SIZE_T_ONE)
#define CINUSE_BIT (SIZE_T_TWO)
#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)

/* Head value for fenceposts */
#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)

/* extraction of fields from iHead words */
#define CINUSE(p) ((p)->iHead & CINUSE_BIT)
#define PINUSE(p) ((p)->iHead & PINUSE_BIT)
#define CHUNKSIZE(p) ((p)->iHead & ~(INUSE_BITS))

#define CLEAR_PINUSE(p) ((p)->iHead &= ~PINUSE_BIT)
#define CLEAR_CINUSE(p) ((p)->iHead &= ~CINUSE_BIT)
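
/*
  Worked example (illustrative): a 24-byte free chunk whose predecessor is
  in use has iHead = 24 | PINUSE_BIT = 0x19. Then
    CHUNKSIZE(p) = 0x19 & ~3 = 24
    PINUSE(p)    = 1   (previous chunk in use)
    CINUSE(p)    = 0   (this chunk is free)
  Because chunk sizes are always multiples of MALLOC_ALIGNMENT, the low
  bits of iHead are free to carry these two flags.
*/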

/* Treat space at ptr +/- offset as a chunk */
#define CHUNK_PLUS_OFFSET(p, s) ((mchunkptr)(((TUint8*)(p)) + (s)))
#define CHUNK_MINUS_OFFSET(p, s) ((mchunkptr)(((TUint8*)(p)) - (s)))

/* Ptr to next or previous physical malloc_chunk. */
#define NEXT_CHUNK(p) ((mchunkptr)( ((TUint8*)(p)) + ((p)->iHead & ~INUSE_BITS)))
#define PREV_CHUNK(p) ((mchunkptr)( ((TUint8*)(p)) - ((p)->iPrevFoot) ))
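
/*
  Traversal sketch (illustrative): NEXT_CHUNK walks forward using this
  chunk's own size stored in iHead; PREV_CHUNK walks backward using the
  size the previous chunk recorded into our iPrevFoot word, which is only
  valid when PINUSE(q) is clear, i.e. the previous chunk is free. In that
  case
    NEXT_CHUNK(PREV_CHUNK(q)) == q
  which is what the allocator's free-chunk coalescing relies on.
*/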

/* extract next chunk's PINUSE bit */
#define NEXT_PINUSE(p) ((NEXT_CHUNK(p)->iHead) & PINUSE_BIT)

/* Get/set size at footer */
#define GET_FOOT(p, s) (((mchunkptr)((TUint8*)(p) + (s)))->iPrevFoot)
#define SET_FOOT(p, s) (((mchunkptr)((TUint8*)(p) + (s)))->iPrevFoot = (s))

/* Set size, PINUSE bit, and foot */
#define SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, s) ((p)->iHead = (s|PINUSE_BIT), SET_FOOT(p, s))

/* Set size, PINUSE bit, foot, and clear next PINUSE */
#define SET_FREE_WITH_PINUSE(p, s, n) (CLEAR_PINUSE(n), SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, s))

#define IS_MMAPPED(p) (!((p)->iHead & PINUSE_BIT) && ((p)->iPrevFoot & IS_MMAPPED_BIT))

/* Get the internal overhead associated with chunk p */
#define OVERHEAD_FOR(p) (IS_MMAPPED(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)

/* Return true if malloced space is not necessarily cleared */
#if MMAP_CLEARS
#define CALLOC_MUST_CLEAR(p) (!IS_MMAPPED(p))
#else /* MMAP_CLEARS */
#define CALLOC_MUST_CLEAR(p) (1)
#endif /* MMAP_CLEARS */

/* ---------------------- Overlaid data structures ----------------------- */
struct malloc_tree_chunk {
    /* The first four fields must be compatible with malloc_chunk */
    size_t iPrevFoot;
    size_t iHead;
    struct malloc_tree_chunk* iFd;
    struct malloc_tree_chunk* iBk;

    struct malloc_tree_chunk* iChild[2];
    struct malloc_tree_chunk* iParent;
    bindex_t iIndex;
    };

typedef struct malloc_tree_chunk tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */

/* A little helper macro for trees */
#define LEFTMOST_CHILD(t) ((t)->iChild[0] != 0? (t)->iChild[0] : (t)->iChild[1])
/* Segment structure */
//struct malloc_segment {
//    TUint8* iBase; /* base address */
//    size_t iSize;  /* allocated size */
//};

#define IS_MMAPPED_SEGMENT(S) ((S)->iSflags & IS_MMAPPED_BIT)
#define IS_EXTERN_SEGMENT(S) ((S)->iSflags & EXTERN_BIT)

typedef struct malloc_segment msegment;
typedef struct malloc_segment* msegmentptr;

/* Malloc state data structure */

//#define NSMALLBINS (32U)
//#define NTREEBINS (32U)
#define SMALLBIN_SHIFT (3U)
#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT (8U)
#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
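
/*
  Derived values (illustrative; NSMALLBINS/NTREEBINS are defined outside
  this header, 32 each per the commented-out defaults above):
    MIN_LARGE_SIZE = 1 << 8 = 256
    MAX_SMALL_SIZE = 255
  so chunks under 256 bytes live in the 8-byte-spaced small bins, and
  anything at or above 256 bytes goes to the bitwise-trie tree bins.
*/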

/*struct malloc_state {
    binmap_t iSmallMap;
    binmap_t iTreeMap;
    size_t iDvSize;
    size_t iTopSize;
    mchunkptr iDv;
    mchunkptr iTop;
    size_t iTrimCheck;
    mchunkptr iSmallBins[(NSMALLBINS+1)*2];
    tbinptr iTreeBins[NTREEBINS];
    msegment iSeg;
};*/
/*
struct malloc_state {
    binmap_t iSmallMap;
    binmap_t iTreeMap;
    size_t iDvSize;
    size_t iTopSize;
    TUint8* iLeastAddr;
    mchunkptr iDv;
    mchunkptr iTop;
    size_t iTrimCheck;
    size_t iMagic;
    mchunkptr iSmallBins[(NSMALLBINS+1)*2];
    tbinptr iTreeBins[NTREEBINS];
    size_t iFootprint;
    size_t iMaxFootprint;
    flag_t iMflags;
#if USE_LOCKS
    MLOCK_T iMutex;
    MLOCK_T iMagicInitMutex;
    MLOCK_T iMorecoreMutex;
#endif
    msegment iSeg;
};
*/
typedef struct malloc_state* mstate;

/* ------------- Global malloc_state and malloc_params ------------------- */

/*
  malloc_params holds global properties, including those that can be
  dynamically set using mallopt. There is a single instance, mparams,
  initialized in init_mparams.
*/

struct malloc_params {
    size_t iMagic;
    size_t iPageSize;
    size_t iGranularity;
    size_t iMmapThreshold;
    size_t iTrimThreshold;
    flag_t iDefaultMflags;
#if USE_LOCKS
    MLOCK_T iMagicInitMutex;
#endif /* USE_LOCKS */
    };

/* The global malloc_state used for all non-"mspace" calls */
/*AMOD: Need to check this as it will be a member of the class*/

//static struct malloc_state _gm_;
//#define GM (&_gm_)

//#define IS_GLOBAL(M) ((M) == &_gm_)
/*AMOD: has changed*/
#define IS_GLOBAL(M) ((M) == GM)
#define IS_INITIALIZED(M) ((M)->iTop != 0)

/* -------------------------- system alloc setup ------------------------- */

/* Operations on iMflags */

#define USE_LOCK(M) ((M)->iMflags & USE_LOCK_BIT)
#define ENABLE_LOCK(M) ((M)->iMflags |= USE_LOCK_BIT)
#define DISABLE_LOCK(M) ((M)->iMflags &= ~USE_LOCK_BIT)

#define USE_MMAP(M) ((M)->iMflags & USE_MMAP_BIT)
#define ENABLE_MMAP(M) ((M)->iMflags |= USE_MMAP_BIT)
#define DISABLE_MMAP(M) ((M)->iMflags &= ~USE_MMAP_BIT)

#define USE_NONCONTIGUOUS(M) ((M)->iMflags & USE_NONCONTIGUOUS_BIT)
#define DISABLE_CONTIGUOUS(M) ((M)->iMflags |= USE_NONCONTIGUOUS_BIT)

#define SET_LOCK(M,L) ((M)->iMflags = (L)? ((M)->iMflags | USE_LOCK_BIT) : ((M)->iMflags & ~USE_LOCK_BIT))

/* page-align a size */
#define PAGE_ALIGN(S) (((S) + (mparams.iPageSize)) & ~(mparams.iPageSize - SIZE_T_ONE))

/* iGranularity-align a size */
#define GRANULARITY_ALIGN(S) (((S) + (mparams.iGranularity)) & ~(mparams.iGranularity - SIZE_T_ONE))

#define IS_PAGE_ALIGNED(S) (((size_t)(S) & (mparams.iPageSize - SIZE_T_ONE)) == 0)
#define IS_GRANULARITY_ALIGNED(S) (((size_t)(S) & (mparams.iGranularity - SIZE_T_ONE)) == 0)
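
/*
  Worked example (illustrative; assumes mparams.iPageSize == 4096):
    PAGE_ALIGN(4095) = (4095 + 4096) & ~4095 = 4096
    PAGE_ALIGN(4096) = (4096 + 4096) & ~4095 = 8192
  Note that, as written, an already page-aligned size is rounded up by a
  whole extra page (unlike the more common (S + pagesize - 1) form).
*/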

/* True if segment S holds address A */
#define SEGMENT_HOLDS(S, A) ((TUint8*)(A) >= S->iBase && (TUint8*)(A) < S->iBase + S->iSize)

#ifndef MORECORE_CANNOT_TRIM
#define SHOULD_TRIM(M,s) ((s) > (M)->iTrimCheck)
#else /* MORECORE_CANNOT_TRIM */
#define SHOULD_TRIM(M,s) (0)
#endif /* MORECORE_CANNOT_TRIM */

/*
  TOP_FOOT_SIZE is padding at the end of a segment, including space
  that may be needed to place segment records and fenceposts when new
  noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE (ALIGN_OFFSET(CHUNK2MEM(0))+PAD_REQUEST(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)

#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
/* ------------------------------- Hooks -------------------------------- */

/*
  PREACTION should be defined to return 0 on success, and nonzero on
  failure. If you are not using locking, you can redefine these to do
  anything you like.
*/

#if USE_LOCKS
/* Ensure locks are initialized */
#define GLOBALLY_INITIALIZE() (mparams.iPageSize == 0 && init_mparams())
#define PREACTION(M) (USE_LOCK((M))?(ACQUIRE_LOCK((M)->iMutex),0):0) /*Action to take, like lock, before alloc*/
#define POSTACTION(M) { if (USE_LOCK(M)) RELEASE_LOCK((M)->iMutex); }

#else /* USE_LOCKS */
#ifndef PREACTION
#define PREACTION(M) (0)
#endif /* PREACTION */
#ifndef POSTACTION
#define POSTACTION(M)
#endif /* POSTACTION */
#endif /* USE_LOCKS */

/*
  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
  USAGE_ERROR_ACTION is triggered on detected bad frees and
  reallocs. The argument p is an address that might have triggered the
  fault. It is ignored by the two predefined actions, but might be
  useful in custom actions that try to help diagnose errors.
*/

#if PROCEED_ON_ERROR
/* A count of the number of corruption errors causing resets */
int malloc_corruption_error_count;
/* default corruption action */
static void ResetOnError(mstate m);
#define CORRUPTION_ERROR_ACTION(m) ResetOnError(m)
#define USAGE_ERROR_ACTION(m, p)
#else /* PROCEED_ON_ERROR */
#ifndef CORRUPTION_ERROR_ACTION
#define CORRUPTION_ERROR_ACTION(m) ABORT
#endif /* CORRUPTION_ERROR_ACTION */
#ifndef USAGE_ERROR_ACTION
#define USAGE_ERROR_ACTION(m,p) ABORT
#endif /* USAGE_ERROR_ACTION */
#endif /* PROCEED_ON_ERROR */


#ifdef _DEBUG
#define CHECK_FREE_CHUNK(M,P) DoCheckFreeChunk(M,P)
#define CHECK_INUSE_CHUNK(M,P) DoCheckInuseChunk(M,P)
#define CHECK_TOP_CHUNK(M,P) DoCheckTopChunk(M,P)
#define CHECK_MALLOCED_CHUNK(M,P,N) DoCheckMallocedChunk(M,P,N)
#define CHECK_MMAPPED_CHUNK(M,P) DoCheckMmappedChunk(M,P)
#define CHECK_MALLOC_STATE(M) DoCheckMallocState(M)
#else /* _DEBUG */
#define CHECK_FREE_CHUNK(M,P)
#define CHECK_INUSE_CHUNK(M,P)
#define CHECK_MALLOCED_CHUNK(M,P,N)
#define CHECK_MMAPPED_CHUNK(M,P)
#define CHECK_MALLOC_STATE(M)
#define CHECK_TOP_CHUNK(M,P)
#endif /* _DEBUG */

/* ---------------------------- Indexing Bins ---------------------------- */

#define IS_SMALL(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define SMALL_INDEX(s) ((s) >> SMALLBIN_SHIFT)
#define SMALL_INDEX2SIZE(i) ((i) << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX (SMALL_INDEX(MIN_CHUNK_SIZE))
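
/*
  Worked example (illustrative): for a 40-byte chunk,
    SMALL_INDEX(40)     = 40 >> 3 = 5
    SMALL_INDEX2SIZE(5) = 5 << 3 = 40
    IS_SMALL(40)        = (40 >> 3) < NSMALLBINS, true for NSMALLBINS = 32
  i.e. small bin i holds chunks of exactly 8*i bytes, with the small bins
  covering sizes up to MAX_SMALL_SIZE (255).
*/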

/* addressing by index. See above about smallbin repositioning */
#define SMALLBIN_AT(M, i) ((sbinptr)((TUint8*)&((M)->iSmallBins[(i)<<1])))
#define TREEBIN_AT(M,i) (&((M)->iTreeBins[i]))


/* Bit representing maximum resolved size in a treebin at i */
#define BIT_FOR_TREE_INDEX(i) (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define LEFTSHIFT_FOR_TREE_INDEX(i) ((i == NTREEBINS-1)? 0 : ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define MINSIZE_FOR_TREE_INDEX(i) ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
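
/*
  Worked example (illustrative; TREEBIN_SHIFT is 8): consecutive tree bins
  cover size ranges that double every two indices:
    MINSIZE_FOR_TREE_INDEX(0) = 1 << 8         = 256
    MINSIZE_FOR_TREE_INDEX(1) = 256 | (1 << 7) = 384
    MINSIZE_FOR_TREE_INDEX(2) = 1 << 9         = 512
    MINSIZE_FOR_TREE_INDEX(3) = 512 | (1 << 8) = 768
  so, e.g., a 400-byte chunk belongs to tree bin 1 (covering 384..511).
*/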


/* ------------------------ Operations on bin maps ----------------------- */
/* bit corresponding to given index */
#define IDX2BIT(i) ((binmap_t)(1) << (i))
/* Mark/Clear bits with given index */
#define MARK_SMALLMAP(M,i) ((M)->iSmallMap |= IDX2BIT(i))
#define CLEAR_SMALLMAP(M,i) ((M)->iSmallMap &= ~IDX2BIT(i))
#define SMALLMAP_IS_MARKED(M,i) ((M)->iSmallMap & IDX2BIT(i))
#define MARK_TREEMAP(M,i) ((M)->iTreeMap |= IDX2BIT(i))
#define CLEAR_TREEMAP(M,i) ((M)->iTreeMap &= ~IDX2BIT(i))
#define TREEMAP_IS_MARKED(M,i) ((M)->iTreeMap & IDX2BIT(i))

/* isolate the least set bit of a bitmap */
#define LEAST_BIT(x) ((x) & -(x))

/* mask with all bits to left of least bit of x on */
#define LEFT_BITS(x) ((x<<1) | -(x<<1))

/* mask with all bits to left of or equal to least bit of x on */
#define SAME_OR_LEFT_BITS(x) ((x) | -(x))
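
/*
  Worked example (illustrative, 32-bit binmap_t): with x = 0b0110 (bins 1
  and 2 non-empty),
    LEAST_BIT(x) = 0b0110 & -0b0110 = 0b0010     (lowest non-empty bin)
    LEFT_BITS(IDX2BIT(2))         masks in bits 3 and above
    SAME_OR_LEFT_BITS(IDX2BIT(2)) also keeps bit 2 itself
  The allocator ANDs such masks with iSmallMap/iTreeMap to find the first
  usable bin at least as large as a request.
*/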

#if !INSECURE
/* Check if address a is at least as high as any from MORECORE or MMAP */
#define OK_ADDRESS(M, a) ((TUint8*)(a) >= (M)->iLeastAddr)
/* Check if address of next chunk n is higher than base chunk p */
#define OK_NEXT(p, n) ((TUint8*)(p) < (TUint8*)(n))
/* Check if p has its CINUSE bit on */
#define OK_CINUSE(p) CINUSE(p)
/* Check if p has its PINUSE bit on */
#define OK_PINUSE(p) PINUSE(p)
#else /* !INSECURE */
#define OK_ADDRESS(M, a) (1)
#define OK_NEXT(b, n) (1)
#define OK_CINUSE(p) (1)
#define OK_PINUSE(p) (1)
#endif /* !INSECURE */

#if (FOOTERS && !INSECURE)
/* Check if (alleged) mstate m has expected iMagic field */
#define OK_MAGIC(M) ((M)->iMagic == mparams.iMagic)
#else /* (FOOTERS && !INSECURE) */
#define OK_MAGIC(M) (1)
#endif /* (FOOTERS && !INSECURE) */

/* In gcc, use __builtin_expect to minimize impact of checks */
#if !INSECURE
#if defined(__GNUC__) && __GNUC__ >= 3
#define RTCHECK(e) __builtin_expect(e, 1)
#else /* GNUC */
#define RTCHECK(e) (e)
#endif /* GNUC */

#else /* !INSECURE */
#define RTCHECK(e) (1)
#endif /* !INSECURE */
/* macros to set up inuse chunks with or without footers */
#if !FOOTERS
#define MARK_INUSE_FOOT(M,p,s)
/* Set CINUSE bit and PINUSE bit of next chunk */
#define SET_INUSE(M,p,s) ((p)->iHead = (((p)->iHead & PINUSE_BIT)|s|CINUSE_BIT),((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT)
/* Set CINUSE and PINUSE of this chunk and PINUSE of next chunk */
#define SET_INUSE_AND_PINUSE(M,p,s) ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT),((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT)
/* Set size, CINUSE and PINUSE bit of this chunk */
#define SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(M, p, s) ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT))
#else /* FOOTERS */
/* Set foot of inuse chunk to be xor of mstate and seed */
#define MARK_INUSE_FOOT(M,p,s) (((mchunkptr)((TUint8*)(p) + (s)))->iPrevFoot = ((size_t)(M) ^ mparams.iMagic))
#define GET_MSTATE_FOR(p) ((mstate)(((mchunkptr)((TUint8*)(p)+(CHUNKSIZE(p))))->iPrevFoot ^ mparams.iMagic))
#define SET_INUSE(M,p,s)\
    ((p)->iHead = (((p)->iHead & PINUSE_BIT)|s|CINUSE_BIT),\
    (((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT), \
    MARK_INUSE_FOOT(M,p,s))
#define SET_INUSE_AND_PINUSE(M,p,s)\
    ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT),\
    (((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT),\
    MARK_INUSE_FOOT(M,p,s))
#define SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(M, p, s)\
    ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT),\
    MARK_INUSE_FOOT(M, p, s))
#endif /* !FOOTERS */


#if ONLY_MSPACES
#define INTERNAL_MALLOC(m, b) mspace_malloc(m, b)
#define INTERNAL_FREE(m, mem) mspace_free(m,mem);
#else /* ONLY_MSPACES */
#if MSPACES
#define INTERNAL_MALLOC(m, b) (m == GM)? dlmalloc(b) : mspace_malloc(m, b)
#define INTERNAL_FREE(m, mem) if (m == GM) dlfree(mem); else mspace_free(m,mem);
#else /* MSPACES */
#define INTERNAL_MALLOC(m, b) dlmalloc(b)
#define INTERNAL_FREE(m, mem) dlfree(mem)
#endif /* MSPACES */
#endif /* ONLY_MSPACES */

#ifndef NDEBUG
#define CHECKING 1
#endif
// #define HYSTERESIS 4
#define HYSTERESIS 1
#define HYSTERESIS_BYTES (2*PAGESIZE)
#define HYSTERESIS_GROW (HYSTERESIS*PAGESIZE)

#if CHECKING
#define CHECK(x) x
#else
#undef ASSERT
#define ASSERT(x) (void)0
#define CHECK(x) (void)0
#endif

#endif /*__DLA__*/