|
1 /* |
|
2 * Copyright (c) 2006 Nokia Corporation and/or its subsidiary(-ies). |
|
3 * All rights reserved. |
|
4 * This component and the accompanying materials are made available |
|
5 * under the terms of the License "Eclipse Public License v1.0" |
|
6 * which accompanies this distribution, and is available |
|
7 * at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
8 * |
|
9 * Initial Contributors: |
|
10 * Nokia Corporation - initial contribution. |
|
11 * |
|
12 * Contributors: |
|
13 * |
|
14 * Description: |
|
15 * |
|
16 * |
|
17 */ |
|
/*DLA.h*/
/*
 * Build configuration for the Doug Lea allocator (dlmalloc) as adapted
 * for the Symbian heap. The macros below pin the dlmalloc options for
 * this port; the allocator implementation itself lives in heap.cpp.
 */
#ifndef __DLA__
#define __DLA__

/* Trim (return core to the system) once the reclaimable tail exceeds 4KB. */
#define DEFAULT_TRIM_THRESHOLD ((size_t)4U * (size_t)1024U)

/* Fixed option set for the Symbian port. These are defined unconditionally
   here, so the stock #ifndef fallback defaults further below become no-ops. */
#define __SYMBIAN__
#define MSPACES 0             /* no independent mspaces; single heap state */
#define HAVE_MORECORE 1       /* core is grown via an sbrk-style callback */
#define MORECORE_CONTIGUOUS 1 /* successive core extensions are contiguous */
#define HAVE_MMAP 0           /* no mmap-backed large allocations */
#define HAVE_MREMAP 0
#define DEFAULT_GRANULARITY (4096U) /* grow core in 4K units */
#define FOOTERS 0             /* no per-chunk trailing footers */
#define USE_LOCKS 0           /* locking handled outside the allocator */
#define INSECURE 1            /* skip pointer-validity security checks */
#define NO_MALLINFO 0         /* keep the mallinfo statistics interface */

/* size_t: this port lacks <sys/types.h>, so declare size_t locally. */
#define LACKS_SYS_TYPES_H
#ifndef LACKS_SYS_TYPES_H
#include <sys/types.h> /* For size_t */
#else
#ifndef _SIZE_T_DECLARED
typedef unsigned int size_t; /* 32-bit target: size_t is a 32-bit unsigned */
#define _SIZE_T_DECLARED
#endif
#endif /* LACKS_SYS_TYPES_H */

/* The maximum possible size_t value has all bits set */
#define MAX_SIZE_T (~(size_t)0)
|
48 |
|
/*
 * Stock dlmalloc fallback defaults. Because the Symbian port defines most
 * options unconditionally at the top of this file, the #ifndef guards below
 * are largely no-ops here; they are retained to stay close to stock dlmalloc.
 */
#ifndef ONLY_MSPACES
#define ONLY_MSPACES 0
#endif /* ONLY_MSPACES */

#ifndef MSPACES
#if ONLY_MSPACES
#define MSPACES 1
#else /* ONLY_MSPACES */
#define MSPACES 0
#endif /* ONLY_MSPACES */
#endif /* MSPACES */

/* Alignment of returned blocks: 8 bytes. */
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)8U)
#endif /* MALLOC_ALIGNMENT */

#ifndef FOOTERS
#define FOOTERS 0
#endif /* FOOTERS */

/* On fatal error, panic via the kernel invariant rather than abort(). */
#ifndef ABORT
// #define ABORT abort()
#define ABORT User::Invariant()// redefined so euser isn't dependent on oe
#endif /* ABORT */

#ifndef ABORT_ON_ASSERT_FAILURE
#define ABORT_ON_ASSERT_FAILURE 1
#endif /* ABORT_ON_ASSERT_FAILURE */

#ifndef PROCEED_ON_ERROR
#define PROCEED_ON_ERROR 0
#endif /* PROCEED_ON_ERROR */

#ifndef USE_LOCKS
#define USE_LOCKS 0
#endif /* USE_LOCKS */

#ifndef INSECURE
#define INSECURE 0
#endif /* INSECURE */

#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif /* HAVE_MMAP */

#ifndef MMAP_CLEARS
#define MMAP_CLEARS 1
#endif /* MMAP_CLEARS */

#ifndef HAVE_MREMAP
#ifdef linux
#define HAVE_MREMAP 1
#else /* linux */
#define HAVE_MREMAP 0
#endif /* linux */
#endif /* HAVE_MREMAP */

/* No errno on this port: allocation failure performs no extra action. */
#ifndef MALLOC_FAILURE_ACTION
//#define MALLOC_FAILURE_ACTION errno = ENOMEM;
#define MALLOC_FAILURE_ACTION ;
#endif /* MALLOC_FAILURE_ACTION */

#ifndef HAVE_MORECORE
#if ONLY_MSPACES
#define HAVE_MORECORE 1 /*AMOD: has changed */
#else /* ONLY_MSPACES */
#define HAVE_MORECORE 1
#endif /* ONLY_MSPACES */
#endif /* HAVE_MORECORE */

/* When MORECORE is available, core adjustment is routed through DLAdjust. */
#if !HAVE_MORECORE
#define MORECORE_CONTIGUOUS 0
#else /* !HAVE_MORECORE */
#ifndef MORECORE
#define MORECORE DLAdjust
#endif /* MORECORE */
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 0
#endif /* MORECORE_CONTIGUOUS */
#endif /* !HAVE_MORECORE */

#ifndef DEFAULT_GRANULARITY
#if MORECORE_CONTIGUOUS
#define DEFAULT_GRANULARITY 4096 /* 0 means to compute in init_mparams */
#else /* MORECORE_CONTIGUOUS */
#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
#endif /* MORECORE_CONTIGUOUS */
#endif /* DEFAULT_GRANULARITY */

#ifndef DEFAULT_TRIM_THRESHOLD
#ifndef MORECORE_CANNOT_TRIM
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#else /* MORECORE_CANNOT_TRIM */
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
#endif /* MORECORE_CANNOT_TRIM */
#endif /* DEFAULT_TRIM_THRESHOLD */

#ifndef DEFAULT_MMAP_THRESHOLD
#if HAVE_MMAP
#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
#else /* HAVE_MMAP */
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
#endif /* HAVE_MMAP */
#endif /* DEFAULT_MMAP_THRESHOLD */

#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 0
#endif /* USE_BUILTIN_FFS */

#ifndef USE_DEV_RANDOM
#define USE_DEV_RANDOM 0
#endif /* USE_DEV_RANDOM */

#ifndef NO_MALLINFO
#define NO_MALLINFO 0
#endif /* NO_MALLINFO */
/* Type used for every struct mallinfo field. */
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif /* MALLINFO_FIELD_TYPE */
|
168 |
|
/*
  mallopt tuning options. SVID/XPG defines four standard parameter
  numbers for mallopt, normally defined in malloc.h. None of these
  are used in this malloc, so setting them has no effect. But this
  malloc does support the following options.
*/

#define M_TRIM_THRESHOLD (-1)
#define M_GRANULARITY (-2)
#define M_MMAP_THRESHOLD (-3)

#if !NO_MALLINFO
/*
  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing usage properties and
  statistics. It should work on any system that has a
  /usr/include/malloc.h defining struct mallinfo. The main
  declaration needed is the mallinfo struct that is returned (by-copy)
  by mallinfo(). The mallinfo struct contains a bunch of fields that
  are not even meaningful in this version of malloc. These fields are
  instead filled by mallinfo() with other numbers that might be of
  interest.

  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  /usr/include/malloc.h file that includes a declaration of struct
  mallinfo. If so, it is included; else a compliant version is
  declared below. These must be precisely the same for mallinfo() to
  work. The original SVID version of this struct, defined on most
  systems with mallinfo, declares all fields as ints. But some others
  define as unsigned long. If your system defines the fields using a
  type of different width than listed here, you MUST #include your
  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
*/

/* #define HAVE_USR_INCLUDE_MALLOC_H */

#ifdef HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#else /* HAVE_USR_INCLUDE_MALLOC_H */

/* Heap usage statistics returned (by copy) from mallinfo(). */
struct mallinfo {
  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
  MALLINFO_FIELD_TYPE fordblks; /* total free space */
  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
  MALLINFO_FIELD_TYPE cellCount;/* number of chunks allocated (extra field,
                                   not in the stock SVID struct) */
};

#endif /* HAVE_USR_INCLUDE_MALLOC_H */
#endif /* NO_MALLINFO */

#if MSPACES
typedef void* mspace; /* opaque handle to an independent malloc space */
#endif /* MSPACES */
|
229 |
|
/*
 * Stock dlmalloc system-header setup, compiled out (#if 0) for this port,
 * which does not use the POSIX headers. Retained for reference against
 * upstream dlmalloc.
 */
#if 0

#include <stdio.h>/* for printing in malloc_stats */

#ifndef LACKS_ERRNO_H
#include <errno.h> /* for MALLOC_FAILURE_ACTION */
#endif /* LACKS_ERRNO_H */

#if FOOTERS
#include <time.h> /* for magic initialization */
#endif /* FOOTERS */

#ifndef LACKS_STDLIB_H
#include <stdlib.h> /* for abort() */
#endif /* LACKS_STDLIB_H */

#ifdef DEBUG
#if ABORT_ON_ASSERT_FAILURE
#define assert(x) if(!(x)) ABORT
#else /* ABORT_ON_ASSERT_FAILURE */
#include <assert.h>
#endif /* ABORT_ON_ASSERT_FAILURE */
#else /* DEBUG */
#define assert(x)
#endif /* DEBUG */

#ifndef LACKS_STRING_H
#include <string.h> /* for memset etc */
#endif /* LACKS_STRING_H */

#if USE_BUILTIN_FFS
#ifndef LACKS_STRINGS_H
#include <strings.h> /* for ffs */
#endif /* LACKS_STRINGS_H */
#endif /* USE_BUILTIN_FFS */

#if HAVE_MMAP
#ifndef LACKS_SYS_MMAN_H
#include <sys/mman.h> /* for mmap */
#endif /* LACKS_SYS_MMAN_H */
#ifndef LACKS_FCNTL_H
#include <fcntl.h>
#endif /* LACKS_FCNTL_H */
#endif /* HAVE_MMAP */

#if HAVE_MORECORE
#ifndef LACKS_UNISTD_H
#include <unistd.h> /* for sbrk */
extern void* sbrk(size_t);
#else /* LACKS_UNISTD_H */
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
extern void* sbrk(ptrdiff_t);
/*Amod sbrk is not defined in WIN32 need to check in symbian*/
#endif /* FreeBSD etc */
#endif /* LACKS_UNISTD_H */
#endif /* HAVE_MORECORE */

#endif

/* Map dlmalloc's internal assert onto the platform ASSERT macro.
   NOTE(review): defined unconditionally, with no #undef guard — would
   clash if <assert.h> were included after this header in the same TU;
   confirm no includer does that. */
#define assert(x) ASSERT(x)
|
290 |
|
/*AMOD: For malloc_getpagesize*/
/* Stock dlmalloc page-size detection, compiled out: this port obtains
   the page size via GET_PAGE_SIZE() defined in heap.cpp. */
#if 0 // replaced with GET_PAGE_SIZE() defined in heap.cpp
#ifndef WIN32
#ifndef malloc_getpagesize
#ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
#ifndef _SC_PAGE_SIZE
#define _SC_PAGE_SIZE _SC_PAGESIZE
#endif
#endif
#ifdef _SC_PAGE_SIZE
#define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#else
#if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
extern size_t getpagesize();
#define malloc_getpagesize getpagesize()
#else
#ifdef WIN32 /* use supplied emulation of getpagesize */
#define malloc_getpagesize getpagesize()
#else
#ifndef LACKS_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef EXEC_PAGESIZE
#define malloc_getpagesize EXEC_PAGESIZE
#else
#ifdef NBPG
#ifndef CLSIZE
#define malloc_getpagesize NBPG
#else
#define malloc_getpagesize (NBPG * CLSIZE)
#endif
#else
#ifdef NBPC
#define malloc_getpagesize NBPC
#else
#ifdef PAGESIZE
#define malloc_getpagesize PAGESIZE
#else /* just guess */
#define malloc_getpagesize ((size_t)4096U)
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
/*AMOD: For malloc_getpagesize*/
|
341 |
|
/* ------------------- size_t and alignment properties -------------------- */

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE (sizeof(size_t))
#define SIZE_T_BITSIZE (sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO ((size_t)0)
#define SIZE_T_ONE ((size_t)1)
#define SIZE_T_TWO ((size_t)2)
#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)

/* True if address a has acceptable alignment */
/* NOTE(review): the cast was changed from size_t to unsigned int for this
   port; size_t is typedef'd to unsigned int above, so both are identical
   here — the distinction matters only if the typedef ever changes. */
//#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
#define is_aligned(A) (((unsigned int)((A)) & (CHUNK_ALIGN_MASK)) == 0)

/* the number of bytes to offset an address to align it */
#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
|
369 |
|
/* -------------------------- MMAP preliminaries ------------------------- */

/*
  If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
  checks to fail so compiler optimizer can delete code rather than
  using so many "#if"s.
*/


/* MORECORE and MMAP must return MFAIL on failure */
#define MFAIL ((void*)(MAX_SIZE_T))
#define CMFAIL ((TUint8*)(MFAIL)) /* defined for convenience */

/* With HAVE_MMAP == 0 (this port) every mmap operation is stubbed to
   fail, letting the optimizer discard the mmap code paths entirely. */
#if !HAVE_MMAP
#define IS_MMAPPED_BIT (SIZE_T_ZERO)
#define USE_MMAP_BIT (SIZE_T_ZERO)
#define CALL_MMAP(s) MFAIL
#define CALL_MUNMAP(a, s) (-1)
#define DIRECT_MMAP(s) MFAIL
#else /* !HAVE_MMAP */
#define IS_MMAPPED_BIT (SIZE_T_ONE)
#define USE_MMAP_BIT (SIZE_T_ONE)
#ifndef WIN32
#define CALL_MUNMAP(a, s) DLUMMAP((a),(s)) /*munmap((a), (s))*/
#define MMAP_PROT (PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif /* MAP_ANON */
#ifdef MAP_ANONYMOUS
#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
#define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, (int)MMAP_FLAGS, -1, 0)
#else /* MAP_ANONYMOUS */
/*
  Nearly all versions of mmap support MAP_ANONYMOUS, so the following
  is unlikely to be needed, but is supplied just in case.
*/
#define MMAP_FLAGS (MAP_PRIVATE)
//static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
#define CALL_MMAP(s) DLMMAP(s)
/*#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
  (dev_zero_fd = open("/dev/zero", O_RDWR), \
  mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
  mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
*/
#define CALL_REMAP(a, s, d) DLREMAP((a),(s),(d))
#endif /* MAP_ANONYMOUS */
#define DIRECT_MMAP(s) CALL_MMAP(s)
#else /* WIN32 */
#define CALL_MMAP(s) win32mmap(s)
#define CALL_MUNMAP(a, s) win32munmap((a), (s))
#define DIRECT_MMAP(s) win32direct_mmap(s)
#endif /* WIN32 */
#endif /* HAVE_MMAP */

#if HAVE_MMAP && HAVE_MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
#else /* HAVE_MMAP && HAVE_MREMAP */
#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
#endif /* HAVE_MMAP && HAVE_MREMAP */

/* Core growth is routed through SetBrk (this port's sbrk equivalent). */
#if HAVE_MORECORE
#define CALL_MORECORE(S) SetBrk(S)
#else /* HAVE_MORECORE */
#define CALL_MORECORE(S) MFAIL
#endif /* HAVE_MORECORE */

/* mstate bit set if contiguous morecore disabled or failed */
#define USE_NONCONTIGUOUS_BIT (4U)

/* segment bit set in create_mspace_with_base */
#define EXTERN_BIT (8U)
|
441 |
|
442 |
|
#if USE_LOCKS
/*
  When locks are defined, there are up to two global locks:
  * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
    MORECORE. In many cases sys_alloc requires two calls, that should
    not be interleaved with calls by other threads. This does not
    protect against direct calls to MORECORE by other threads not
    using this lock, so there is still code to cope the best we can on
    interference.
  * magic_init_mutex ensures that mparams.magic and other
    unique mparams values are initialized only once.
*/
#ifndef WIN32
/* By default use posix locks */
#include <pthread.h>
#define MLOCK_T pthread_mutex_t
#define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
#define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
#define RELEASE_LOCK(l) pthread_mutex_unlock(l)

#if HAVE_MORECORE
//static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif /* HAVE_MORECORE */
//static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
#else /* WIN32 */
#define MLOCK_T long
#define INITIAL_LOCK(l) *(l)=0
#define ACQUIRE_LOCK(l) win32_acquire_lock(l)
#define RELEASE_LOCK(l) win32_release_lock(l)
#if HAVE_MORECORE
static MLOCK_T morecore_mutex;
#endif /* HAVE_MORECORE */
static MLOCK_T magic_init_mutex;
#endif /* WIN32 */
#define USE_LOCK_BIT (2U)
#else /* USE_LOCKS */
/* USE_LOCKS == 0 on this port: all locking collapses to no-ops. */
#define USE_LOCK_BIT (0U)
#define INITIAL_LOCK(l)
#endif /* USE_LOCKS */

/* Note: in this port the mutexes live inside the malloc_state (see
   struct malloc_state below) rather than as the file statics commented
   out above — hence the M-> accesses. */
#if USE_LOCKS && HAVE_MORECORE
#define ACQUIRE_MORECORE_LOCK(M) ACQUIRE_LOCK((M->morecore_mutex)/*&morecore_mutex*/);
#define RELEASE_MORECORE_LOCK(M) RELEASE_LOCK((M->morecore_mutex)/*&morecore_mutex*/);
#else /* USE_LOCKS && HAVE_MORECORE */
#define ACQUIRE_MORECORE_LOCK(M)
#define RELEASE_MORECORE_LOCK(M)
#endif /* USE_LOCKS && HAVE_MORECORE */

#if USE_LOCKS
/*Currently not supporting this*/
#define ACQUIRE_MAGIC_INIT_LOCK(M) ACQUIRE_LOCK(((M)->magic_init_mutex));
//AMOD: changed #define ACQUIRE_MAGIC_INIT_LOCK()
//#define RELEASE_MAGIC_INIT_LOCK()
#define RELEASE_MAGIC_INIT_LOCK(M) RELEASE_LOCK(((M)->magic_init_mutex));
#else /* USE_LOCKS */
#define ACQUIRE_MAGIC_INIT_LOCK(M)
#define RELEASE_MAGIC_INIT_LOCK(M)
#endif /* USE_LOCKS */
|
501 |
|
/*CHUNK representation*/
/*
  Every allocation is carried in a malloc_chunk. The user pointer
  handed out by malloc points just past the two size_t header words
  (see chunk2mem/mem2chunk below); fd/bk overlay the user data area
  and are meaningful only while the chunk is free.
*/
struct malloc_chunk {
  size_t               prev_foot;  /* Size of previous chunk (if free). */
  size_t               head;       /* Size and inuse bits. */
  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk  mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
typedef unsigned int bindex_t;         /* Described below */
typedef unsigned int binmap_t;         /* Described below */
typedef unsigned int flag_t;           /* The type of various bit flag sets */
|
516 |
|
517 |
|
/* ------------------- Chunks sizes and alignments ----------------------- */
#define MCHUNK_SIZE (sizeof(mchunk))

/* Per-chunk bookkeeping overhead: one size word, two when footers are on. */
#if FOOTERS
#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
#else /* FOOTERS */
#define CHUNK_OVERHEAD (SIZE_T_SIZE)
#endif /* FOOTERS */

/* MMapped chunks need a second word of overhead ... */
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
/* ... and additional padding for fake next-chunk at foot */
#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)

/* The smallest size we can malloc is an aligned minimal chunk */
#define MIN_CHUNK_SIZE ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p) ((void*)((TUint8*)(p) + TWO_SIZE_T_SIZES))
#define mem2chunk(mem) ((mchunkptr)((TUint8*)(mem) - TWO_SIZE_T_SIZES))
/* chunk associated with aligned address A */
#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))

/* Bounds on request (not chunk) sizes. */
#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

/* pad request bytes into a usable size (add overhead, round up to alignment) */
#define pad_request(req) (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* pad request, checking for minimum (but not maximum) */
#define request2size(req) (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))

/* ------------------ Operations on head and foot fields ----------------- */

/*
  The head field of a chunk is or'ed with PINUSE_BIT when previous
  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
  use. If the chunk was obtained with mmap, the prev_foot field has
  IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
  mmapped region to the base of the chunk.
*/
#define PINUSE_BIT (SIZE_T_ONE)
#define CINUSE_BIT (SIZE_T_TWO)
#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)

/* Head value for fenceposts */
#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)

/* extraction of fields from head words */
#define cinuse(p) ((p)->head & CINUSE_BIT)
#define pinuse(p) ((p)->head & PINUSE_BIT)
#define chunksize(p) ((p)->head & ~(INUSE_BITS))

#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)

/* Treat space at ptr +/- offset as a chunk */
#define chunk_plus_offset(p, s) ((mchunkptr)(((TUint8*)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((TUint8*)(p)) - (s)))

/* Ptr to next or previous physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((TUint8*)(p)) + ((p)->head & ~INUSE_BITS)))
#define prev_chunk(p) ((mchunkptr)( ((TUint8*)(p)) - ((p)->prev_foot) ))

/* extract next chunk's pinuse bit */
#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)

/* Get/set size at footer (the prev_foot of the chunk s bytes ahead) */
#define get_foot(p, s) (((mchunkptr)((TUint8*)(p) + (s)))->prev_foot)
#define set_foot(p, s) (((mchunkptr)((TUint8*)(p) + (s)))->prev_foot = (s))

/* Set size, pinuse bit, and foot */
#define set_size_and_pinuse_of_free_chunk(p, s) ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

/* Set size, pinuse bit, foot, and clear next pinuse */
#define set_free_with_pinuse(p, s, n) (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

#define is_mmapped(p) (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))

/* Get the internal overhead associated with chunk p */
#define overhead_for(p) (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)

/* Return true if malloced space is not necessarily cleared */
#if MMAP_CLEARS
#define calloc_must_clear(p) (!is_mmapped(p))
#else /* MMAP_CLEARS */
#define calloc_must_clear(p) (1)
#endif /* MMAP_CLEARS */
|
607 |
|
/* ---------------------- Overlaid data structures ----------------------- */
/*
  Large free chunks are stored as nodes of size-ordered trees (one per
  treebin); the extra tree fields overlay what would otherwise be user
  data, so they cost nothing for in-use chunks.
*/
struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t                    prev_foot;
  size_t                    head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;

  struct malloc_tree_chunk* child[2]; /* left/right subtrees */
  struct malloc_tree_chunk* parent;
  bindex_t                  index;    /* treebin index this node belongs to */
};

typedef struct malloc_tree_chunk  tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */

/* A little helper macro for trees */
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])

/*Segment structure*/
/* Describes one contiguous region of memory managed by the allocator. */
struct malloc_segment {
  TUint8*                base;   /* base address */
  size_t                 size;   /* allocated size */
  struct malloc_segment* next;   /* ptr to next segment */
  flag_t                 sflags; /* mmap and extern flag */
};

#define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)

typedef struct malloc_segment  msegment;
typedef struct malloc_segment* msegmentptr;

/*Malloc State data structure*/

/* Bin layout: 32 small bins spaced 8 bytes apart (shift 3), and 32 tree
   bins for chunks of MIN_LARGE_SIZE (256 bytes, shift 8) and above. */
#define NSMALLBINS (32U)
#define NTREEBINS (32U)
#define SMALLBIN_SHIFT (3U)
#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT (8U)
#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
|
651 |
|
/*
  The allocator's complete state: bin maps, bins, top chunk and
  accounting. In this port it is embedded in the owning heap object
  rather than being the file-scope static of stock dlmalloc.
*/
struct malloc_state {
  binmap_t   smallmap;    /* bitmap of non-empty smallbins (see idx2bit ops) */
  binmap_t   treemap;     /* bitmap of non-empty treebins */
  size_t     dvsize;      /* size of the "dv" chunk below */
  size_t     topsize;     /* size of the top chunk */
  TUint8*    least_addr;  /* lowest address seen — presumably for pointer
                             sanity checks; confirm against heap.cpp */
  mchunkptr  dv;          /* the "designated victim" chunk (dlmalloc term) */
  mchunkptr  top;         /* topmost chunk; nonzero once initialized
                             (see is_initialized below) */
  size_t     trim_check;  /* threshold consulted by should_trim() */
  size_t     magic;       /* integrity-check value (paired with mparams.magic) */
  mchunkptr  smallbins[(NSMALLBINS+1)*2]; /* paired list heads, addressed
                                             via smallbin_at() */
  tbinptr    treebins[NTREEBINS];         /* tree roots, via treebin_at() */
  size_t     footprint;      /* bytes currently obtained from the system */
  size_t     max_footprint;  /* high-water mark of footprint */
  flag_t     mflags;         /* USE_LOCK/USE_MMAP/NONCONTIGUOUS bits */
#if USE_LOCKS
  MLOCK_T    mutex;  /* locate lock among fields that rarely change */
  MLOCK_T    magic_init_mutex;   /* see ACQUIRE_MAGIC_INIT_LOCK */
  MLOCK_T    morecore_mutex;     /* see ACQUIRE_MORECORE_LOCK */
#endif /* USE_LOCKS */
  msegment   seg;    /* first (embedded) segment record */
};

typedef struct malloc_state* mstate;
|
676 |
|
/* ------------- Global malloc_state and malloc_params ------------------- */

/*
  malloc_params holds global properties, including those that can be
  dynamically set using mallopt. There is a single instance, mparams,
  initialized in init_mparams.
*/

struct malloc_params {
  size_t magic;          /* integrity-check value copied into each state */
  size_t page_size;      /* system page size (must be a power of two for
                            the alignment macros below) */
  size_t granularity;    /* unit in which core is requested */
  size_t mmap_threshold; /* mmap cutoff — unused here since HAVE_MMAP==0 */
  size_t trim_threshold; /* free-space level that triggers trimming */
  flag_t default_mflags; /* initial mflags for a new malloc_state */
#if USE_LOCKS
  MLOCK_T magic_init_mutex;
#endif /* USE_LOCKS */
};

/* The global malloc_state used for all non-"mspace" calls */
/*AMOD: Need to check this as this will be the member of the class*/

//static struct malloc_state _gm_;
//#define gm (&_gm_)

//#define is_global(M) ((M) == &_gm_)
/*AMOD: has changed*/
/* `gm` is expected to be supplied by the including code in this port
   (the state is a class member, not the file static of stock dlmalloc). */
#define is_global(M) ((M) == gm)
#define is_initialized(M) ((M)->top != 0)

/* -------------------------- system alloc setup ------------------------- */

/* Operations on mflags */

#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)

#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)

#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)

#define set_lock(M,L) ((M)->mflags = (L)? ((M)->mflags | USE_LOCK_BIT) : ((M)->mflags & ~USE_LOCK_BIT))

/* page-align a size (note: a size that is already aligned is still
   rounded up by one full page — stock dlmalloc behaviour) */
#define page_align(S) (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))

/* granularity-align a size */
#define granularity_align(S) (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))

#define is_page_aligned(S) (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S) (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)

/* True if segment S holds address A */
#define segment_holds(S, A) ((TUint8*)(A) >= S->base && (TUint8*)(A) < S->base + S->size)

/* Trim only when the reclaimable amount exceeds the trim_check threshold. */
#ifndef MORECORE_CANNOT_TRIM
#define should_trim(M,s) ((s) > (M)->trim_check)
#else /* MORECORE_CANNOT_TRIM */
#define should_trim(M,s) (0)
#endif /* MORECORE_CANNOT_TRIM */

/*
  TOP_FOOT_SIZE is padding at the end of a segment, including space
  that may be needed to place segment records and fenceposts when new
  noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
|
749 |
|
/* ------------------------------- Hooks -------------------------------- */

/*
  PREACTION should be defined to return 0 on success, and nonzero on
  failure. If you are not using locking, you can redefine these to do
  anything you like.
*/

#if USE_LOCKS
/* Ensure locks are initialized */
#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
#define PREACTION(M) (use_lock((M))?(ACQUIRE_LOCK((M)->mutex),0):0) /*Action to take like lock before alloc*/
#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK((M)->mutex); }

#else /* USE_LOCKS */
/* USE_LOCKS == 0 (this port): PREACTION always succeeds, POSTACTION no-ops. */
#ifndef PREACTION
#define PREACTION(M) (0)
#endif /* PREACTION */
#ifndef POSTACTION
#define POSTACTION(M)
#endif /* POSTACTION */
#endif /* USE_LOCKS */

/*
  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
  USAGE_ERROR_ACTION is triggered on detected bad frees and
  reallocs. The argument p is an address that might have triggered the
  fault. It is ignored by the two predefined actions, but might be
  useful in custom actions that try to help diagnose errors.
*/

#if PROCEED_ON_ERROR
/* A count of the number of corruption errors causing resets */
int malloc_corruption_error_count;
/* default corruption action */
static void reset_on_error(mstate m);
#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
#define USAGE_ERROR_ACTION(m, p)
#else /* PROCEED_ON_ERROR */
/* Default: ABORT (User::Invariant on this port) on corruption or misuse. */
#ifndef CORRUPTION_ERROR_ACTION
#define CORRUPTION_ERROR_ACTION(m) ABORT
#endif /* CORRUPTION_ERROR_ACTION */
#ifndef USAGE_ERROR_ACTION
#define USAGE_ERROR_ACTION(m,p) ABORT
#endif /* USAGE_ERROR_ACTION */
#endif /* PROCEED_ON_ERROR */

/* -------------------------- Debugging setup ---------------------------- */

/* Heap consistency checks compile away entirely unless DEBUG is set. */
#if ! DEBUG
#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)
#define check_malloc_state(M)
#define check_top_chunk(M,P)
#else /* DEBUG */
#define check_free_chunk(M,P) do_check_free_chunk(M,P)
#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
#define check_top_chunk(M,P) do_check_top_chunk(M,P)
#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
#define check_malloc_state(M) do_check_malloc_state(M)
/* Forward declarations of the DEBUG-only checkers (defined in heap.cpp). */
static void do_check_any_chunk(mstate m, mchunkptr p);
static void do_check_top_chunk(mstate m, mchunkptr p);
static void do_check_mmapped_chunk(mstate m, mchunkptr p);
static void do_check_inuse_chunk(mstate m, mchunkptr p);
static void do_check_free_chunk(mstate m, mchunkptr p);
static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
static void do_check_tree(mstate m, tchunkptr t);
static void do_check_treebin(mstate m, bindex_t i);
static void do_check_smallbin(mstate m, bindex_t i);
static void do_check_malloc_state(mstate m);
static int bin_find(mstate m, mchunkptr x);
static size_t traverse_and_check(mstate m);
#endif /* DEBUG */
|
826 |
|
/* ---------------------------- Indexing Bins ---------------------------- */

/* A size is "small" when it maps to one of the NSMALLBINS exact-size bins. */
#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
/* size -> small-bin index, and back */
#define small_index(s) ((s) >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))

/* addressing by index. See above about smallbin repositioning */
/* Each smallbin occupies two slots in the smallbins array, hence (i)<<1. */
#define smallbin_at(M, i) ((sbinptr)((TUint8*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i) (&((M)->treebins[i]))
|
837 |
|
838 |
|
/* Bit representing maximum resolved size in a treebin at i.
   NOTE(fix): the whole conditional is now wrapped in parentheses; the old
   expansion was a bare `?:` expression, so a use such as
   `bit_for_tree_index(i) + 1` silently bound the `+ 1` into the else
   branch only. */
#define bit_for_tree_index(i) (((i) == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2))

/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define leftshift_for_tree_index(i) (((i) == NTREEBINS-1)? 0 : ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
|
847 |
|
848 |
|
849 /* ------------------------ Operations on bin maps ----------------------- */ |
|
850 /* bit corresponding to given index */ |
|
851 #define idx2bit(i) ((binmap_t)(1) << (i)) |
|
852 /* Mark/Clear bits with given index */ |
|
853 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) |
|
854 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) |
|
855 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) |
|
856 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) |
|
857 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) |
|
858 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) |
|
859 |
|
860 /* isolate the least set bit of a bitmap */ |
|
861 #define least_bit(x) ((x) & -(x)) |
|
862 |
|
863 /* mask with all bits to left of least bit of x on */ |
|
864 #define left_bits(x) ((x<<1) | -(x<<1)) |
|
865 |
|
866 /* mask with all bits to left of or equal to least bit of x on */ |
|
867 #define same_or_left_bits(x) ((x) | -(x)) |
|
868 |
|
869 /* isolate the least set bit of a bitmap */ |
|
870 #define least_bit(x) ((x) & -(x)) |
|
871 |
|
872 /* mask with all bits to left of least bit of x on */ |
|
873 #define left_bits(x) ((x<<1) | -(x<<1)) |
|
874 |
|
875 /* mask with all bits to left of or equal to least bit of x on */ |
|
876 #define same_or_left_bits(x) ((x) | -(x)) |
|
877 |
|
#if !INSECURE
/* Check if address a is at least as high as any from MORECORE or MMAP */
#define ok_address(M, a) ((TUint8*)(a) >= (M)->least_addr)
/* Check if address of next chunk n is higher than base chunk p */
#define ok_next(p, n) ((TUint8*)(p) < (TUint8*)(n))
/* Check if p has its cinuse bit on */
#define ok_cinuse(p) cinuse(p)
/* Check if p has its pinuse bit on */
#define ok_pinuse(p) pinuse(p)
#else /* !INSECURE */
/* INSECURE build (the configuration selected at the top of this header):
   all pointer sanity checks compile to the constant 1. */
#define ok_address(M, a) (1)
#define ok_next(b, n) (1)
#define ok_cinuse(p) (1)
#define ok_pinuse(p) (1)
#endif /* !INSECURE */

#if (FOOTERS && !INSECURE)
/* Check if (alleged) mstate m has expected magic field */
#define ok_magic(M) ((M)->magic == mparams.magic)
#else /* (FOOTERS && !INSECURE) */
#define ok_magic(M) (1)
#endif /* (FOOTERS && !INSECURE) */

/* In gcc, use __builtin_expect to minimize impact of checks */
#if !INSECURE
#if defined(__GNUC__) && __GNUC__ >= 3
/* Hint the compiler that runtime checks almost always pass. */
#define RTCHECK(e) __builtin_expect(e, 1)
#else /* GNUC */
#define RTCHECK(e) (e)
#endif /* GNUC */

#else /* !INSECURE */
#define RTCHECK(e) (1)
#endif /* !INSECURE */
|
/* macros to set up inuse chunks with or without footers */
#if !FOOTERS

/* No footers: nothing to stamp into the chunk's trailing word. */
#define mark_inuse_foot(M,p,s)

/* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s) ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),((mchunkptr)(((TUint8*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
#define set_inuse_and_pinuse(M,p,s) ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),((mchunkptr)(((TUint8*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set size, cinuse and pinuse bit of this chunk */
#define set_size_and_pinuse_of_inuse_chunk(M, p, s) ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))

#else /* FOOTERS */

/* Set foot of inuse chunk to be xor of mstate and seed */
#define mark_inuse_foot(M,p,s) (((mchunkptr)((TUint8*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))

/* Recover the owning mstate from a chunk's footer (inverse of the xor above). */
#define get_mstate_for(p) ((mstate)(((mchunkptr)((TUint8*)(p)+(chunksize(p))))->prev_foot ^ mparams.magic))

#define set_inuse(M,p,s)\
((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
(((mchunkptr)(((TUint8*)(p)) + (s)))->head |= PINUSE_BIT), \
mark_inuse_foot(M,p,s))

#define set_inuse_and_pinuse(M,p,s)\
((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
(((mchunkptr)(((TUint8*)(p)) + (s)))->head |= PINUSE_BIT),\
mark_inuse_foot(M,p,s))

#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
mark_inuse_foot(M, p, s))

#endif /* !FOOTERS */
|
937 |
|
938 |
|
/* internal_malloc / internal_free dispatch between the global malloc
   state (gm) and independent mspaces, depending on build configuration. */
#if ONLY_MSPACES
#define internal_malloc(m, b) mspace_malloc(m, b)
#define internal_free(m, mem) mspace_free(m,mem);
#else /* ONLY_MSPACES */
#if MSPACES
/* NOTE(fix): the conditional is wrapped in parentheses so the expansion
   cannot absorb neighbouring operators at the call site (the old bare
   `(m == gm)? a : b` form mis-parsed in larger expressions). */
#define internal_malloc(m, b) ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
/* Statement-form macro: intended for use as a full statement only. */
#define internal_free(m, mem) if (m == gm) dlfree(mem); else mspace_free(m,mem);
#else /* MSPACES */
#define internal_malloc(m, b) dlmalloc(b)
#define internal_free(m, mem) dlfree(mem)
#endif /* MSPACES */
#endif /* ONLY_MSPACES */
|
/******CODE TO SUPPORT SLAB ALLOCATOR******/

/* CHECKING mirrors the debug build: on unless NDEBUG is defined. */
#ifndef NDEBUG
#define CHECKING 1
#endif

/* Hysteresis thresholds used by the slab allocator (exact semantics
   are defined where these are used, outside this view). */
#define HYSTERESIS 4
#define HYSTERESIS_BYTES (2*pagesize)

#if CHECKING
//#define ASSERT(x) {if (!(x)) abort();}
/* NOTE(review): ASSERT is left commented out in the CHECKING branch, so
   a CHECKING build relies on ASSERT being defined elsewhere — verify. */
#define CHECK(x) x
#else
#define ASSERT(x) (void)0
#define CHECK(x) (void)0
#endif
|
966 |
|
class slab;
class slabhdr;

/* Slab allocator geometry: 4K pages (1<<12) split into 1K slabs (1<<10),
   i.e. four slabs per page; cells are 8-byte aligned. */
#define maxslabsize 60
#define pageshift 12
#define pagesize (1<<pageshift)
#define slabshift 10
#define slabsize (1 << slabshift)
#define cellalign 8
/* Sentinel header value for a slab with no free cells. */
const unsigned slabfull = 0;
const TInt slabsperpage = (int)(pagesize/slabsize);
|
/* Index (0..3) of the highest set bit of a 4-bit mask.
   NOTE(fix): `bits` is now fully parenthesised inside the casts; the old
   expansion `(unsigned)bits>>3` applied the cast to only the first term
   of a compound argument such as `a | b`. */
#define hibit(bits) ((((unsigned)(bits)) & 0xc) ? 2 + (((unsigned)(bits))>>3) : (((unsigned)(bits))>>1))

/* Index (0..3) of the lowest set bit of a 4-bit mask (same paren fix). */
#define lowbit(bits) ((((unsigned)(bits))&3) ? 1 - (((unsigned)(bits))&1) : 3 - ((((unsigned)(bits))>>2)&1))

/* NOTE(fix): parenthesised so uses like `x << minpagepower * 2` cannot
   mis-associate; also removed the exact-duplicate redefinitions of
   maxslabsize and cellalign that used to follow (both are already
   defined above with the same values). */
#define minpagepower (pageshift+2)
|
/* Per-slab metadata; `slab` derives from this and appends the payload
   so the header sits at the start of each 1K slab. */
class slabhdr
	{
	public:
		unsigned header;
		// made up of
		// bits | 31 | 30..28 | 27..18 | 17..12 | 11..8 | 7..0 |
		// +----------+--------+--------+--------+---------+----------+
		// field | floating | zero | used-4 | size | pagemap | free pos |
		//
		slab** parent; // reference to parent's pointer to this slab in tree
		slab* child1; // 1st child in tree
		slab* child2; // 2nd child in tree
	};
|
996 |
|
/* Non-zero iff the "floating" flag (bit 31 of a slab header word) is set. */
inline unsigned header_floating(unsigned aHeader)
	{
	return aHeader & 0x80000000;
	}
|
/* Payload capacity of a slab, counted in 4-byte words (>>2). */
const unsigned maxuse = (slabsize - sizeof(slabhdr))>>2;
/* Word index of the first payload cell (just past the header). */
const unsigned firstpos = sizeof(slabhdr)>>2;
/* Tree-structure verification is compiled out in this configuration. */
#define checktree(x) (void)0
|
/* Address arithmetic helpers. All of them funnel the value through
   `unsigned`, which assumes a 32-bit address space (Symbian target). */

/* Round addr down to a multiple of aln (aln must be a power of two). */
template <class T> inline T floor(const T addr, unsigned aln)
	{
	const unsigned mask = aln - 1;
	return T(unsigned(addr) & ~mask);
	}

/* Round addr up to a multiple of aln (aln must be a power of two). */
template <class T> inline T ceiling(T addr, unsigned aln)
	{
	const unsigned mask = aln - 1;
	return T((unsigned(addr) + mask) & ~mask);
	}

/* The low bits of addr below alignment aln. */
template <class T> inline unsigned lowbits(T addr, unsigned aln)
	{
	return unsigned(addr) & (aln - 1);
	}

/* Signed byte distance a1 - a2 between two pointers. */
template <class T1, class T2> inline int ptrdiff(const T1* a1, const T2* a2)
	{
	const unsigned char* lhs = reinterpret_cast<const unsigned char*>(a1);
	const unsigned char* rhs = reinterpret_cast<const unsigned char*>(a2);
	return lhs - rhs;
	}

/* addr advanced by ofs bytes, preserving the original type. */
template <class T> inline T offset(T addr, unsigned ofs)
	{
	return T(unsigned(addr) + ofs);
	}
|
/* The set of slabs serving one cell size: the size itself plus the list
   of partially-full slabs to allocate from. */
class slabset
	{
	public:
		void* initslab(slab* s);
		unsigned size;     // cell size served by this set
		slab* partial;     // partially-full slabs of this size
	};
|
1019 |
|
/* A 1K slab: the slabhdr metadata followed by the cell payload, padded
   to exactly slabsize bytes. */
class slab : public slabhdr
	{
	public:
		void init(unsigned clz);
		//static slab* slabfor( void* p);
		/* Map any interior pointer back to its containing slab. */
		static slab* slabfor(const void* p) ;
	private:
		unsigned char payload[slabsize-sizeof(slabhdr)];
	};
|
/* A 4K page viewed as its four constituent slabs. */
class page
	{
	public:
		inline static page* pagefor(slab* s);
		//slab slabs;
		slab slabs[slabsperpage];
	};
|
1036 |
|
1037 |
|
/* Map a slab to its containing page by rounding the slab's address
   down to a page boundary. */
inline page* page::pagefor(slab* s)
	{
	return reinterpret_cast<page*>(floor(s, pagesize));
	}
|
/* Record of one page-aligned allocation: its address and size in bytes. */
struct pagecell
	{
	void* page;
	unsigned size;
	};
|
/******END OF CODE TO SUPPORT SLAB ALLOCATOR******/
|
1048 #endif/*__DLA__*/ |