1 /* |
|
2 * Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies). |
|
3 * All rights reserved. |
|
4 * This component and the accompanying materials are made available |
|
5 * under the terms of "Eclipse Public License v1.0" |
|
6 * which accompanies this distribution, and is available |
|
7 * at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
8 * |
|
9 * Initial Contributors: |
|
10 * Nokia Corporation - initial contribution. |
|
11 * |
|
12 * Contributors: |
|
13 * |
|
14 * Description: |
|
15 * |
|
16 */ |
|
17 /*DLA.h*/ |
|
18 #ifndef __DLA__ |
|
19 #define __DLA__ |
|
20 |
|
21 #define DEFAULT_TRIM_THRESHOLD ((size_t)4U * (size_t)1024U) |
|
22 |
|
23 #define __SYMBIAN__ |
|
24 #define MSPACES 0 |
|
25 #define HAVE_MORECORE 1 |
|
26 #define MORECORE_CONTIGUOUS 1 |
|
27 #define HAVE_MMAP 0 |
|
28 #define HAVE_MREMAP 0 |
|
29 #define DEFAULT_GRANULARITY (4096U) |
|
30 #define FOOTERS 0 |
|
31 #define USE_LOCKS 0 |
|
32 #define INSECURE 1 |
|
33 #define NO_MALLINFO 0 |
|
34 |
|
35 #define LACKS_SYS_TYPES_H |
|
36 #ifndef LACKS_SYS_TYPES_H |
|
37 #include <sys/types.h> /* For size_t */ |
|
38 #else |
|
39 #ifndef _SIZE_T_DECLARED |
|
40 typedef unsigned int size_t; |
|
41 #define _SIZE_T_DECLARED |
|
42 #endif |
|
43 #endif /* LACKS_SYS_TYPES_H */ |
|
44 |
|
45 /* The maximum possible size_t value has all bits set */ |
|
46 #define MAX_SIZE_T (~(size_t)0) |
|
47 |
|
48 #ifndef ONLY_MSPACES |
|
49 #define ONLY_MSPACES 0 |
|
50 #endif /* ONLY_MSPACES */ |
|
51 |
|
52 #ifndef MSPACES |
|
53 #if ONLY_MSPACES |
|
54 #define MSPACES 1 |
|
55 #else /* ONLY_MSPACES */ |
|
56 #define MSPACES 0 |
|
57 #endif /* ONLY_MSPACES */ |
|
58 #endif /* MSPACES */ |
|
59 |
|
60 #ifndef MALLOC_ALIGNMENT |
|
61 #define MALLOC_ALIGNMENT ((size_t)8U) |
|
62 #endif /* MALLOC_ALIGNMENT */ |
|
63 |
|
64 #ifndef FOOTERS |
|
65 #define FOOTERS 0 |
|
66 #endif /* FOOTERS */ |
|
67 |
|
68 #ifndef ABORT |
|
69 // #define ABORT abort() |
|
70 #define ABORT User::Invariant()// redefined so euser isn't dependant on oe |
|
71 #endif /* ABORT */ |
|
72 |
|
73 #ifndef ABORT_ON_ASSERT_FAILURE |
|
74 #define ABORT_ON_ASSERT_FAILURE 1 |
|
75 #endif /* ABORT_ON_ASSERT_FAILURE */ |
|
76 |
|
77 #ifndef PROCEED_ON_ERROR |
|
78 #define PROCEED_ON_ERROR 0 |
|
79 #endif /* PROCEED_ON_ERROR */ |
|
80 |
|
81 #ifndef USE_LOCKS |
|
82 #define USE_LOCKS 0 |
|
83 #endif /* USE_LOCKS */ |
|
84 |
|
85 #ifndef INSECURE |
|
86 #define INSECURE 0 |
|
87 #endif /* INSECURE */ |
|
88 |
|
89 #ifndef HAVE_MMAP |
|
90 #define HAVE_MMAP 1 |
|
91 #endif /* HAVE_MMAP */ |
|
92 |
|
93 #ifndef MMAP_CLEARS |
|
94 #define MMAP_CLEARS 1 |
|
95 #endif /* MMAP_CLEARS */ |
|
96 |
|
97 #ifndef HAVE_MREMAP |
|
98 #ifdef linux |
|
99 #define HAVE_MREMAP 1 |
|
100 #else /* linux */ |
|
101 #define HAVE_MREMAP 0 |
|
102 #endif /* linux */ |
|
103 #endif /* HAVE_MREMAP */ |
|
104 |
|
105 #ifndef MALLOC_FAILURE_ACTION |
|
106 //#define MALLOC_FAILURE_ACTION errno = ENOMEM; |
|
107 #define MALLOC_FAILURE_ACTION ; |
|
108 #endif /* MALLOC_FAILURE_ACTION */ |
|
109 |
|
110 #ifndef HAVE_MORECORE |
|
111 #if ONLY_MSPACES |
|
112 #define HAVE_MORECORE 1 /*AMOD: has changed */ |
|
113 #else /* ONLY_MSPACES */ |
|
114 #define HAVE_MORECORE 1 |
|
115 #endif /* ONLY_MSPACES */ |
|
116 #endif /* HAVE_MORECORE */ |
|
117 |
|
118 #if !HAVE_MORECORE |
|
119 #define MORECORE_CONTIGUOUS 0 |
|
120 #else /* !HAVE_MORECORE */ |
|
121 #ifndef MORECORE |
|
122 #define MORECORE DLAdjust |
|
123 #endif /* MORECORE */ |
|
124 #ifndef MORECORE_CONTIGUOUS |
|
125 #define MORECORE_CONTIGUOUS 0 |
|
126 #endif /* MORECORE_CONTIGUOUS */ |
|
127 #endif /* !HAVE_MORECORE */ |
|
128 |
|
129 #ifndef DEFAULT_GRANULARITY |
|
130 #if MORECORE_CONTIGUOUS |
|
131 #define DEFAULT_GRANULARITY 4096 /* 0 means to compute in init_mparams */ |
|
132 #else /* MORECORE_CONTIGUOUS */ |
|
133 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) |
|
134 #endif /* MORECORE_CONTIGUOUS */ |
|
135 #endif /* DEFAULT_GRANULARITY */ |
|
136 |
|
137 #ifndef DEFAULT_TRIM_THRESHOLD |
|
138 #ifndef MORECORE_CANNOT_TRIM |
|
139 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) |
|
140 #else /* MORECORE_CANNOT_TRIM */ |
|
141 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T |
|
142 #endif /* MORECORE_CANNOT_TRIM */ |
|
143 #endif /* DEFAULT_TRIM_THRESHOLD */ |
|
144 |
|
145 #ifndef DEFAULT_MMAP_THRESHOLD |
|
146 #if HAVE_MMAP |
|
147 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) |
|
148 #else /* HAVE_MMAP */ |
|
149 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T |
|
150 #endif /* HAVE_MMAP */ |
|
151 #endif /* DEFAULT_MMAP_THRESHOLD */ |
|
152 |
|
153 #ifndef USE_BUILTIN_FFS |
|
154 #define USE_BUILTIN_FFS 0 |
|
155 #endif /* USE_BUILTIN_FFS */ |
|
156 |
|
157 #ifndef USE_DEV_RANDOM |
|
158 #define USE_DEV_RANDOM 0 |
|
159 #endif /* USE_DEV_RANDOM */ |
|
160 |
|
161 #ifndef NO_MALLINFO |
|
162 #define NO_MALLINFO 0 |
|
163 #endif /* NO_MALLINFO */ |
|
164 #ifndef MALLINFO_FIELD_TYPE |
|
165 #define MALLINFO_FIELD_TYPE size_t |
|
166 #endif /* MALLINFO_FIELD_TYPE */ |
|
167 |
|
168 /* |
|
169 mallopt tuning options. SVID/XPG defines four standard parameter |
|
170 numbers for mallopt, normally defined in malloc.h. None of these |
|
171 are used in this malloc, so setting them has no effect. But this |
|
172 malloc does support the following options. |
|
173 */ |
|
174 |
|
175 #define M_TRIM_THRESHOLD (-1) |
|
176 #define M_GRANULARITY (-2) |
|
177 #define M_MMAP_THRESHOLD (-3) |
|
178 |
|
179 #if !NO_MALLINFO |
|
180 /* |
|
181 This version of malloc supports the standard SVID/XPG mallinfo |
|
182 routine that returns a struct containing usage properties and |
|
183 statistics. It should work on any system that has a |
|
184 /usr/include/malloc.h defining struct mallinfo. The main |
|
185 declaration needed is the mallinfo struct that is returned (by-copy) |
|
by mallinfo(). The mallinfo struct contains a bunch of fields that
are not even meaningful in this version of malloc. These fields
are instead filled by mallinfo() with other numbers that might be of
|
189 interest. |
|
190 |
|
191 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a |
|
192 /usr/include/malloc.h file that includes a declaration of struct |
|
193 mallinfo. If so, it is included; else a compliant version is |
|
194 declared below. These must be precisely the same for mallinfo() to |
|
195 work. The original SVID version of this struct, defined on most |
|
196 systems with mallinfo, declares all fields as ints. But some others |
|
197 define as unsigned long. If your system defines the fields using a |
|
198 type of different width than listed here, you MUST #include your |
|
199 system version and #define HAVE_USR_INCLUDE_MALLOC_H. |
|
200 */ |
|
201 |
|
202 /* #define HAVE_USR_INCLUDE_MALLOC_H */ |
|
203 |
|
204 #ifdef HAVE_USR_INCLUDE_MALLOC_H |
|
205 #include "/usr/include/malloc.h" |
|
206 #else /* HAVE_USR_INCLUDE_MALLOC_H */ |
|
207 |
|
/* SVID/XPG-style mallinfo record returned (by copy) from mallinfo().
   Several of the standard fields are not meaningful in this allocator
   and are reused to report other statistics, as described in the
   comment block above.  cellCount is a non-standard extension added by
   this port. */
struct mallinfo {
  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
  MALLINFO_FIELD_TYPE fordblks; /* total free space */
  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
  MALLINFO_FIELD_TYPE cellCount;/* number of chunks allocated */
};
|
221 |
|
222 #endif /* HAVE_USR_INCLUDE_MALLOC_H */ |
|
223 #endif /* NO_MALLINFO */ |
|
224 |
|
225 #if MSPACES |
|
226 typedef void* mspace; |
|
227 #endif /* MSPACES */ |
|
228 |
|
229 #if 0 |
|
230 |
|
231 #include <stdio.h>/* for printing in malloc_stats */ |
|
232 |
|
233 #ifndef LACKS_ERRNO_H |
|
234 #include <errno.h> /* for MALLOC_FAILURE_ACTION */ |
|
235 #endif /* LACKS_ERRNO_H */ |
|
236 |
|
237 #if FOOTERS |
|
238 #include <time.h> /* for magic initialization */ |
|
239 #endif /* FOOTERS */ |
|
240 |
|
241 #ifndef LACKS_STDLIB_H |
|
242 #include <stdlib.h> /* for abort() */ |
|
243 #endif /* LACKS_STDLIB_H */ |
|
244 |
|
245 #ifdef DEBUG |
|
246 #if ABORT_ON_ASSERT_FAILURE |
|
247 #define assert(x) if(!(x)) ABORT |
|
248 #else /* ABORT_ON_ASSERT_FAILURE */ |
|
249 #include <assert.h> |
|
250 #endif /* ABORT_ON_ASSERT_FAILURE */ |
|
251 #else /* DEBUG */ |
|
252 #define assert(x) |
|
253 #endif /* DEBUG */ |
|
254 |
|
255 #ifndef LACKS_STRING_H |
|
256 #include <string.h> /* for memset etc */ |
|
257 #endif /* LACKS_STRING_H */ |
|
258 |
|
259 #if USE_BUILTIN_FFS |
|
260 #ifndef LACKS_STRINGS_H |
|
261 #include <strings.h> /* for ffs */ |
|
262 #endif /* LACKS_STRINGS_H */ |
|
263 #endif /* USE_BUILTIN_FFS */ |
|
264 |
|
265 #if HAVE_MMAP |
|
266 #ifndef LACKS_SYS_MMAN_H |
|
267 #include <sys/mman.h> /* for mmap */ |
|
268 #endif /* LACKS_SYS_MMAN_H */ |
|
269 #ifndef LACKS_FCNTL_H |
|
270 #include <fcntl.h> |
|
271 #endif /* LACKS_FCNTL_H */ |
|
272 #endif /* HAVE_MMAP */ |
|
273 |
|
274 #if HAVE_MORECORE |
|
275 #ifndef LACKS_UNISTD_H |
|
276 #include <unistd.h> /* for sbrk */ |
|
277 extern void* sbrk(size_t); |
|
278 #else /* LACKS_UNISTD_H */ |
|
279 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) |
|
280 extern void* sbrk(ptrdiff_t); |
|
281 /*Amod sbrk is not defined in WIN32 need to check in symbian*/ |
|
282 #endif /* FreeBSD etc */ |
|
283 #endif /* LACKS_UNISTD_H */ |
|
284 #endif /* HAVE_MORECORE */ |
|
285 |
|
286 #endif |
|
287 |
|
288 #define assert(x) ASSERT(x) |
|
289 |
|
290 /*AMOD: For malloc_getpagesize*/ |
|
291 #if 0 // replaced with GET_PAGE_SIZE() defined in heap.cpp |
|
292 #ifndef WIN32 |
|
293 #ifndef malloc_getpagesize |
|
294 #ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ |
|
295 #ifndef _SC_PAGE_SIZE |
|
296 #define _SC_PAGE_SIZE _SC_PAGESIZE |
|
297 #endif |
|
298 #endif |
|
299 #ifdef _SC_PAGE_SIZE |
|
300 #define malloc_getpagesize sysconf(_SC_PAGE_SIZE) |
|
301 #else |
|
302 #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) |
|
303 extern size_t getpagesize(); |
|
304 #define malloc_getpagesize getpagesize() |
|
305 #else |
|
306 #ifdef WIN32 /* use supplied emulation of getpagesize */ |
|
307 #define malloc_getpagesize getpagesize() |
|
308 #else |
|
309 #ifndef LACKS_SYS_PARAM_H |
|
310 #include <sys/param.h> |
|
311 #endif |
|
312 #ifdef EXEC_PAGESIZE |
|
313 #define malloc_getpagesize EXEC_PAGESIZE |
|
314 #else |
|
315 #ifdef NBPG |
|
316 #ifndef CLSIZE |
|
317 #define malloc_getpagesize NBPG |
|
318 #else |
|
319 #define malloc_getpagesize (NBPG * CLSIZE) |
|
320 #endif |
|
321 #else |
|
322 #ifdef NBPC |
|
323 #define malloc_getpagesize NBPC |
|
324 #else |
|
325 #ifdef PAGESIZE |
|
326 #define malloc_getpagesize PAGESIZE |
|
327 #else /* just guess */ |
|
328 #define malloc_getpagesize ((size_t)4096U) |
|
329 #endif |
|
330 #endif |
|
331 #endif |
|
332 #endif |
|
333 #endif |
|
334 #endif |
|
335 #endif |
|
336 #endif |
|
337 #endif |
|
338 #endif |
|
339 /*AMOD: For malloc_getpagesize*/ |
|
340 |
|
341 /* ------------------- size_t and alignment properties -------------------- */ |
|
342 |
|
343 /* The byte and bit size of a size_t */ |
|
344 #define SIZE_T_SIZE (sizeof(size_t)) |
|
345 #define SIZE_T_BITSIZE (sizeof(size_t) << 3) |
|
346 |
|
347 /* Some constants coerced to size_t */ |
|
/* Annoying but necessary to avoid errors on some platforms */
|
349 #define SIZE_T_ZERO ((size_t)0) |
|
350 #define SIZE_T_ONE ((size_t)1) |
|
351 #define SIZE_T_TWO ((size_t)2) |
|
352 #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) |
|
353 #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) |
|
354 #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) |
|
355 #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) |
|
356 |
|
357 /* The bit mask value corresponding to MALLOC_ALIGNMENT */ |
|
358 #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) |
|
359 |
|
/* True if address A has acceptable alignment.  The cast goes through
   size_t (rather than unsigned int) so the low-bit test is valid for
   any pointer width: unsigned int would truncate addresses on 64-bit
   targets and could report a misaligned pointer as aligned. */
#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
|
363 |
|
364 /* the number of bytes to offset an address to align it */ |
|
365 #define align_offset(A)\ |
|
366 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ |
|
367 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) |
|
368 |
|
369 /* -------------------------- MMAP preliminaries ------------------------- */ |
|
370 |
|
371 /* |
|
372 If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and |
|
373 checks to fail so compiler optimizer can delete code rather than |
|
374 using so many "#if"s. |
|
375 */ |
|
376 |
|
377 |
|
378 /* MORECORE and MMAP must return MFAIL on failure */ |
|
379 #define MFAIL ((void*)(MAX_SIZE_T)) |
|
380 #define CMFAIL ((TUint8*)(MFAIL)) /* defined for convenience */ |
|
381 |
|
382 #if !HAVE_MMAP |
|
383 #define IS_MMAPPED_BIT (SIZE_T_ZERO) |
|
384 #define USE_MMAP_BIT (SIZE_T_ZERO) |
|
385 #define CALL_MMAP(s) MFAIL |
|
386 #define CALL_MUNMAP(a, s) (-1) |
|
387 #define DIRECT_MMAP(s) MFAIL |
|
388 #else /* !HAVE_MMAP */ |
|
389 #define IS_MMAPPED_BIT (SIZE_T_ONE) |
|
390 #define USE_MMAP_BIT (SIZE_T_ONE) |
|
391 #ifndef WIN32 |
|
392 #define CALL_MUNMAP(a, s) DLUMMAP((a),(s)) /*munmap((a), (s))*/ |
|
393 #define MMAP_PROT (PROT_READ|PROT_WRITE) |
|
394 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) |
|
395 #define MAP_ANONYMOUS MAP_ANON |
|
396 #endif /* MAP_ANON */ |
|
397 #ifdef MAP_ANONYMOUS |
|
398 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) |
|
399 #define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, (int)MMAP_FLAGS, -1, 0) |
|
400 #else /* MAP_ANONYMOUS */ |
|
401 /* |
|
402 Nearly all versions of mmap support MAP_ANONYMOUS, so the following |
|
403 is unlikely to be needed, but is supplied just in case. |
|
404 */ |
|
405 #define MMAP_FLAGS (MAP_PRIVATE) |
|
406 //static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ |
|
407 #define CALL_MMAP(s) DLMMAP(s) |
|
408 /*#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \ |
|
409 (dev_zero_fd = open("/dev/zero", O_RDWR), \ |
|
410 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ |
|
411 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) |
|
412 */ |
|
413 #define CALL_REMAP(a, s, d) DLREMAP((a),(s),(d)) |
|
414 #endif /* MAP_ANONYMOUS */ |
|
415 #define DIRECT_MMAP(s) CALL_MMAP(s) |
|
416 #else /* WIN32 */ |
|
417 #define CALL_MMAP(s) win32mmap(s) |
|
418 #define CALL_MUNMAP(a, s) win32munmap((a), (s)) |
|
419 #define DIRECT_MMAP(s) win32direct_mmap(s) |
|
420 #endif /* WIN32 */ |
|
421 #endif /* HAVE_MMAP */ |
|
422 |
|
423 #if HAVE_MMAP && HAVE_MREMAP |
|
424 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv)) |
|
425 #else /* HAVE_MMAP && HAVE_MREMAP */ |
|
426 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL |
|
427 #endif /* HAVE_MMAP && HAVE_MREMAP */ |
|
428 |
|
429 #if HAVE_MORECORE |
|
430 #define CALL_MORECORE(S) SetBrk(S) |
|
431 #else /* HAVE_MORECORE */ |
|
432 #define CALL_MORECORE(S) MFAIL |
|
433 #endif /* HAVE_MORECORE */ |
|
434 |
|
/* mstate bit set if contiguous morecore disabled or failed */
|
436 #define USE_NONCONTIGUOUS_BIT (4U) |
|
437 |
|
438 /* segment bit set in create_mspace_with_base */ |
|
439 #define EXTERN_BIT (8U) |
|
440 |
|
441 |
|
442 #if USE_LOCKS |
|
443 /* |
|
444 When locks are defined, there are up to two global locks: |
|
445 * If HAVE_MORECORE, morecore_mutex protects sequences of calls to |
|
446 MORECORE. In many cases sys_alloc requires two calls, that should |
|
447 not be interleaved with calls by other threads. This does not |
|
448 protect against direct calls to MORECORE by other threads not |
|
449 using this lock, so there is still code to cope the best we can on |
|
450 interference. |
|
451 * magic_init_mutex ensures that mparams.magic and other |
|
452 unique mparams values are initialized only once. |
|
453 */ |
|
454 #ifndef WIN32 |
|
455 /* By default use posix locks */ |
|
456 #include <pthread.h> |
|
457 #define MLOCK_T pthread_mutex_t |
|
458 #define INITIAL_LOCK(l) pthread_mutex_init(l, NULL) |
|
459 #define ACQUIRE_LOCK(l) pthread_mutex_lock(l) |
|
460 #define RELEASE_LOCK(l) pthread_mutex_unlock(l) |
|
461 |
|
462 #if HAVE_MORECORE |
|
463 //static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER; |
|
464 #endif /* HAVE_MORECORE */ |
|
465 //static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER; |
|
466 #else /* WIN32 */ |
|
467 #define MLOCK_T long |
|
468 #define INITIAL_LOCK(l) *(l)=0 |
|
469 #define ACQUIRE_LOCK(l) win32_acquire_lock(l) |
|
470 #define RELEASE_LOCK(l) win32_release_lock(l) |
|
471 #if HAVE_MORECORE |
|
472 static MLOCK_T morecore_mutex; |
|
473 #endif /* HAVE_MORECORE */ |
|
474 static MLOCK_T magic_init_mutex; |
|
475 #endif /* WIN32 */ |
|
476 #define USE_LOCK_BIT (2U) |
|
477 #else /* USE_LOCKS */ |
|
478 #define USE_LOCK_BIT (0U) |
|
479 #define INITIAL_LOCK(l) |
|
480 #endif /* USE_LOCKS */ |
|
481 |
|
482 #if USE_LOCKS && HAVE_MORECORE |
|
483 #define ACQUIRE_MORECORE_LOCK(M) ACQUIRE_LOCK((M->morecore_mutex)/*&morecore_mutex*/); |
|
484 #define RELEASE_MORECORE_LOCK(M) RELEASE_LOCK((M->morecore_mutex)/*&morecore_mutex*/); |
|
485 #else /* USE_LOCKS && HAVE_MORECORE */ |
|
486 #define ACQUIRE_MORECORE_LOCK(M) |
|
487 #define RELEASE_MORECORE_LOCK(M) |
|
488 #endif /* USE_LOCKS && HAVE_MORECORE */ |
|
489 |
|
490 #if USE_LOCKS |
|
/* Currently not supporting this */
|
492 #define ACQUIRE_MAGIC_INIT_LOCK(M) ACQUIRE_LOCK(((M)->magic_init_mutex)); |
|
493 //AMOD: changed #define ACQUIRE_MAGIC_INIT_LOCK() |
|
494 //#define RELEASE_MAGIC_INIT_LOCK() |
|
495 #define RELEASE_MAGIC_INIT_LOCK(M) RELEASE_LOCK(((M)->magic_init_mutex)); |
|
496 #else /* USE_LOCKS */ |
|
497 #define ACQUIRE_MAGIC_INIT_LOCK(M) |
|
498 #define RELEASE_MAGIC_INIT_LOCK(M) |
|
499 #endif /* USE_LOCKS */ |
|
500 |
|
501 /*CHUNK representation*/ |
|
/* Chunk header overlaid on managed memory.  fd/bk are meaningful only
   while the chunk sits on a free list; for an in-use chunk that space
   is part of the user's payload (chunk2mem offsets past prev_foot and
   head only). */
struct malloc_chunk {
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
  size_t               head;       /* Size and inuse bits. */
  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;         /* back link of the free-list pair. */
};
|
508 |
|
509 typedef struct malloc_chunk mchunk; |
|
510 typedef struct malloc_chunk* mchunkptr; |
|
511 typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ |
|
512 typedef unsigned int bindex_t; /* Described below */ |
|
513 typedef unsigned int binmap_t; /* Described below */ |
|
514 typedef unsigned int flag_t; /* The type of various bit flag sets */ |
|
515 |
|
516 |
|
517 /* ------------------- Chunks sizes and alignments ----------------------- */ |
|
518 #define MCHUNK_SIZE (sizeof(mchunk)) |
|
519 |
|
520 #if FOOTERS |
|
521 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) |
|
522 #else /* FOOTERS */ |
|
523 #define CHUNK_OVERHEAD (SIZE_T_SIZE) |
|
524 #endif /* FOOTERS */ |
|
525 |
|
526 /* MMapped chunks need a second word of overhead ... */ |
|
527 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) |
|
528 /* ... and additional padding for fake next-chunk at foot */ |
|
529 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) |
|
530 |
|
531 /* The smallest size we can malloc is an aligned minimal chunk */ |
|
532 #define MIN_CHUNK_SIZE ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) |
|
533 |
|
534 /* conversion from malloc headers to user pointers, and back */ |
|
535 #define chunk2mem(p) ((void*)((TUint8*)(p) + TWO_SIZE_T_SIZES)) |
|
536 #define mem2chunk(mem) ((mchunkptr)((TUint8*)(mem) - TWO_SIZE_T_SIZES)) |
|
537 /* chunk associated with aligned address A */ |
|
538 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) |
|
539 |
|
540 /* Bounds on request (not chunk) sizes. */ |
|
541 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) |
|
542 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) |
|
543 |
|
544 /* pad request bytes into a usable size */ |
|
545 #define pad_request(req) (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) |
|
546 |
|
547 /* pad request, checking for minimum (but not maximum) */ |
|
548 #define request2size(req) (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) |
|
549 |
|
550 /* ------------------ Operations on head and foot fields ----------------- */ |
|
551 |
|
552 /* |
|
553 The head field of a chunk is or'ed with PINUSE_BIT when previous |
|
554 adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in |
|
555 use. If the chunk was obtained with mmap, the prev_foot field has |
|
556 IS_MMAPPED_BIT set, otherwise holding the offset of the base of the |
|
557 mmapped region to the base of the chunk. |
|
558 */ |
|
559 #define PINUSE_BIT (SIZE_T_ONE) |
|
560 #define CINUSE_BIT (SIZE_T_TWO) |
|
561 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) |
|
562 |
|
563 /* Head value for fenceposts */ |
|
564 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) |
|
565 |
|
566 /* extraction of fields from head words */ |
|
567 #define cinuse(p) ((p)->head & CINUSE_BIT) |
|
568 #define pinuse(p) ((p)->head & PINUSE_BIT) |
|
569 #define chunksize(p) ((p)->head & ~(INUSE_BITS)) |
|
570 |
|
571 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) |
|
572 #define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT) |
|
573 |
|
574 /* Treat space at ptr +/- offset as a chunk */ |
|
575 #define chunk_plus_offset(p, s) ((mchunkptr)(((TUint8*)(p)) + (s))) |
|
576 #define chunk_minus_offset(p, s) ((mchunkptr)(((TUint8*)(p)) - (s))) |
|
577 |
|
578 /* Ptr to next or previous physical malloc_chunk. */ |
|
579 #define next_chunk(p) ((mchunkptr)( ((TUint8*)(p)) + ((p)->head & ~INUSE_BITS))) |
|
580 #define prev_chunk(p) ((mchunkptr)( ((TUint8*)(p)) - ((p)->prev_foot) )) |
|
581 |
|
582 /* extract next chunk's pinuse bit */ |
|
583 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) |
|
584 |
|
585 /* Get/set size at footer */ |
|
586 #define get_foot(p, s) (((mchunkptr)((TUint8*)(p) + (s)))->prev_foot) |
|
587 #define set_foot(p, s) (((mchunkptr)((TUint8*)(p) + (s)))->prev_foot = (s)) |
|
588 |
|
589 /* Set size, pinuse bit, and foot */ |
|
590 #define set_size_and_pinuse_of_free_chunk(p, s) ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) |
|
591 |
|
592 /* Set size, pinuse bit, foot, and clear next pinuse */ |
|
593 #define set_free_with_pinuse(p, s, n) (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) |
|
594 |
|
595 #define is_mmapped(p) (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT)) |
|
596 |
|
597 /* Get the internal overhead associated with chunk p */ |
|
598 #define overhead_for(p) (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) |
|
599 |
|
600 /* Return true if malloced space is not necessarily cleared */ |
|
601 #if MMAP_CLEARS |
|
602 #define calloc_must_clear(p) (!is_mmapped(p)) |
|
603 #else /* MMAP_CLEARS */ |
|
604 #define calloc_must_clear(p) (1) |
|
605 #endif /* MMAP_CLEARS */ |
|
606 |
|
607 /* ---------------------- Overlaid data structures ----------------------- */ |
|
/* Node type for chunks held in the large-size tree bins.  The first
   four fields overlay struct malloc_chunk exactly, so a tchunkptr can
   be safely viewed as an mchunkptr and vice versa. */
struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t                    prev_foot;
  size_t                    head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;

  struct malloc_tree_chunk* child[2]; /* left/right subtrees (see leftmost_child) */
  struct malloc_tree_chunk* parent;   /* parent node; presumably points at the treebin for a root — TODO confirm in tree code */
  bindex_t                  index;    /* index of the treebin holding this chunk */
};
|
619 |
|
620 typedef struct malloc_tree_chunk tchunk; |
|
621 typedef struct malloc_tree_chunk* tchunkptr; |
|
622 typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ |
|
623 |
|
624 /* A little helper macro for trees */ |
|
625 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) |
|
/* Segment structure */
|
/* Descriptor for one contiguous region of memory managed by an
   allocator state; segments form a singly linked list headed at
   malloc_state::seg. */
struct malloc_segment {
  TUint8*                base;   /* base address */
  size_t                 size;   /* allocated size */
  struct malloc_segment* next;   /* ptr to next segment */
  flag_t                 sflags; /* mmap and extern flag (IS_MMAPPED_BIT / EXTERN_BIT) */
};
|
633 |
|
634 #define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT) |
|
635 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) |
|
636 |
|
637 typedef struct malloc_segment msegment; |
|
638 typedef struct malloc_segment* msegmentptr; |
|
639 |
|
/* Malloc state data structure */
|
641 |
|
642 #define NSMALLBINS (32U) |
|
643 #define NTREEBINS (32U) |
|
644 #define SMALLBIN_SHIFT (3U) |
|
645 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) |
|
646 #define TREEBIN_SHIFT (8U) |
|
647 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) |
|
648 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) |
|
649 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) |
|
650 |
|
/* Per-heap allocator state ("mstate").  Holds the bin bitmaps and bin
   arrays, the top and designated-victim chunks, bookkeeping counters,
   and the list of segments owned by this heap. */
struct malloc_state {
  binmap_t  smallmap;       /* bitmap of non-empty smallbins (see mark_smallmap) */
  binmap_t  treemap;        /* bitmap of non-empty treebins (see mark_treemap) */
  size_t    dvsize;         /* size of the dv chunk */
  size_t    topsize;        /* size of the top chunk */
  TUint8*   least_addr;     /* lowest address ever obtained — presumably for sanity checks; TODO confirm */
  mchunkptr dv;             /* "designated victim" chunk (dlmalloc terminology) */
  mchunkptr top;            /* topmost chunk; nonzero once initialized (see is_initialized) */
  size_t    trim_check;     /* threshold compared by should_trim(M,s) */
  size_t    magic;          /* checked against mparams.magic — TODO confirm usage */
  mchunkptr smallbins[(NSMALLBINS+1)*2]; /* repositioned smallbin heads (see smallbin_at) */
  tbinptr   treebins[NTREEBINS];         /* roots of the size-ordered trees */
  size_t    footprint;      /* current system-allocated bytes */
  size_t    max_footprint;  /* high-water mark of footprint */
  flag_t    mflags;         /* USE_LOCK_BIT / USE_MMAP_BIT / USE_NONCONTIGUOUS_BIT */
#if USE_LOCKS
  MLOCK_T   mutex;          /* locate lock among fields that rarely change */
  MLOCK_T   magic_init_mutex;  /* guards one-time mparams initialization */
  MLOCK_T   morecore_mutex;    /* serializes MORECORE call sequences */
#endif /* USE_LOCKS */
  msegment  seg;            /* head of the segment list */
};
|
673 |
|
674 typedef struct malloc_state* mstate; |
|
675 |
|
676 /* ------------- Global malloc_state and malloc_params ------------------- */ |
|
677 |
|
678 /* |
|
679 malloc_params holds global properties, including those that can be |
|
680 dynamically set using mallopt. There is a single instance, mparams, |
|
681 initialized in init_mparams. |
|
682 */ |
|
683 |
|
/* Global properties, including values that can be changed dynamically
   via mallopt (M_TRIM_THRESHOLD, M_GRANULARITY, M_MMAP_THRESHOLD).
   There is a single instance, mparams, initialized in init_mparams. */
struct malloc_params {
  size_t magic;           /* initialization sentinel/consistency value */
  size_t page_size;       /* system page size, used by page_align/is_page_aligned */
  size_t granularity;     /* allocation unit, used by granularity_align */
  size_t mmap_threshold;  /* request size above which mmap is preferred */
  size_t trim_threshold;  /* excess top size that triggers trimming */
  flag_t default_mflags;  /* mflags installed into new malloc_states */
#if USE_LOCKS
  MLOCK_T magic_init_mutex;  /* guards one-time initialization of magic */
#endif /* USE_LOCKS */
};
|
695 |
|
696 /* The global malloc_state used for all non-"mspace" calls */ |
|
697 /*AMOD: Need to check this as this will be the member of the class*/ |
|
698 |
|
699 //static struct malloc_state _gm_; |
|
700 //#define gm (&_gm_) |
|
701 |
|
702 //#define is_global(M) ((M) == &_gm_) |
|
703 /*AMOD: has changed*/ |
|
704 #define is_global(M) ((M) == gm) |
|
705 #define is_initialized(M) ((M)->top != 0) |
|
706 |
|
707 /* -------------------------- system alloc setup ------------------------- */ |
|
708 |
|
709 /* Operations on mflags */ |
|
710 |
|
711 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT) |
|
712 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) |
|
713 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) |
|
714 |
|
715 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) |
|
716 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) |
|
717 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) |
|
718 |
|
719 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) |
|
720 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) |
|
721 |
|
722 #define set_lock(M,L) ((M)->mflags = (L)? ((M)->mflags | USE_LOCK_BIT) : ((M)->mflags & ~USE_LOCK_BIT)) |
|
723 |
|
724 /* page-align a size */ |
|
725 #define page_align(S) (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE)) |
|
726 |
|
727 /* granularity-align a size */ |
|
728 #define granularity_align(S) (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE)) |
|
729 |
|
730 #define is_page_aligned(S) (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) |
|
731 #define is_granularity_aligned(S) (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) |
|
732 |
|
733 /* True if segment S holds address A */ |
|
734 #define segment_holds(S, A) ((TUint8*)(A) >= S->base && (TUint8*)(A) < S->base + S->size) |
|
735 |
|
736 #ifndef MORECORE_CANNOT_TRIM |
|
737 #define should_trim(M,s) ((s) > (M)->trim_check) |
|
738 #else /* MORECORE_CANNOT_TRIM */ |
|
739 #define should_trim(M,s) (0) |
|
740 #endif /* MORECORE_CANNOT_TRIM */ |
|
741 |
|
742 /* |
|
743 TOP_FOOT_SIZE is padding at the end of a segment, including space |
|
744 that may be needed to place segment records and fenceposts when new |
|
745 noncontiguous segments are added. |
|
746 */ |
|
747 #define TOP_FOOT_SIZE (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) |
|
748 |
|
749 /* ------------------------------- Hooks -------------------------------- */ |
|
750 |
|
751 /* |
|
752 PREACTION should be defined to return 0 on success, and nonzero on |
|
753 failure. If you are not using locking, you can redefine these to do |
|
754 anything you like. |
|
755 */ |
|
756 |
|
757 #if USE_LOCKS |
|
758 /* Ensure locks are initialized */ |
|
759 #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams()) |
|
760 #define PREACTION(M) (use_lock((M))?(ACQUIRE_LOCK((M)->mutex),0):0) /*Action to take like lock before alloc*/ |
|
761 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK((M)->mutex); } |
|
762 |
|
763 #else /* USE_LOCKS */ |
|
764 #ifndef PREACTION |
|
765 #define PREACTION(M) (0) |
|
766 #endif /* PREACTION */ |
|
767 #ifndef POSTACTION |
|
768 #define POSTACTION(M) |
|
769 #endif /* POSTACTION */ |
|
770 #endif /* USE_LOCKS */ |
|
771 |
|
772 /* |
|
773 CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. |
|
774 USAGE_ERROR_ACTION is triggered on detected bad frees and |
|
775 reallocs. The argument p is an address that might have triggered the |
|
776 fault. It is ignored by the two predefined actions, but might be |
|
777 useful in custom actions that try to help diagnose errors. |
|
778 */ |
|
779 |
|
780 #if PROCEED_ON_ERROR |
|
781 /* A count of the number of corruption errors causing resets */ |
|
782 int malloc_corruption_error_count; |
|
783 /* default corruption action */ |
|
784 static void reset_on_error(mstate m); |
|
785 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) |
|
786 #define USAGE_ERROR_ACTION(m, p) |
|
787 #else /* PROCEED_ON_ERROR */ |
|
788 #ifndef CORRUPTION_ERROR_ACTION |
|
789 #define CORRUPTION_ERROR_ACTION(m) ABORT |
|
790 #endif /* CORRUPTION_ERROR_ACTION */ |
|
791 #ifndef USAGE_ERROR_ACTION |
|
792 #define USAGE_ERROR_ACTION(m,p) ABORT |
|
793 #endif /* USAGE_ERROR_ACTION */ |
|
794 #endif /* PROCEED_ON_ERROR */ |
|
795 |
|
/* -------------------------- Debugging setup ---------------------------- */

#if ! DEBUG
/* Non-DEBUG build: every heap-consistency check compiles away to nothing. */
#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)
#define check_malloc_state(M)
#define check_top_chunk(M,P)
#else /* DEBUG */
/* DEBUG build: route each check macro to its do_check_* implementation,
   declared below and defined elsewhere in this component. */
#define check_free_chunk(M,P) do_check_free_chunk(M,P)
#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
#define check_top_chunk(M,P) do_check_top_chunk(M,P)
#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
#define check_malloc_state(M) do_check_malloc_state(M)
/* Consistency-check helpers over chunks, bins and whole malloc state. */
static void do_check_any_chunk(mstate m, mchunkptr p);
static void do_check_top_chunk(mstate m, mchunkptr p);
static void do_check_mmapped_chunk(mstate m, mchunkptr p);
static void do_check_inuse_chunk(mstate m, mchunkptr p);
static void do_check_free_chunk(mstate m, mchunkptr p);
static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
static void do_check_tree(mstate m, tchunkptr t);
static void do_check_treebin(mstate m, bindex_t i);
static void do_check_smallbin(mstate m, bindex_t i);
static void do_check_malloc_state(mstate m);
static int bin_find(mstate m, mchunkptr x);
static size_t traverse_and_check(mstate m);
#endif /* DEBUG */
|
825 |
|
/* ---------------------------- Indexing Bins ---------------------------- */

/* True iff a chunk of size s belongs in a smallbin. */
#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
/* Map a small chunk size to its smallbin index, and back. */
#define small_index(s) ((s) >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))

/* addressing by index. See above about smallbin repositioning */
#define smallbin_at(M, i) ((sbinptr)((TUint8*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i) (&((M)->treebins[i]))

/* Bit representing maximum resolved size in a treebin at i.
   FIX: the whole expansion and the argument i are now parenthesised; the
   previous unparenthesised ternary mis-bound when the macro was used as an
   operand of a larger expression (e.g. a shift or arithmetic operand). */
#define bit_for_tree_index(i) (((i) == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2))

/* Shift placing maximum resolved bit in a treebin at i as sign bit.
   FIX: argument i parenthesised in the comparison for the same reason. */
#define leftshift_for_tree_index(i) (((i) == NTREEBINS-1)? 0 : ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
|
/* ------------------------ Operations on bin maps ----------------------- */
/* bit corresponding to given index */
#define idx2bit(i) ((binmap_t)(1) << (i))
/* Mark/Clear bits with given index */
#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))

/* FIX: the three bit-twiddling macros below were each defined twice,
   verbatim, in this section; the redundant second copies were removed. */

/* isolate the least set bit of a bitmap */
#define least_bit(x) ((x) & -(x))

/* mask with all bits to left of least bit of x on */
#define left_bits(x) ((x<<1) | -(x<<1))

/* mask with all bits to left of or equal to least bit of x on */
#define same_or_left_bits(x) ((x) | -(x))
|
#if !INSECURE
/* Sanity predicates used before trusting heap metadata.  Note: this file
   sets INSECURE to 1 near the top, so the no-op (1) stubs below are the
   active definitions in this configuration. */
/* Check if address a is at least as high as any from MORECORE or MMAP */
#define ok_address(M, a) ((TUint8*)(a) >= (M)->least_addr)
/* Check if address of next chunk n is higher than base chunk p */
#define ok_next(p, n) ((TUint8*)(p) < (TUint8*)(n))
/* Check if p has its cinuse bit on */
#define ok_cinuse(p) cinuse(p)
/* Check if p has its pinuse bit on */
#define ok_pinuse(p) pinuse(p)
#else /* !INSECURE */
/* INSECURE: skip all metadata sanity checks (always "ok"). */
#define ok_address(M, a) (1)
#define ok_next(b, n) (1)
#define ok_cinuse(p) (1)
#define ok_pinuse(p) (1)
#endif /* !INSECURE */

#if (FOOTERS && !INSECURE)
/* Check if (alleged) mstate m has expected magic field */
#define ok_magic(M) ((M)->magic == mparams.magic)
#else /* (FOOTERS && !INSECURE) */
#define ok_magic(M) (1)
#endif /* (FOOTERS && !INSECURE) */

/* In gcc, use __builtin_expect to minimize impact of checks */
#if !INSECURE
#if defined(__GNUC__) && __GNUC__ >= 3
/* Hint the compiler that runtime checks normally succeed. */
#define RTCHECK(e) __builtin_expect(e, 1)
#else /* GNUC */
#define RTCHECK(e) (e)
#endif /* GNUC */

#else /* !INSECURE */
/* INSECURE: runtime checks are assumed true and compiled away. */
#define RTCHECK(e) (1)
#endif /* !INSECURE */
|
/* macros to set up inuse chunks with or without footers */
#if !FOOTERS
/* No footers: nothing to mark at the end of an inuse chunk. */
#define mark_inuse_foot(M,p,s)
/* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s) ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),((mchunkptr)(((TUint8*)(p)) + (s)))->head |= PINUSE_BIT)
/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
#define set_inuse_and_pinuse(M,p,s) ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),((mchunkptr)(((TUint8*)(p)) + (s)))->head |= PINUSE_BIT)
/* Set size, cinuse and pinuse bit of this chunk */
#define set_size_and_pinuse_of_inuse_chunk(M, p, s) ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
#else /* FOOTERS */
/* Set foot of inuse chunk to be xor of mstate and seed */
/* The xor with mparams.magic lightly obscures the owning-mstate pointer
   stored in the next chunk's prev_foot field. */
#define mark_inuse_foot(M,p,s) (((mchunkptr)((TUint8*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
/* Recover the owning mstate from a chunk's footer (inverse of the above). */
#define get_mstate_for(p) ((mstate)(((mchunkptr)((TUint8*)(p)+(chunksize(p))))->prev_foot ^ mparams.magic))
/* As the !FOOTERS variants above, but additionally writing the footer. */
#define set_inuse(M,p,s)\
 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
 (((mchunkptr)(((TUint8*)(p)) + (s)))->head |= PINUSE_BIT), \
 mark_inuse_foot(M,p,s))
#define set_inuse_and_pinuse(M,p,s)\
 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
 (((mchunkptr)(((TUint8*)(p)) + (s)))->head |= PINUSE_BIT),\
 mark_inuse_foot(M,p,s))
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
 mark_inuse_foot(M, p, s))
#endif /* !FOOTERS */
|
936 |
|
937 |
|
/* Route allocation/free to the global dlmalloc or to an mspace, depending
   on the MSPACES configuration (both 0 in this file's configuration, so
   the plain dlmalloc/dlfree mapping is active). */
#if ONLY_MSPACES
#define internal_malloc(m, b) mspace_malloc(m, b)
#define internal_free(m, mem) mspace_free(m,mem);
#else /* ONLY_MSPACES */
#if MSPACES
/* FIX: expansion parenthesised so the ternary cannot mis-bind when the
   macro is used as an operand of a larger expression. */
#define internal_malloc(m, b) ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
/* NOTE(review): bare if/else expansion — only safe when used as a
   standalone statement, not inside an unbraced if/else of the caller. */
#define internal_free(m, mem) if (m == gm) dlfree(mem); else mspace_free(m,mem);
#else /* MSPACES */
#define internal_malloc(m, b) dlmalloc(b)
#define internal_free(m, mem) dlfree(mem)
#endif /* MSPACES */
#endif /* ONLY_MSPACES */
|
/******CODE TO SUPPORT SLAB ALLOCATOR******/
|
951 |
|
/* Enable slab-allocator self-checks in non-release builds. */
#ifndef NDEBUG
#define CHECKING 1
#endif

#if CHECKING
/* NOTE(review): in the CHECKING branch ASSERT is left commented out, so it
   is presumably supplied elsewhere when CHECKING is on — confirm, else
   ASSERT is undefined in debug builds while CHECK(x) executes x. */
//#define ASSERT(x) {if (!(x)) abort();}
#define CHECK(x) x
#else
/* Release: both ASSERT and CHECK compile away. */
#define ASSERT(x) (void)0
#define CHECK(x) (void)0
#endif
|
963 |
|
964 class slab; |
|
965 class slabhdr; |
|
966 #define maxslabsize 56 |
|
967 #define pageshift 12 |
|
968 #define pagesize (1<<pageshift) |
|
969 #define slabshift 10 |
|
970 #define slabsize (1 << slabshift) |
|
971 #define cellalign 8 |
|
972 const unsigned slabfull = 0; |
|
973 const TInt slabsperpage = (int)(pagesize/slabsize); |
|
974 #define hibit(bits) (((unsigned)bits & 0xc) ? 2 + ((unsigned)bits>>3) : ((unsigned) bits>>1)) |
|
975 |
|
976 #define lowbit(bits) (((unsigned) bits&3) ? 1 - ((unsigned)bits&1) : 3 - (((unsigned)bits>>2)&1)) |
|
977 #define minpagepower pageshift+2 |
|
978 #define cellalign 8 |
|
/* Per-slab metadata: a packed state word plus tree links used to keep
   partially-full slabs in a binary tree (rooted at slabset::partial). */
class slabhdr
	{
	public:
	unsigned header;
	// made up of
	// bits |  31  | 30..28 | 27..18 | 17..12 |  11..8  |   7..0   |
	//      +----------+--------+--------+--------+---------+----------+
	// field | floating | zero   | used-4 |  size  | pagemap | free pos |
	//
	slab** parent; // reference to parent's pointer to this slab in tree
	slab* child1; // 1st child in tree
	slab* child2; // 2nd child in tree
	};
|
992 |
|
/* Extract the 'floating' flag (bit 31) of a slab header word; non-zero
   iff the flag is set. */
inline unsigned header_floating(unsigned h)
	{
	return h & 0x80000000u;
	}
|
/* Number of 4-byte cells available in a slab after its header. */
const unsigned maxuse = (slabsize - sizeof(slabhdr))>>2;
/* Cell index (in 4-byte units) of the first payload cell in a slab. */
const unsigned firstpos = sizeof(slabhdr)>>2;
/* Slab-tree consistency check; compiled out in this configuration. */
#define checktree(x) (void)0
|
/* Round addr down to the nearest multiple of aln (aln must be a power
   of two).  Works for integer and 32-bit pointer types alike. */
template <class T> inline T floor(const T addr, unsigned aln)
	{
	const unsigned mask = aln - 1;
	return T(unsigned(addr) & ~mask);
	}
|
/* Round addr up to the nearest multiple of aln (aln must be a power
   of two). */
template <class T> inline T ceiling(T addr, unsigned aln)
	{
	const unsigned bumped = unsigned(addr) + (aln - 1);
	return T(bumped & ~(aln - 1));
	}
|
/* Return the low-order bits of addr below alignment aln (aln must be a
   power of two), i.e. addr's offset within its aln-sized block. */
template <class T> inline unsigned lowbits(T addr, unsigned aln)
	{
	const unsigned mask = aln - 1;
	return unsigned(addr) & mask;
	}
|
/* Signed byte distance a1 - a2 between two (possibly unrelated-typed)
   pointers, as an int. */
template <class T1, class T2> inline int ptrdiff(const T1* a1, const T2* a2)
	{
	const unsigned char* lhs = reinterpret_cast<const unsigned char*>(a1);
	const unsigned char* rhs = reinterpret_cast<const unsigned char*>(a2);
	return lhs - rhs;
	}
|
/* Advance addr by ofs bytes, preserving its type. */
template <class T> inline T offset(T addr, unsigned ofs)
	{
	const unsigned raw = unsigned(addr) + ofs;
	return T(raw);
	}
|
/* The set of slabs serving one cell size.  'partial' roots the tree of
   slabs that still have free cells (linked via slabhdr's parent/child
   pointers). */
class slabset
	{
	public:
	slab* partial;
	};
|
1013 |
|
/* A slabsize-byte slab: metadata header followed by the cell payload that
   fills the remainder.  Member functions are defined elsewhere. */
class slab : public slabhdr
	{
	public:
	// Initialise this slab for cells of size class clz.
	void init(unsigned clz);
	//static slab* slabfor( void* p);
	// Map any pointer into a slab back to the containing slab
	// (slabs are slabsize-aligned).
	static slab* slabfor(const void* p) ;
	private:
	unsigned char payload[slabsize-sizeof(slabhdr)];
	};
|
/* A pagesize-byte page viewed as slabsperpage consecutive slabs. */
class page
	{
	public:
	// Map a slab pointer to the page containing it (defined below).
	inline static page* pagefor(slab* s);
	//slab slabs;
	slab slabs[slabsperpage];
	};
|
1030 |
|
1031 |
|
1032 inline page* page::pagefor(slab* s) |
|
1033 {return reinterpret_cast<page*>(floor(s, pagesize));} |
|
/* Book-keeping record for a whole-page allocation. */
struct pagecell
	{
	void* page;     // base address of the allocated page run
	unsigned size;  // size of the run -- NOTE(review): presumably bytes; confirm against users
	};
|
/******CODE TO SUPPORT SLAB ALLOCATOR******/
|
1040 #endif/*__DLA__*/ |
|