utilities/standaloneallocator/dla_p.h
changeset 16:3c88a81ff781
       
/****************************************************************************
**
 * Copyright (c) 2010 Nokia Corporation and/or its subsidiary(-ies).
 *
 * This file is part of Qt Web Runtime.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * version 2.1 as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
       
#ifndef __DLA__
#define __DLA__

#define DEFAULT_TRIM_THRESHOLD ((size_t)4U * (size_t)1024U)

#define __SYMBIAN__
#define MSPACES 0
#define HAVE_MORECORE 1
#define MORECORE_CONTIGUOUS 1
#define HAVE_MMAP 0
#define HAVE_MREMAP 0
#define DEFAULT_GRANULARITY (4096U)
#define FOOTERS 0
#define USE_LOCKS 0
#define INSECURE 1
#define NO_MALLINFO 0
#define HAVE_GETPAGESIZE

#define LACKS_SYS_TYPES_H
#ifndef LACKS_SYS_TYPES_H
#include <sys/types.h>  /* For size_t */
#else
#ifndef _SIZE_T_DECLARED
typedef unsigned int size_t;
#define _SIZE_T_DECLARED
#endif
#endif  /* LACKS_SYS_TYPES_H */

/* The maximum possible size_t value has all bits set */
#define MAX_SIZE_T           (~(size_t)0)
       
#ifndef ONLY_MSPACES
    #define ONLY_MSPACES 0
#endif  /* ONLY_MSPACES */

#ifndef MSPACES
    #if ONLY_MSPACES
        #define MSPACES 1
    #else   /* ONLY_MSPACES */
        #define MSPACES 0
    #endif  /* ONLY_MSPACES */
#endif  /* MSPACES */

#ifndef MALLOC_ALIGNMENT
    #define MALLOC_ALIGNMENT ((size_t)8U)
#endif  /* MALLOC_ALIGNMENT */

#ifndef FOOTERS
    #define FOOTERS 0
#endif  /* FOOTERS */

#ifndef ABORT
//  #define ABORT  abort()
    #define ABORT  User::Invariant()    // redefined so euser isn't dependent on oe
#endif  /* ABORT */

#ifndef ABORT_ON_ASSERT_FAILURE
    #define ABORT_ON_ASSERT_FAILURE 1
#endif  /* ABORT_ON_ASSERT_FAILURE */

#ifndef PROCEED_ON_ERROR
    #define PROCEED_ON_ERROR 0
#endif  /* PROCEED_ON_ERROR */

#ifndef USE_LOCKS
    #define USE_LOCKS 0
#endif  /* USE_LOCKS */

#ifndef INSECURE
    #define INSECURE 0
#endif  /* INSECURE */

#ifndef HAVE_MMAP
    #define HAVE_MMAP 1
#endif  /* HAVE_MMAP */

#ifndef MMAP_CLEARS
    #define MMAP_CLEARS 1
#endif  /* MMAP_CLEARS */

#ifndef HAVE_MREMAP
    #ifdef linux
        #define HAVE_MREMAP 1
    #else   /* linux */
        #define HAVE_MREMAP 0
    #endif  /* linux */
#endif  /* HAVE_MREMAP */

#ifndef MALLOC_FAILURE_ACTION
    //#define MALLOC_FAILURE_ACTION  errno = ENOMEM;
    #define MALLOC_FAILURE_ACTION ;
#endif  /* MALLOC_FAILURE_ACTION */

#ifndef HAVE_MORECORE
    #if ONLY_MSPACES
        #define HAVE_MORECORE 1 /*AMOD: has changed */
    #else   /* ONLY_MSPACES */
        #define HAVE_MORECORE 1
    #endif  /* ONLY_MSPACES */
#endif  /* HAVE_MORECORE */

#if !HAVE_MORECORE
    #define MORECORE_CONTIGUOUS 0
#else   /* !HAVE_MORECORE */
    #ifndef MORECORE
        #define MORECORE DLAdjust
    #endif  /* MORECORE */
    #ifndef MORECORE_CONTIGUOUS
        #define MORECORE_CONTIGUOUS 0
    #endif  /* MORECORE_CONTIGUOUS */
#endif  /* !HAVE_MORECORE */

#ifndef DEFAULT_GRANULARITY
    #if MORECORE_CONTIGUOUS
        #define DEFAULT_GRANULARITY 4096  /* 0 means to compute in init_mparams */
    #else   /* MORECORE_CONTIGUOUS */
        #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
    #endif  /* MORECORE_CONTIGUOUS */
#endif  /* DEFAULT_GRANULARITY */

#ifndef DEFAULT_TRIM_THRESHOLD
    #ifndef MORECORE_CANNOT_TRIM
        #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
    #else   /* MORECORE_CANNOT_TRIM */
        #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
    #endif  /* MORECORE_CANNOT_TRIM */
#endif  /* DEFAULT_TRIM_THRESHOLD */

#ifndef DEFAULT_MMAP_THRESHOLD
    #if HAVE_MMAP
        #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
    #else   /* HAVE_MMAP */
        #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
    #endif  /* HAVE_MMAP */
#endif  /* DEFAULT_MMAP_THRESHOLD */

#ifndef USE_BUILTIN_FFS
    #define USE_BUILTIN_FFS 0
#endif  /* USE_BUILTIN_FFS */

#ifndef USE_DEV_RANDOM
    #define USE_DEV_RANDOM 0
#endif  /* USE_DEV_RANDOM */

#ifndef NO_MALLINFO
    #define NO_MALLINFO 0
#endif  /* NO_MALLINFO */
#ifndef MALLINFO_FIELD_TYPE
    #define MALLINFO_FIELD_TYPE size_t
#endif  /* MALLINFO_FIELD_TYPE */
       
/*
  mallopt tuning options.  SVID/XPG defines four standard parameter
  numbers for mallopt, normally defined in malloc.h.  None of these
  are used in this malloc, so setting them has no effect. But this
  malloc does support the following options.
*/

#define M_TRIM_THRESHOLD     (-1)
#define M_GRANULARITY        (-2)
#define M_MMAP_THRESHOLD     (-3)
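/*
  Usage sketch (illustrative, assuming the usual dlmalloc-style
  mallopt(int, int) entry point is compiled in with this header):

      mallopt(M_TRIM_THRESHOLD, 64 * 1024);  // trim top when it exceeds 64 KB
      mallopt(M_GRANULARITY,    8 * 4096);   // grow the heap in 32 KB steps

  A nonzero return indicates the option was accepted.
*/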
       
   182 
       
   183 #if !NO_MALLINFO
       
   184 /*
       
   185   This version of malloc supports the standard SVID/XPG mallinfo
       
   186   routine that returns a struct containing usage properties and
       
   187   statistics. It should work on any system that has a
       
   188   /usr/include/malloc.h defining struct mallinfo.  The main
       
   189   declaration needed is the mallinfo struct that is returned (by-copy)
       
   190   by mallinfo().  The malloinfo struct contains a bunch of fields that
       
   191   are not even meaningful in this version of malloc.  These fields are
       
   192   are instead filled by mallinfo() with other numbers that might be of
       
   193   interest.
       
   194 
       
   195   HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
       
   196   /usr/include/malloc.h file that includes a declaration of struct
       
   197   mallinfo.  If so, it is included; else a compliant version is
       
   198   declared below.  These must be precisely the same for mallinfo() to
       
   199   work.  The original SVID version of this struct, defined on most
       
   200   systems with mallinfo, declares all fields as ints. But some others
       
   201   define as unsigned long. If your system defines the fields using a
       
   202   type of different width than listed here, you MUST #include your
       
   203   system version and #define HAVE_USR_INCLUDE_MALLOC_H.
       
   204 */
       
   205 
       
   206 /* #define HAVE_USR_INCLUDE_MALLOC_H */
       
   207 
       
   208 #ifdef HAVE_USR_INCLUDE_MALLOC_H
       
   209 #include "/usr/include/malloc.h"
       
   210 #else /* HAVE_USR_INCLUDE_MALLOC_H */
       
   211 
       
   212 struct mallinfo {
       
   213   MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
       
   214   MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
       
   215   MALLINFO_FIELD_TYPE smblks;   /* always 0 */
       
   216   MALLINFO_FIELD_TYPE hblks;    /* always 0 */
       
   217   MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
       
   218   MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
       
   219   MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
       
   220   MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
       
   221   MALLINFO_FIELD_TYPE fordblks; /* total free space */
       
   222   MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
       
   223   MALLINFO_FIELD_TYPE cellCount;/* Number of chunks allocated*/
       
   224   MALLINFO_FIELD_TYPE largestBlock;
       
   225 };
       
   226 
       
   227 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
       
   228 #endif /* NO_MALLINFO */
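/*
  Usage sketch (illustrative, assuming the dlmalloc-style mallinfo() entry
  point is compiled in with this struct):

      struct mallinfo mi = mallinfo();
      size_t inUse     = mi.uordblks;   // total allocated space
      size_t freeSpace = mi.fordblks;   // total free space
      // mi.keepcost is how much malloc_trim() could return to the system

  The trailing cellCount/largestBlock fields are extensions of this port
  and are not part of the SVID/XPG layout.
*/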
       
#if MSPACES
    typedef void* mspace;
#endif /* MSPACES */

#ifndef __SYMBIAN__

#include <stdio.h>       /* for printing in malloc_stats */

#ifndef LACKS_ERRNO_H
    #include <errno.h>   /* for MALLOC_FAILURE_ACTION */
#endif /* LACKS_ERRNO_H */

#if FOOTERS
    #include <time.h>    /* for magic initialization */
#endif /* FOOTERS */

#ifndef LACKS_STDLIB_H
    #include <stdlib.h>  /* for abort() */
#endif /* LACKS_STDLIB_H */

#ifdef DEBUG
    #if ABORT_ON_ASSERT_FAILURE
        #define assert(x) if (!(x)) ABORT
    #else /* ABORT_ON_ASSERT_FAILURE */
        #include <assert.h>
    #endif /* ABORT_ON_ASSERT_FAILURE */
#else  /* DEBUG */
        #define assert(x)
#endif /* DEBUG */

#ifndef LACKS_STRING_H
    #include <string.h>  /* for memset etc */
#endif  /* LACKS_STRING_H */

#if USE_BUILTIN_FFS
    #ifndef LACKS_STRINGS_H
        #include <strings.h>     /* for ffs */
    #endif /* LACKS_STRINGS_H */
#endif /* USE_BUILTIN_FFS */

#if HAVE_MMAP
    #ifndef LACKS_SYS_MMAN_H
        #include <sys/mman.h>    /* for mmap */
    #endif /* LACKS_SYS_MMAN_H */
    #ifndef LACKS_FCNTL_H
        #include <fcntl.h>
    #endif /* LACKS_FCNTL_H */
#endif /* HAVE_MMAP */

#if HAVE_MORECORE
    #ifndef LACKS_UNISTD_H
        #include <unistd.h>      /* for sbrk */
    extern void*     sbrk(size_t);
    #else /* LACKS_UNISTD_H */
        #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
            extern void*     sbrk(ptrdiff_t);
            /* AMOD: sbrk is not defined on WIN32; need to check on Symbian */
        #endif /* FreeBSD etc */
    #endif /* LACKS_UNISTD_H */
#endif /* HAVE_MORECORE */

#endif /* __SYMBIAN__ */
       
#define assert(x)   ASSERT(x)

#ifndef WIN32
    #ifndef malloc_getpagesize
        #ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
            #ifndef _SC_PAGE_SIZE
                #define _SC_PAGE_SIZE _SC_PAGESIZE
            #endif
        #endif
        #ifdef _SC_PAGE_SIZE
            #define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
        #else
            #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
                extern size_t getpagesize();
                #define malloc_getpagesize getpagesize()
            #else
                #ifdef WIN32 /* use supplied emulation of getpagesize */
                    #define malloc_getpagesize getpagesize()
                #else
                    #ifndef LACKS_SYS_PARAM_H
                        #include <sys/param.h>
                    #endif
                    #ifdef EXEC_PAGESIZE
                        #define malloc_getpagesize EXEC_PAGESIZE
                    #else
                        #ifdef NBPG
                            #ifndef CLSIZE
                                #define malloc_getpagesize NBPG
                            #else
                                #define malloc_getpagesize (NBPG * CLSIZE)
                            #endif
                        #else
                            #ifdef NBPC
                                #define malloc_getpagesize NBPC
                            #else
                                #ifdef PAGESIZE
                                    #define malloc_getpagesize PAGESIZE
                                #else /* just guess */
                                    #define malloc_getpagesize ((size_t)4096U)
                                #endif
                            #endif
                        #endif
                    #endif
                #endif
            #endif
        #endif
    #endif  /* malloc_getpagesize */
#endif  /* WIN32 */
       
/* ------------------- size_t and alignment properties -------------------- */

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE         (sizeof(size_t))
#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO         ((size_t)0)
#define SIZE_T_ONE          ((size_t)1)
#define SIZE_T_TWO          ((size_t)2)
#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)

/* True if address a has acceptable alignment */
//#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
#define is_aligned(A)       (((unsigned int)((A)) & (CHUNK_ALIGN_MASK)) == 0)

/* the number of bytes to offset an address to align it */
#define align_offset(A)\
    ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
    ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
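/*
  Worked example: with MALLOC_ALIGNMENT == 8, CHUNK_ALIGN_MASK == 7, so

      is_aligned(0x1000)   -> true   (0x1000 & 7 == 0)
      is_aligned(0x1003)   -> false
      align_offset(0x1000) -> 0      (already aligned)
      align_offset(0x1003) -> 5      (0x1003 + 5 == 0x1008, the next 8-byte boundary)
*/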
       
/* -------------------------- MMAP preliminaries ------------------------- */

/*
   If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
   checks to fail so the compiler optimizer can delete code rather than
   using so many "#if"s.
*/


/* MORECORE and MMAP must return MFAIL on failure */
#define MFAIL                ((void*)(MAX_SIZE_T))
#define CMFAIL               ((TUint8*)(MFAIL)) /* defined for convenience */

#if !HAVE_MMAP
    #define IS_MMAPPED_BIT       (SIZE_T_ZERO)
    #define USE_MMAP_BIT         (SIZE_T_ZERO)
    #define CALL_MMAP(s)         MFAIL
    #define CALL_MUNMAP(a, s)    (-1)
    #define DIRECT_MMAP(s)       MFAIL
#else /* !HAVE_MMAP */
    #define IS_MMAPPED_BIT       (SIZE_T_ONE)
    #define USE_MMAP_BIT         (SIZE_T_ONE)
        #ifndef WIN32
            #define CALL_MUNMAP(a, s)    DLUMMAP((a),(s)) /*munmap((a), (s))*/
            #define MMAP_PROT            (PROT_READ|PROT_WRITE)
            #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
                #define MAP_ANONYMOUS        MAP_ANON
            #endif /* MAP_ANON */
            #ifdef MAP_ANONYMOUS
                #define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
                #define CALL_MMAP(s)         mmap(0, (s), MMAP_PROT, (int)MMAP_FLAGS, -1, 0)
            #else /* MAP_ANONYMOUS */
                /*
                   Nearly all versions of mmap support MAP_ANONYMOUS, so the following
                   is unlikely to be needed, but is supplied just in case.
                */
                #define MMAP_FLAGS           (MAP_PRIVATE)
                //static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
                #define CALL_MMAP(s) DLMMAP(s)
                /*#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
                       (dev_zero_fd = open("/dev/zero", O_RDWR), \
                        mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
                        mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
                        */
                #define CALL_REMAP(a, s, d)    DLREMAP((a),(s),(d))
            #endif /* MAP_ANONYMOUS */
            #define DIRECT_MMAP(s)       CALL_MMAP(s)
        #else /* WIN32 */
            #define CALL_MMAP(s)         win32mmap(s)
            #define CALL_MUNMAP(a, s)    win32munmap((a), (s))
            #define DIRECT_MMAP(s)       win32direct_mmap(s)
        #endif /* WIN32 */
#endif /* HAVE_MMAP */

#if HAVE_MMAP && HAVE_MREMAP
    #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
#else  /* HAVE_MMAP && HAVE_MREMAP */
    #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
#endif /* HAVE_MMAP && HAVE_MREMAP */

#if HAVE_MORECORE
    #define CALL_MORECORE(S)     SetBrk(S)
#else  /* HAVE_MORECORE */
    #define CALL_MORECORE(S)     MFAIL
#endif /* HAVE_MORECORE */

/* mstate bit set if contiguous morecore disabled or failed */
#define USE_NONCONTIGUOUS_BIT (4U)

/* segment bit set in create_mspace_with_base */
#define EXTERN_BIT            (8U)
       
#if USE_LOCKS
/*
  When locks are defined, there are up to two global locks:
  * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
    MORECORE.  In many cases sys_alloc requires two calls, which should
    not be interleaved with calls by other threads.  This does not
    protect against direct calls to MORECORE by other threads not
    using this lock, so there is still code to cope as best we can with
    interference.
  * magic_init_mutex ensures that mparams.magic and other
    unique mparams values are initialized only once.
*/
    #ifndef WIN32
        /* By default use posix locks */
        #include <pthread.h>
        #define MLOCK_T pthread_mutex_t
        #define INITIAL_LOCK(l)      pthread_mutex_init(l, NULL)
        #define ACQUIRE_LOCK(l)      pthread_mutex_lock(l)
        #define RELEASE_LOCK(l)      pthread_mutex_unlock(l)

        #if HAVE_MORECORE
            //static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
        #endif /* HAVE_MORECORE */
            //static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
    #else /* WIN32 */
        #define MLOCK_T long
        #define INITIAL_LOCK(l)      *(l)=0
        #define ACQUIRE_LOCK(l)      win32_acquire_lock(l)
        #define RELEASE_LOCK(l)      win32_release_lock(l)
        #if HAVE_MORECORE
            static MLOCK_T morecore_mutex;
        #endif /* HAVE_MORECORE */
        static MLOCK_T magic_init_mutex;
    #endif /* WIN32 */
    #define USE_LOCK_BIT               (2U)
#else  /* USE_LOCKS */
    #define USE_LOCK_BIT               (0U)
    #define INITIAL_LOCK(l)
#endif /* USE_LOCKS */

#if USE_LOCKS && HAVE_MORECORE
    #define ACQUIRE_MORECORE_LOCK(M)    ACQUIRE_LOCK((M->morecore_mutex)/*&morecore_mutex*/);
    #define RELEASE_MORECORE_LOCK(M)    RELEASE_LOCK((M->morecore_mutex)/*&morecore_mutex*/);
#else /* USE_LOCKS && HAVE_MORECORE */
    #define ACQUIRE_MORECORE_LOCK(M)
    #define RELEASE_MORECORE_LOCK(M)
#endif /* USE_LOCKS && HAVE_MORECORE */

#if USE_LOCKS
    /* Currently not supporting this */
    #define ACQUIRE_MAGIC_INIT_LOCK(M)  ACQUIRE_LOCK(((M)->magic_init_mutex));
    //AMOD: changed #define ACQUIRE_MAGIC_INIT_LOCK()
    //#define RELEASE_MAGIC_INIT_LOCK()
    #define RELEASE_MAGIC_INIT_LOCK(M)  RELEASE_LOCK(((M)->magic_init_mutex));
#else  /* USE_LOCKS */
    #define ACQUIRE_MAGIC_INIT_LOCK(M)
    #define RELEASE_MAGIC_INIT_LOCK(M)
#endif /* USE_LOCKS */
       
/* CHUNK representation */
struct malloc_chunk {
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
  size_t               head;       /* Size and inuse bits. */
  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk  mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
typedef unsigned int bindex_t;         /* Described below */
typedef unsigned int binmap_t;         /* Described below */
typedef unsigned int flag_t;           /* The type of various bit flag sets */


/* ------------------- Chunks sizes and alignments ----------------------- */
#define MCHUNK_SIZE         (sizeof(mchunk))

#if FOOTERS
    #define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
#else /* FOOTERS */
    #define CHUNK_OVERHEAD      (SIZE_T_SIZE)
#endif /* FOOTERS */

/* MMapped chunks need a second word of overhead ... */
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
/* ... and additional padding for fake next-chunk at foot */
#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)

/* The smallest size we can malloc is an aligned minimal chunk */
#define MIN_CHUNK_SIZE ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p)        ((void*)((TUint8*)(p)       + TWO_SIZE_T_SIZES))
#define mem2chunk(mem)      ((mchunkptr)((TUint8*)(mem) - TWO_SIZE_T_SIZES))
/* chunk associated with aligned address A */
#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))

/* Bounds on request (not chunk) sizes. */
#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

/* pad request bytes into a usable size */
#define pad_request(req) (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* pad request, checking for minimum (but not maximum) */
#define request2size(req) (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
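/*
  Worked example (32-bit build: SIZE_T_SIZE == 4, MALLOC_ALIGNMENT == 8,
  FOOTERS == 0, so MCHUNK_SIZE == 16, MIN_CHUNK_SIZE == 16,
  CHUNK_OVERHEAD == 4 and MIN_REQUEST == 11):

      request2size(10) -> 16    (below MIN_REQUEST, clamped to MIN_CHUNK_SIZE)
      request2size(20) -> 24    ((20 + 4 + 7) & ~7)

  i.e. every user request is rounded up to an 8-byte-aligned chunk size
  that also covers the in-use header overhead.
*/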
       
/* ------------------ Operations on head and foot fields ----------------- */

/*
  The head field of a chunk is or'ed with PINUSE_BIT when the previous
  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in
  use. If the chunk was obtained with mmap, the prev_foot field has
  IS_MMAPPED_BIT set, otherwise it holds the offset of the base of the
  mmapped region to the base of the chunk.
*/
#define PINUSE_BIT          (SIZE_T_ONE)
#define CINUSE_BIT          (SIZE_T_TWO)
#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)

/* Head value for fenceposts */
#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)

/* extraction of fields from head words */
#define cinuse(p)           ((p)->head & CINUSE_BIT)
#define pinuse(p)           ((p)->head & PINUSE_BIT)
#define chunksize(p)        ((p)->head & ~(INUSE_BITS))
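/*
  Worked example: the low two bits of head never conflict with the size
  because chunk sizes are multiples of 8.  A 24-byte chunk that is in use,
  following an in-use neighbour, has

      head == 24 | PINUSE_BIT | CINUSE_BIT == 27 (0x1B)
      chunksize -> 24,  cinuse -> nonzero,  pinuse -> nonzero
*/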
       
#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
#define clear_cinuse(p)     ((p)->head &= ~CINUSE_BIT)

/* Treat space at ptr +/- offset as a chunk */
#define chunk_plus_offset(p, s)  ((mchunkptr)(((TUint8*)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((TUint8*)(p)) - (s)))

/* Ptr to next or previous physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((TUint8*)(p)) + ((p)->head & ~INUSE_BITS)))
#define prev_chunk(p) ((mchunkptr)( ((TUint8*)(p)) - ((p)->prev_foot) ))

/* extract next chunk's pinuse bit */
#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)

/* Get/set size at footer */
#define get_foot(p, s)  (((mchunkptr)((TUint8*)(p) + (s)))->prev_foot)
#define set_foot(p, s)  (((mchunkptr)((TUint8*)(p) + (s)))->prev_foot = (s))

/* Set size, pinuse bit, and foot */
#define set_size_and_pinuse_of_free_chunk(p, s) ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

/* Set size, pinuse bit, foot, and clear next pinuse */
#define set_free_with_pinuse(p, s, n) (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

#define is_mmapped(p) (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))

/* Get the internal overhead associated with chunk p */
#define overhead_for(p) (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)

/* Return true if malloced space is not necessarily cleared */
#if MMAP_CLEARS
    #define calloc_must_clear(p) (!is_mmapped(p))
#else /* MMAP_CLEARS */
    #define calloc_must_clear(p) (1)
#endif /* MMAP_CLEARS */
       
/* ---------------------- Overlaid data structures ----------------------- */
struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t                    prev_foot;
  size_t                    head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;

  struct malloc_tree_chunk* child[2];
  struct malloc_tree_chunk* parent;
  bindex_t                  index;
  size_t                    pageout;  /* chunk pageout flag */
  size_t                    npages;   /* chunk pageout size */
};

typedef struct malloc_tree_chunk  tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */

/* A little helper macro for trees */
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])

/* Segment structure */
struct malloc_segment {
  TUint8*                base;     /* base address */
  size_t                 size;     /* allocated size */
  struct malloc_segment* next;     /* ptr to next segment */
  flag_t                 sflags;   /* mmap and extern flag */
};

#define is_mmapped_segment(S)  ((S)->sflags & IS_MMAPPED_BIT)
#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)

typedef struct malloc_segment  msegment;
typedef struct malloc_segment* msegmentptr;

/* Malloc state data structure */

#define NSMALLBINS        (32U)
#define NTREEBINS         (32U)
#define SMALLBIN_SHIFT    (3U)
#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT     (8U)
#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
       
struct malloc_state {
  binmap_t   smallmap;
  binmap_t   treemap;
  size_t     dvsize;      /* unused */
  size_t     topsize;
  TUint8*    least_addr;
  mchunkptr  dv;          /* unused */
  mchunkptr  top;
  size_t     trim_check;
  size_t     magic;
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
  tbinptr    treebins[NTREEBINS];
  size_t     footprint;
  size_t     max_footprint;
  flag_t     mflags;
#if USE_LOCKS
  MLOCK_T    mutex;       /* locate lock among fields that rarely change */
  MLOCK_T    magic_init_mutex;
  MLOCK_T    morecore_mutex;
#endif /* USE_LOCKS */
  msegment   seg;
};

typedef struct malloc_state*    mstate;
       
/* ------------- Global malloc_state and malloc_params ------------------- */

/*
  malloc_params holds global properties, including those that can be
  dynamically set using mallopt. There is a single instance, mparams,
  initialized in init_mparams.
*/

struct malloc_params {
  size_t magic;
  size_t page_size;
  size_t granularity;
  size_t mmap_threshold;
  size_t trim_threshold;
  flag_t default_mflags;
#if USE_LOCKS
  MLOCK_T   magic_init_mutex;
#endif /* USE_LOCKS */
};

/* The global malloc_state used for all non-"mspace" calls */
/* AMOD: Need to check this, as it will be a member of the class */

//static struct malloc_state _gm_;
//#define gm                 (&_gm_)

//#define is_global(M)       ((M) == &_gm_)
/* AMOD: has changed */
#define is_global(M)       ((M) == gm)
#define is_initialized(M)  ((M)->top != 0)
       
/* -------------------------- system alloc setup ------------------------- */

/* Operations on mflags */

#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)

#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)

#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)

#define set_lock(M,L) ((M)->mflags = (L)? ((M)->mflags | USE_LOCK_BIT) :  ((M)->mflags & ~USE_LOCK_BIT))

/* page-align a size */
#define page_align(S) (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))

/* granularity-align a size */
#define granularity_align(S)  (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))

#define is_page_aligned(S)   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S)   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
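/*
  Worked example (page_size == 4096):

      page_align(1)    -> 4096
      page_align(4000) -> 4096
      page_align(4096) -> 8192

  Note that, as written, this adds a full page_size before masking, so an
  already page-aligned size is bumped up by one page (stock dlmalloc adds
  page_size - 1 instead).
*/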
       
/*  True if segment S holds address A */
#define segment_holds(S, A)  ((TUint8*)(A) >= S->base && (TUint8*)(A) < S->base + S->size)

#ifndef MORECORE_CANNOT_TRIM
    #define should_trim(M,s)  ((s) > (M)->trim_check)
#else  /* MORECORE_CANNOT_TRIM */
    #define should_trim(M,s)  (0)
#endif /* MORECORE_CANNOT_TRIM */

/*
  TOP_FOOT_SIZE is padding at the end of a segment, including space
  that may be needed to place segment records and fenceposts when new
  noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
       
/* -------------------------------  Hooks -------------------------------- */

/*
  PREACTION should be defined to return 0 on success, and nonzero on
  failure. If you are not using locking, you can redefine these to do
  anything you like.
*/

#if USE_LOCKS
    /* Ensure locks are initialized */
    #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
    #define PREACTION(M) (use_lock((M))?(ACQUIRE_LOCK((M)->mutex),0):0) /* action to take, e.g. lock, before alloc */
    #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK((M)->mutex); }

#else /* USE_LOCKS */
    #ifndef PREACTION
        #define PREACTION(M) (0)
    #endif  /* PREACTION */
    #ifndef POSTACTION
        #define POSTACTION(M)
    #endif  /* POSTACTION */
#endif /* USE_LOCKS */

/*
  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
  USAGE_ERROR_ACTION is triggered on detected bad frees and
  reallocs. The argument p is an address that might have triggered the
  fault. It is ignored by the two predefined actions, but might be
  useful in custom actions that try to help diagnose errors.
*/

#if PROCEED_ON_ERROR
    /* A count of the number of corruption errors causing resets */
    int malloc_corruption_error_count;
    /* default corruption action */
    static void reset_on_error(mstate m);
    #define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
    #define USAGE_ERROR_ACTION(m, p)
#else /* PROCEED_ON_ERROR */
    #ifndef CORRUPTION_ERROR_ACTION
        #define CORRUPTION_ERROR_ACTION(m) ABORT
    #endif /* CORRUPTION_ERROR_ACTION */
    #ifndef USAGE_ERROR_ACTION
        #define USAGE_ERROR_ACTION(m,p) ABORT
    #endif /* USAGE_ERROR_ACTION */
#endif /* PROCEED_ON_ERROR */
       
/* -------------------------- Debugging setup ---------------------------- */

#if ! DEBUG
    #define check_free_chunk(M,P)
    #define check_inuse_chunk(M,P)
    #define check_malloced_chunk(M,P,N)
    #define check_mmapped_chunk(M,P)
    #define check_malloc_state(M)
    #define check_top_chunk(M,P)
#else /* DEBUG */
    #define check_free_chunk(M,P)       do_check_free_chunk(M,P)
    #define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
    #define check_top_chunk(M,P)        do_check_top_chunk(M,P)
    #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
    #define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
    #define check_malloc_state(M)       do_check_malloc_state(M)
    static void   do_check_any_chunk(mstate m, mchunkptr p);
    static void   do_check_top_chunk(mstate m, mchunkptr p);
    static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
    static void   do_check_inuse_chunk(mstate m, mchunkptr p);
    static void   do_check_free_chunk(mstate m, mchunkptr p);
    static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
    static void   do_check_tree(mstate m, tchunkptr t);
    static void   do_check_treebin(mstate m, bindex_t i);
    static void   do_check_smallbin(mstate m, bindex_t i);
    static void   do_check_malloc_state(mstate m);
    static int    bin_find(mstate m, mchunkptr x);
    static size_t traverse_and_check(mstate m);
#endif /* DEBUG */
       
/* ---------------------------- Indexing Bins ---------------------------- */

#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))

/* addressing by index. See above about smallbin repositioning */
#define smallbin_at(M, i)   ((sbinptr)((TUint8*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i)     (&((M)->treebins[i]))


/* Bit representing maximum resolved size in a treebin at i */
#define bit_for_tree_index(i) (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define leftshift_for_tree_index(i) ((i == NTREEBINS-1)? 0 : ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
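/*
  Worked example: sizes below MIN_LARGE_SIZE (256) are "small" and map to
  exact-size bins spaced SMALLBIN_WIDTH (8) apart:

      small_index(40) -> 5,   small_index2size(5) -> 40

  Larger sizes go to power-of-two-range tree bins, two bins per power:

      minsize_for_tree_index(0) -> 256
      minsize_for_tree_index(1) -> 384
      minsize_for_tree_index(2) -> 512
*/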
       
/* ------------------------ Operations on bin maps ----------------------- */
/* bit corresponding to given index */
#define idx2bit(i)              ((binmap_t)(1) << (i))
/* Mark/Clear bits with given index */
#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))

/* isolate the least set bit of a bitmap */
#define least_bit(x)         ((x) & -(x))

/* mask with all bits to left of least bit of x on */
#define left_bits(x)         ((x<<1) | -(x<<1))

/* mask with all bits to left of or equal to least bit of x on */
#define same_or_left_bits(x) ((x) | -(x))
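/*
  Worked example (two's-complement bit tricks):

      least_bit(0b0110100)         -> 0b0000100    (lowest set bit isolated)
      left_bits(0b0000100)         -> 0b...1111000 (everything strictly above it)
      same_or_left_bits(0b0000100) -> 0b...1111100

  These let the allocator find the first non-empty bin at or above a
  target index in a couple of instructions instead of a loop.
*/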
       
       
#if !INSECURE
    /* Check if address a is at least as high as any from MORECORE or MMAP */
    #define ok_address(M, a) ((TUint8*)(a) >= (M)->least_addr)
    /* Check if address of next chunk n is higher than base chunk p */
    #define ok_next(p, n)    ((TUint8*)(p) < (TUint8*)(n))
    /* Check if p has its cinuse bit on */
    #define ok_cinuse(p)     cinuse(p)
    /* Check if p has its pinuse bit on */
    #define ok_pinuse(p)     pinuse(p)
#else /* !INSECURE */
    #define ok_address(M, a) (1)
    #define ok_next(b, n)    (1)
    #define ok_cinuse(p)     (1)
    #define ok_pinuse(p)     (1)
#endif /* !INSECURE */

#if (FOOTERS && !INSECURE)
    /* Check if (alleged) mstate m has expected magic field */
    #define ok_magic(M)      ((M)->magic == mparams.magic)
#else  /* (FOOTERS && !INSECURE) */
    #define ok_magic(M)      (1)
#endif /* (FOOTERS && !INSECURE) */

/* In gcc, use __builtin_expect to minimize impact of checks */
#if !INSECURE
    #if defined(__GNUC__) && __GNUC__ >= 3
        #define RTCHECK(e)  __builtin_expect(e, 1)
    #else /* GNUC */
        #define RTCHECK(e)  (e)
    #endif /* GNUC */

#else /* !INSECURE */
    #define RTCHECK(e)  (1)
#endif /* !INSECURE */
       
/* macros to set up inuse chunks with or without footers */
#if !FOOTERS
    #define mark_inuse_foot(M,p,s)
    /* Set cinuse bit and pinuse bit of next chunk */
    #define set_inuse(M,p,s)  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),((mchunkptr)(((TUint8*)(p)) + (s)))->head |= PINUSE_BIT)
    /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
    #define set_inuse_and_pinuse(M,p,s) ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),((mchunkptr)(((TUint8*)(p)) + (s)))->head |= PINUSE_BIT)
    /* Set size, cinuse and pinuse bit of this chunk */
    #define set_size_and_pinuse_of_inuse_chunk(M, p, s) ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
#else /* FOOTERS */
    /* Set foot of inuse chunk to be xor of mstate and seed */
    #define mark_inuse_foot(M,p,s) (((mchunkptr)((TUint8*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
    #define get_mstate_for(p) ((mstate)(((mchunkptr)((TUint8*)(p)+(chunksize(p))))->prev_foot ^ mparams.magic))
    #define set_inuse(M,p,s)\
        ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
        (((mchunkptr)(((TUint8*)(p)) + (s)))->head |= PINUSE_BIT), \
        mark_inuse_foot(M,p,s))
    #define set_inuse_and_pinuse(M,p,s)\
        ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
        (((mchunkptr)(((TUint8*)(p)) + (s)))->head |= PINUSE_BIT),\
        mark_inuse_foot(M,p,s))
    #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
        ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
        mark_inuse_foot(M, p, s))
#endif /* !FOOTERS */
       
#if ONLY_MSPACES
    #define internal_malloc(m, b) mspace_malloc(m, b)
    #define internal_free(m, mem) mspace_free(m,mem);
#else /* ONLY_MSPACES */
    #if MSPACES
        #define internal_malloc(m, b) (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
        #define internal_free(m, mem) if (m == gm) dlfree(mem); else mspace_free(m,mem);
    #else /* MSPACES */
        #define internal_malloc(m, b) dlmalloc(b)
        #define internal_free(m, mem) dlfree(mem)
    #endif /* MSPACES */
#endif /* ONLY_MSPACES */
       
/****** CODE TO SUPPORT SLAB ALLOCATOR ******/

    #ifndef NDEBUG
    #define CHECKING 1
    #endif

    #if CHECKING
    #ifndef ASSERT
    #define ASSERT(x) {if (!(x)) abort();}
    #endif
    #define CHECK(x) x
    #else
    #ifndef ASSERT
    #define ASSERT(x) (void)0
    #endif
    #define CHECK(x) (void)0
    #endif
       
    class slab;
    class slabhdr;
    #define maxslabsize     60
    #define pageshift       12
    #define pagesize        (1<<pageshift)
    #define slabshift       10
    #define slabsize        (1 << slabshift)
    #define cellalign       8
    const unsigned slabfull = 0;
    const TInt  slabsperpage    =   (int)(pagesize/slabsize);
    #define hibit(bits) (((unsigned)bits & 0xc) ? 2 + ((unsigned)bits>>3) : ((unsigned) bits>>1))

    #define lowbit(bits)    (((unsigned) bits&3) ? 1 - ((unsigned)bits&1) : 3 - (((unsigned)bits>>2)&1))
    #define minpagepower    pageshift+2
       
    class slabhdr
    {
    public:
        unsigned header;
        // made up of
        // bits   |    31    | 30..28 | 27..18 | 17..12 |  11..8  |   7..0   |
        //        +----------+--------+--------+--------+---------+----------+
        // field  | floating |  zero  | used-4 |  size  | pagemap | free pos |
        //
        slab** parent;      // reference to parent's pointer to this slab in tree
        slab* child1;       // 1st child in tree
        slab* child2;       // 2nd child in tree
    };

    inline unsigned header_floating(unsigned h)
        {return (h&0x80000000);}
    const unsigned maxuse = (slabsize - sizeof(slabhdr))>>2;
    const unsigned firstpos = sizeof(slabhdr)>>2;
    #define checktree(x) (void)0
    template <class T> inline T floor(const T addr, unsigned aln)
        {return T((unsigned(addr))&~(aln-1));}
    template <class T> inline T ceiling(T addr, unsigned aln)
        {return T((unsigned(addr)+(aln-1))&~(aln-1));}
    template <class T> inline unsigned lowbits(T addr, unsigned aln)
        {return unsigned(addr)&(aln-1);}
    template <class T1, class T2> inline int ptrdiff(const T1* a1, const T2* a2)
        {return reinterpret_cast<const unsigned char*>(a1) - reinterpret_cast<const unsigned char*>(a2);}
    template <class T> inline T offset(T addr, signed ofs)
        {return T(unsigned(addr)+ofs);}
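    /*
      Worked example: floor/ceiling round an address down/up to a
      power-of-two boundary, and lowbits extracts the offset within it:

          floor(0x12345, 0x1000)   -> 0x12000
          ceiling(0x12345, 0x1000) -> 0x13000
          lowbits(0x12345, 0x1000) -> 0x345

      page::pagefor() below uses exactly this: flooring a slab pointer to
      pagesize recovers the owning page.
    */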
       
    class slabset
    {
    public:
        void* initslab(slab* s);
        unsigned size;
        slab* partial;
    };

    class slab : public slabhdr
    {
    public:
        void init(unsigned clz);
        //static slab* slabfor( void* p);
        static slab* slabfor(const void* p);
    private:
        unsigned char payload[slabsize-sizeof(slabhdr)];
    };
    class page
    {
    public:
        inline static page* pagefor(slab* s);
        //slab slabs;
        slab slabs[slabsperpage];
    };


    inline page* page::pagefor(slab* s)
        {return reinterpret_cast<page*>(floor(s, pagesize));}
    struct pagecell
    {
        void* page;
        unsigned size;
    };
    /****** CODE TO SUPPORT SLAB ALLOCATOR ******/
       
    /****** COMMON DEFS CHUNK PAGE MAP/UNMAP ******/
#define CHUNK_PAGEOUT_THESHOLD (12*1024U)
#define CHUNK_PAGE_OUT_FLAG (98989U)
#define tchunk_page_align(p)    (char*)page_align((size_t)(p) + sizeof(tchunk) + TOP_FOOT_SIZE)
#define address_offset(HIGH, LOW)    (size_t)((char*)(HIGH) - (char*)(LOW))

/* tree_malloc_chunk pageout header operations */
#define set_tchunk_mem_pageout(TP, NPAGES) \
    { (TP)->pageout = CHUNK_PAGE_OUT_FLAG; (TP)->npages = (NPAGES); }
#define reset_tchunk_mem_pageout(TP) \
    { (TP)->pageout = 0;  (TP)->npages = 0; }
#define page_not_in_memory(P, S) \
    (  !is_small(S) &&  ( (((tchunkptr)(P))->pageout==CHUNK_PAGE_OUT_FLAG)?1:0 )  )
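/*
  Usage sketch (illustrative only): when a large free tree chunk is
  decommitted, its header is tagged so later code knows the body pages
  are not resident:

      tchunkptr tp = (tchunkptr)p;
      set_tchunk_mem_pageout(tp, npages);          // mark body as paged out
      ...
      if (page_not_in_memory(p, chunksize(p))) {   // must recommit before use
          reset_tchunk_mem_pageout(tp);
      }

  Small chunks are never paged out, which is why page_not_in_memory()
  short-circuits on is_small(S).
*/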
       
#ifdef DL_CHUNK_MEM_DEBUG
#define ASSERT_RCHUNK_SIZE() \
    {RChunk rchunk; rchunk.SetHandle(iChunkHandle); assert(iChunkSize == rchunk.Size());}
#define TRACE_DL_CHUNK_MAP(c_addr, csize, page_addr, page_size) \
    MEM_LOGF(_L8("DL_CHUNK_MAP$$:: chunk_addr=%x, chunk_size=%d, page_addr=%x, page_size=%d"), c_addr, csize, page_addr, (page_size));
#define TRACE_DL_CHUNK_UNMAP(c_addr, csize, page_addr, page_size) \
    MEM_LOGF(_L8("DL_CHUNK_UNMAP:: chunk_addr=%x, chunk_size=%d, page_addr=%x, page_size=%d"), c_addr, csize, page_addr, (page_size));
#else
#define ASSERT_RCHUNK_SIZE()
#define TRACE_DL_CHUNK_MAP(c_addr, csize, p_addr, psize)
#define TRACE_DL_CHUNK_UNMAP(c_addr, csize, p_addr, psize)
#endif

#ifdef OOM_LOGGING
#define TRACE_UNMAPPED_CHUNK(SZ) \
    iUnmappedChunkSize += (SZ);
#define MEM_DUMP_OOM_LOGS(NB, MSG) \
    MEM_LOG(MSG); \
    C_LOG(MSG); \
    dump_heap_logs(NB)
#else
#define MEM_DUMP_OOM_LOGS(NB, MSG)
#define TRACE_UNMAPPED_CHUNK(SZ)
#endif

    /****** COMMON DEFS PAGE MAP/UNMAP ******/
#endif /* __DLA__ */