glib/libglib/src/gslice.c
branch RCL_3
changeset 57 2efc27d87e1c
parent 0 e4d67989cc36
/* GLIB sliced memory - fast concurrent memory chunk allocator
 * Copyright (C) 2005 Tim Janik
 * Portions copyright (c) 2006 Nokia Corporation.  All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
/* MT safe */

#include "config.h"

#if     defined HAVE_POSIX_MEMALIGN && defined POSIX_MEMALIGN_WITH_COMPLIANT_ALLOCS
#  define HAVE_COMPLIANT_POSIX_MEMALIGN 1
#endif

#ifdef HAVE_COMPLIANT_POSIX_MEMALIGN
#define _XOPEN_SOURCE 600       /* posix_memalign() */
#endif
#include <stdlib.h>             /* posix_memalign() */
#include <string.h>
#include <errno.h>
#include "gmem.h"               /* gslice.h */
#include "gthreadinit.h"
#include "galias.h"
#include "glib.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>             /* sysconf() */
#endif
#ifdef G_OS_WIN32
#include <windows.h>
#include <process.h>
#endif

#ifdef __SYMBIAN32__
#include <glib_wsd.h>
#endif /* __SYMBIAN32__ */

#if EMULATOR
#define g_thread_functions_for_glib_use (*_g_thread_functions_for_glib_use())
#define g_thread_use_default_impl (*_g_thread_use_default_impl())
#define g_mem_gc_friendly (*_g_mem_gc_friendly())
#endif /* EMULATOR */

/* the GSlice allocator is split up into 4 layers, roughly modelled after the slab
 * allocator and magazine extensions as outlined in:
 * + [Bonwick94] Jeff Bonwick, The slab allocator: An object-caching kernel
 *   memory allocator. USENIX 1994, http://citeseer.ist.psu.edu/bonwick94slab.html
 * + [Bonwick01] Bonwick and Jonathan Adams, Magazines and vmem: Extending the
 *   slab allocator to many CPUs and arbitrary resources.
 *   USENIX 2001, http://citeseer.ist.psu.edu/bonwick01magazines.html
 * the layers are:
 * - the thread magazines. for each (aligned) chunk size, a magazine (a list)
 *   of recently freed and soon to be allocated chunks is maintained per thread.
 *   this way, most alloc/free requests can be quickly satisfied from per-thread
 *   free lists which only require one g_private_get() call to retrieve the
 *   thread handle.
 * - the magazine cache. allocating and freeing chunks to/from threads only
 *   occurs at magazine sizes from a global depot of magazines. the depot
 *   maintains a 15 second working set of allocated magazines, so full
 *   magazines are not allocated and released too often.
 *   the chunk size dependent magazine sizes automatically adapt (within limits,
 *   see [3]) to lock contention to properly scale performance across a variety
 *   of SMP systems.
 * - the slab allocator. this allocator allocates slabs (blocks of memory) close
 *   to the system page size or multiples thereof which have to be page aligned.
 *   the blocks are divided into smaller chunks which are used to satisfy
 *   allocations from the upper layers. the space provided by the remainder of
 *   the chunk size division is used for cache colorization (random distribution
 *   of chunk addresses) to improve processor cache utilization. multiple slabs
 *   with the same chunk size are kept in a partially sorted ring to allow O(1)
 *   freeing and allocation of chunks (as long as the allocation of an entirely
 *   new slab can be avoided).
 * - the page allocator. on most modern systems, posix_memalign(3) or
 *   memalign(3) should be available, so this is used to allocate blocks with
 *   system page size based alignments and sizes or multiples thereof.
 *   if no memalign variant is provided, valloc() is used instead and
 *   block sizes are limited to the system page size (no multiples thereof).
 *   as a fallback, on systems without even valloc(), a malloc(3)-based page
 *   allocator with alloc-only behaviour is used.
 *
 * NOTES:
 * [1] some systems' memalign(3) implementations may rely on boundary tagging for
 *     the handed out memory chunks. to avoid excessive page-wise fragmentation,
 *     we reserve 2 * sizeof (void*) per block size for the system's memalign(3),
 *     specified in NATIVE_MALLOC_PADDING.
 * [2] using the slab allocator alone already provides for a fast and efficient
 *     allocator, it doesn't properly scale beyond single-threaded uses though.
 *     also, the slab allocator implements eager free(3)-ing, i.e. it does not
 *     provide any form of caching or working set maintenance. so if used alone,
 *     it's vulnerable to thrashing for sequences of balanced (alloc, free) pairs
 *     at certain thresholds.
 * [3] magazine sizes are bound by an implementation specific minimum size and
 *     a chunk size specific maximum to limit magazine storage sizes to roughly
 *     16KB.
 * [4] allocating ca. 8 chunks per block/page keeps a good balance between
 *     external and internal fragmentation (<= 12.5%). [Bonwick94]
 */
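
/* a minimal usage sketch of the public entry points implemented below
 * (g_slice_alloc(), g_slice_alloc0(), g_slice_free1()); wrapped in "#if 0"
 * so it is never compiled, purely illustrative:
 */
#if 0
typedef struct { double x, y; } Point;  /* any fixed-size structure */

static void
slice_usage_example (void)
{
  Point *p = g_slice_alloc (sizeof (Point));    /* served by magazine/slab/malloc */
  p->x = p->y = 0;
  g_slice_free1 (sizeof (Point), p);            /* caller passes the same size back */
  p = g_slice_alloc0 (sizeof (Point));          /* zero-initialized variant */
  g_slice_free1 (sizeof (Point), p);
}
#endif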
       
/* --- macros and constants --- */
#define LARGEALIGNMENT          (256)
#define P2ALIGNMENT             (2 * sizeof (gsize))                            /* fits 2 pointers (assumed to be 2 * GLIB_SIZEOF_SIZE_T below) */
#define ALIGN(size, base)       ((base) * (gsize) (((size) + (base) - 1) / (base)))
#define NATIVE_MALLOC_PADDING   P2ALIGNMENT                                     /* per-page padding left for native malloc(3) see [1] */
#define SLAB_INFO_SIZE          P2ALIGN (sizeof (SlabInfo) + NATIVE_MALLOC_PADDING)
#define MAX_MAGAZINE_SIZE       (256)                                           /* see [3] and allocator_get_magazine_threshold() for this */
#define MIN_MAGAZINE_SIZE       (4)
#define MAX_STAMP_COUNTER       (7)                                             /* distributes the load of gettimeofday() */
#define MAX_SLAB_CHUNK_SIZE(al) (((al)->max_page_size - SLAB_INFO_SIZE) / 8)    /* we want at least 8 chunks per page, see [4] */
#define MAX_SLAB_INDEX(al)      (SLAB_INDEX (al, MAX_SLAB_CHUNK_SIZE (al)) + 1)
#define SLAB_INDEX(al, asize)   ((asize) / P2ALIGNMENT - 1)                     /* asize must be P2ALIGNMENT aligned */
#define SLAB_CHUNK_SIZE(al, ix) (((ix) + 1) * P2ALIGNMENT)
#define SLAB_BPAGE_SIZE(al,csz) (8 * (csz) + SLAB_INFO_SIZE)

/* optimized version of ALIGN (size, P2ALIGNMENT) */
#if     GLIB_SIZEOF_SIZE_T * 2 == 8  /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0x7) & ~(gsize) 0x7)
#elif   GLIB_SIZEOF_SIZE_T * 2 == 16 /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0xf) & ~(gsize) 0xf)
#else
#define P2ALIGN(size)   ALIGN (size, P2ALIGNMENT)
#endif
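
/* worked example (assuming GLIB_SIZEOF_SIZE_T == 8, i.e. P2ALIGNMENT == 16):
 *   P2ALIGN (1) == 16, P2ALIGN (16) == 16, P2ALIGN (17) == 32
 * i.e. requested sizes are rounded up to the next multiple of two pointers,
 * and SLAB_INDEX() maps the aligned sizes 16, 32, 48, ... to the slot
 * indices 0, 1, 2, ... used throughout the allocator arrays below.
 */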
       
/* special helpers to avoid gmessage.c dependency */
static void mem_error (const char *format, ...) G_GNUC_PRINTF (1,2);
#define mem_assert(cond)    do { if (G_LIKELY (cond)) ; else mem_error ("assertion failed: %s", #cond); } while (0)

/* --- structures --- */
#if !(EMULATOR)
typedef struct _ChunkLink      ChunkLink;
typedef struct _SlabInfo       SlabInfo;
#endif /* !(EMULATOR) */

typedef struct _CachedMagazine CachedMagazine;

#if !(EMULATOR)
struct _ChunkLink {
  ChunkLink *next;
  ChunkLink *data;
};
struct _SlabInfo {
  ChunkLink *chunks;
  guint n_allocated;
  SlabInfo *next, *prev;
};
#endif /* !(EMULATOR) */

typedef struct {
  ChunkLink *chunks;
  gsize      count;                     /* approximate chunk list length */
} Magazine;
typedef struct {
  Magazine   *magazine1;                /* array of MAX_SLAB_INDEX (allocator) */
  Magazine   *magazine2;                /* array of MAX_SLAB_INDEX (allocator) */
} ThreadMemory;

#if !(EMULATOR)
typedef struct {
  gboolean always_malloc;
  gboolean bypass_magazines;
  gsize    working_set_msecs;
  guint    color_increment;
} SliceConfig;
typedef struct {
  /* const after initialization */
  gsize         min_page_size, max_page_size;
  SliceConfig   config;
  gsize         max_slab_chunk_size_for_magazine_cache;
  /* magazine cache */
  GMutex       *magazine_mutex;
  ChunkLink   **magazines;                /* array of MAX_SLAB_INDEX (allocator) */
  guint        *contention_counters;      /* array of MAX_SLAB_INDEX (allocator) */
  gint          mutex_counter;
  guint         stamp_counter;
  guint         last_stamp;
  /* slab allocator */
  GMutex       *slab_mutex;
  SlabInfo    **slab_stack;                /* array of MAX_SLAB_INDEX (allocator) */
  guint        color_accu;
} Allocator;
#endif /* !(EMULATOR) */

/* --- prototypes --- */
static gpointer     slab_allocator_alloc_chunk       (gsize      chunk_size);
static void         slab_allocator_free_chunk        (gsize      chunk_size,
                                                      gpointer   mem);
static void         private_thread_memory_cleanup    (gpointer   data);
static gpointer     allocator_memalign               (gsize      alignment,
                                                      gsize      memsize);
static void         allocator_memfree                (gsize      memsize,
                                                      gpointer   mem);
static inline void  magazine_cache_update_stamp      (void);
#if EMULATOR
static inline gsize allocator_get_magazine_threshold (Allocator *allocator1,
                                                      guint      ix);
#else
static inline gsize allocator_get_magazine_threshold (Allocator *allocator,
                                                      guint      ix);
#endif /* EMULATOR */

/* --- variables --- */
#if EMULATOR

PLS(private_thread_memory,gslice,GPrivate *)
PLS(sys_page_size,gslice,gsize)
PLS_ARRAY(allocator,gslice,Allocator)
PLS(slice_config,gslice,SliceConfig)

#define private_thread_memory  (*FUNCTION_NAME(private_thread_memory ,gslice)())
#define sys_page_size  (*FUNCTION_NAME(sys_page_size ,gslice)())
#define allocator  (FUNCTION_NAME(allocator ,gslice)())
#define slice_config  (*FUNCTION_NAME(slice_config ,gslice)())

#else

static GPrivate   *private_thread_memory = NULL;
static gsize       sys_page_size = 0;
static Allocator   allocator[1] = { { 0, }, };
static SliceConfig slice_config = {
  FALSE,        /* always_malloc */
  FALSE,        /* bypass_magazines */
  15 * 1000,    /* working_set_msecs */
  1,            /* color increment, alt: 0x7fffffff */
};

#endif /* EMULATOR */

/* --- auxiliary functions --- */
void
g_slice_set_config (GSliceConfig ckey,
                    gint64       value)
{
  g_return_if_fail (sys_page_size == 0);
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      slice_config.always_malloc = value != 0;
      break;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      slice_config.bypass_magazines = value != 0;
      break;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      slice_config.working_set_msecs = value;
      break;
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      slice_config.color_increment = value;
      break;
    default: ;
    }
}

gint64
g_slice_get_config (GSliceConfig ckey)
{
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      return slice_config.always_malloc;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      return slice_config.bypass_magazines;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      return slice_config.working_set_msecs;
    case G_SLICE_CONFIG_CHUNK_SIZES:
      return MAX_SLAB_INDEX (allocator);
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      return slice_config.color_increment;
    default:
      return 0;
    }
}

gint64*
g_slice_get_config_state (GSliceConfig ckey,
                          gint64       address,
                          guint       *n_values)
{
  guint i = 0;
  g_return_val_if_fail (n_values != NULL, NULL);
  *n_values = 0;
  switch (ckey)
    {
      gint64 array[64];
    case G_SLICE_CONFIG_CONTENTION_COUNTER:
      array[i++] = SLAB_CHUNK_SIZE (allocator, address);
      array[i++] = allocator->contention_counters[address];
      array[i++] = allocator_get_magazine_threshold (allocator, address);
      *n_values = i;
      return g_memdup (array, sizeof (array[0]) * *n_values);
    default:
      return NULL;
    }
}

static void
slice_config_init (SliceConfig *config)
{
  /* don't use g_malloc/g_message here */
  gchar buffer[1024];
  const gchar *val = _g_getenv_nomalloc ("G_SLICE", buffer);
  static const GDebugKey keys[] = {
    { "always-malloc", 1 << 0 },
  };
  gint flags = !val ? 0 : g_parse_debug_string (val, keys, G_N_ELEMENTS (keys));
  *config = slice_config;
  if (flags & (1 << 0))         /* always-malloc */
    {
      config->always_malloc = TRUE;
    }
}

static void
g_slice_init_nomessage (void)
{
  /* we may not use g_error() or friends here */
  mem_assert (sys_page_size == 0);
  mem_assert (MIN_MAGAZINE_SIZE >= 4);

#ifdef G_OS_WIN32
  {
    SYSTEM_INFO system_info;
    GetSystemInfo (&system_info);
    sys_page_size = system_info.dwPageSize;
  }
#else
  sys_page_size = sysconf (_SC_PAGESIZE); /* = sysconf (_SC_PAGE_SIZE); = getpagesize(); */
#endif
  mem_assert (sys_page_size >= 2 * LARGEALIGNMENT);
  mem_assert ((sys_page_size & (sys_page_size - 1)) == 0);
  slice_config_init (&allocator->config);

  // If the allocator is configured so that GLib always uses the system
  // malloc, we don't need to allocate the bookkeeping arrays; that's why
  // we return early when that check succeeds.
#ifdef __SYMBIAN32__
  if (allocator->config.always_malloc)
    return;
#endif /* __SYMBIAN32__ */
  allocator->min_page_size = sys_page_size;
#if HAVE_COMPLIANT_POSIX_MEMALIGN || HAVE_MEMALIGN
  /* allow allocation of pages up to 8KB (with 8KB alignment).
   * this is useful because many medium to large sized structures
   * fit less than 8 times (see [4]) into 4KB pages.
   * we allow very small page sizes here, to reduce wastage in
   * threads if only small allocations are required (this does
   * bear the risk of increasing allocation times and fragmentation
   * though).
   */
  allocator->min_page_size = MAX (allocator->min_page_size, 4096);
  allocator->max_page_size = MAX (allocator->min_page_size, 8192);
  allocator->min_page_size = MIN (allocator->min_page_size, 128);
#else
  /* we can only align to system page size */
  allocator->max_page_size = sys_page_size;
#endif
  allocator->magazine_mutex = NULL;     /* _g_slice_thread_init_nomessage() */
  allocator->magazines = g_new0 (ChunkLink*, MAX_SLAB_INDEX (allocator));
  allocator->contention_counters = g_new0 (guint, MAX_SLAB_INDEX (allocator));
  allocator->mutex_counter = 0;
  allocator->stamp_counter = MAX_STAMP_COUNTER; /* force initial update */
  allocator->last_stamp = 0;
  allocator->slab_mutex = NULL;         /* _g_slice_thread_init_nomessage() */
  allocator->slab_stack = g_new0 (SlabInfo*, MAX_SLAB_INDEX (allocator));
  allocator->color_accu = 0;
  magazine_cache_update_stamp();
  /* values cached for performance reasons */
  allocator->max_slab_chunk_size_for_magazine_cache = MAX_SLAB_CHUNK_SIZE (allocator);
  if (allocator->config.always_malloc || allocator->config.bypass_magazines)
    allocator->max_slab_chunk_size_for_magazine_cache = 0;      /* non-optimized cases */
  /* at this point, g_mem_gc_friendly() should be initialized, this
   * should have been accomplished by the above g_malloc/g_new calls
   */
}

static inline guint
allocator_categorize (gsize aligned_chunk_size)
{
  /* speed up the likely path */
  if (G_LIKELY (aligned_chunk_size && aligned_chunk_size <= allocator->max_slab_chunk_size_for_magazine_cache))
    return 1;           /* use magazine cache */

  /* the above will fail (max_slab_chunk_size_for_magazine_cache == 0) if the
   * allocator is still uninitialized, or if we are not configured to use the
   * magazine cache.
   */
  if (!sys_page_size)
    g_slice_init_nomessage ();

  // If the allocator is configured so that GLib always uses the system
  // malloc, the bookkeeping arrays were never allocated; that's why we
  // return 0 (plain malloc) when that check succeeds.
#ifdef __SYMBIAN32__
  if (allocator->config.always_malloc)
    return 0;
#endif /* __SYMBIAN32__ */
  if (!allocator->config.always_malloc &&
      aligned_chunk_size &&
      aligned_chunk_size <= MAX_SLAB_CHUNK_SIZE (allocator))
    {
      if (allocator->config.bypass_magazines)
        return 2;       /* use slab allocator, see [2] */
      return 1;         /* use magazine cache */
    }
  return 0;             /* use malloc() */
}

void
_g_slice_thread_init_nomessage (void)
{
  /* we may not use g_error() or friends here */
  if (!sys_page_size)
    g_slice_init_nomessage();
  private_thread_memory = g_private_new (private_thread_memory_cleanup);
  allocator->magazine_mutex = g_mutex_new();
  allocator->slab_mutex = g_mutex_new();
}

static inline void
g_mutex_lock_a (GMutex *mutex,
                guint  *contention_counter)
{
  gboolean contention = FALSE;
  if (!g_mutex_trylock (mutex))
    {
      g_mutex_lock (mutex);
      contention = TRUE;
    }
  if (contention)
    {
      allocator->mutex_counter++;
      if (allocator->mutex_counter >= 1)        /* quickly adapt to contention */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MIN (*contention_counter + 1, MAX_MAGAZINE_SIZE);
        }
    }
  else /* !contention */
    {
      allocator->mutex_counter--;
      if (allocator->mutex_counter < -11)       /* moderately recover magazine sizes */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MAX (*contention_counter, 1) - 1;
        }
    }
}

static inline ThreadMemory*
thread_memory_from_self (void)
{
  ThreadMemory *tmem = g_private_get (private_thread_memory);
  if (G_UNLIKELY (!tmem))
    {
      const guint n_magazines = MAX_SLAB_INDEX (allocator);
      tmem = g_malloc0 (sizeof (ThreadMemory) + sizeof (Magazine) * 2 * n_magazines);
      tmem->magazine1 = (Magazine*) (tmem + 1);
      tmem->magazine2 = &tmem->magazine1[n_magazines];
      g_private_set (private_thread_memory, tmem);
    }
  return tmem;
}

static inline ChunkLink*
magazine_chain_pop_head (ChunkLink **magazine_chunks)
{
  /* magazine chains are linked via ChunkLink->next.
   * each ChunkLink->data of the toplevel chain may point to a subchain,
   * linked via ChunkLink->next. ChunkLink->data of the subchains just
   * contains uninitialized junk.
   */
  ChunkLink *chunk = (*magazine_chunks)->data;
  if (G_UNLIKELY (chunk))
    {
      /* allocating from freed list */
      (*magazine_chunks)->data = chunk->next;
    }
  else
    {
      chunk = *magazine_chunks;
      *magazine_chunks = chunk->next;
    }
  return chunk;
}

#if 0 /* useful for debugging */
static guint
magazine_count (ChunkLink *head)
{
  guint count = 0;
  if (!head)
    return 0;
  while (head)
    {
      ChunkLink *child;
      count += 1;
      for (child = head->data; child; child = child->next)
        count += 1;
      head = head->next;
    }
  return count;
}
#endif

static inline gsize
allocator_get_magazine_threshold (Allocator *allocator1,
                                  guint      ix)
{
  /* the magazine size calculated here has a lower bound of MIN_MAGAZINE_SIZE,
   * which is required by the implementation. also, for moderately sized chunks
   * (say >= 64 bytes), magazine sizes shouldn't be much smaller than half the
   * number of chunks available per page, to avoid excessive traffic in the
   * magazine cache for small to medium sized structures.
   * the upper bound of the magazine size is effectively provided by
   * MAX_MAGAZINE_SIZE. for larger chunks, this number is scaled down so that
   * the content of a single magazine doesn't exceed ca. 16KB.
   */
  gsize chunk_size = SLAB_CHUNK_SIZE (allocator1, ix);
  guint threshold = MAX (MIN_MAGAZINE_SIZE, allocator1->max_page_size / MAX (5 * chunk_size, 5 * 32));
  guint contention_counter = allocator1->contention_counters[ix];
  if (G_UNLIKELY (contention_counter))  /* single CPU bias */
    {
      /* adapt contention counter thresholds to chunk sizes */
      contention_counter = contention_counter * 64 / chunk_size;
      threshold = MAX (threshold, contention_counter);
    }
  return threshold;
}
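
/* worked example with typical numbers: for chunk_size == 64 and
 * max_page_size == 8192, threshold == MAX (4, 8192 / MAX (320, 160)) == 25,
 * i.e. roughly 25 chunks travel between a thread and the depot per exchange.
 * sustained contention on this chunk size's counter can push the threshold
 * further up, towards MAX_MAGAZINE_SIZE.
 */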
       
/* --- magazine cache --- */
static inline void
magazine_cache_update_stamp (void)
{
  if (allocator->stamp_counter >= MAX_STAMP_COUNTER)
    {
      GTimeVal tv;
      g_get_current_time (&tv);
      allocator->last_stamp = tv.tv_sec * 1000 + tv.tv_usec / 1000; /* milliseconds */
      allocator->stamp_counter = 0;
    }
  else
    allocator->stamp_counter++;
}

static inline ChunkLink*
magazine_chain_prepare_fields (ChunkLink *magazine_chunks)
{
  ChunkLink *chunk1;
  ChunkLink *chunk2;
  ChunkLink *chunk3;
  ChunkLink *chunk4;
  /* checked upon initialization: mem_assert (MIN_MAGAZINE_SIZE >= 4); */
  /* ensure a magazine with at least 4 unused data pointers */
  chunk1 = magazine_chain_pop_head (&magazine_chunks);
  chunk2 = magazine_chain_pop_head (&magazine_chunks);
  chunk3 = magazine_chain_pop_head (&magazine_chunks);
  chunk4 = magazine_chain_pop_head (&magazine_chunks);
  chunk4->next = magazine_chunks;
  chunk3->next = chunk4;
  chunk2->next = chunk3;
  chunk1->next = chunk2;
  return chunk1;
}

/* access the first 4 data fields of a specially prepared magazine chain */
#define magazine_chain_prev(mc)         ((mc)->data)
#define magazine_chain_stamp(mc)        ((mc)->next->data)
#define magazine_chain_uint_stamp(mc)   GPOINTER_TO_UINT ((mc)->next->data)
#define magazine_chain_next(mc)         ((mc)->next->next->data)
#define magazine_chain_count(mc)        ((mc)->next->next->next->data)
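
/* layout of a prepared magazine chain (see magazine_chain_prepare_fields()):
 *
 *   chunk1 -> chunk2 -> chunk3 -> chunk4 -> remaining chunks...
 *    .data     .data     .data     .data
 *      |         |         |         |
 *     prev     stamp      next     count     (the four macros above)
 *
 * so a cached magazine is kept in a doubly-linked ring of chains, with its
 * timestamp and chunk count stored in the data fields of its first chunks.
 */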
       
#ifdef __SYMBIAN32__

static void
magazine_cache_trim (Allocator *allocator1,
                     guint      ix,
                     guint      stamp)

#else
static void
magazine_cache_trim (Allocator *allocator1,
                     guint      ix,
                     guint      stamp)
#endif /* __SYMBIAN32__ */
{
  /* g_mutex_lock (allocator->mutex); done by caller */
  /* trim magazine cache from tail */
  ChunkLink *current = magazine_chain_prev (allocator1->magazines[ix]);
  ChunkLink *trash = NULL;
  while (ABS (stamp - magazine_chain_uint_stamp (current)) >= allocator1->config.working_set_msecs)
    {
      /* unlink */
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      /* clear special fields, put on trash stack */
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      magazine_chain_prev (current) = trash;
      trash = current;
      /* fixup list head if required */
      if (current == allocator1->magazines[ix])
        {
          allocator1->magazines[ix] = NULL;
          break;
        }
      current = prev;
    }
  g_mutex_unlock (allocator1->magazine_mutex);
  /* free trash */
  if (trash)
    {
      const gsize chunk_size = SLAB_CHUNK_SIZE (allocator1, ix);
      g_mutex_lock (allocator1->slab_mutex);
      while (trash)
        {
          current = trash;
          trash = magazine_chain_prev (current);
          magazine_chain_prev (current) = NULL; /* clear special field */
          while (current)
            {
              ChunkLink *chunk = magazine_chain_pop_head (&current);
              slab_allocator_free_chunk (chunk_size, chunk);
            }
        }
      g_mutex_unlock (allocator1->slab_mutex);
    }
}

static void
magazine_cache_push_magazine (guint      ix,
                              ChunkLink *magazine_chunks,
                              gsize      count) /* must be >= MIN_MAGAZINE_SIZE */
{
  ChunkLink *current = magazine_chain_prepare_fields (magazine_chunks);
  ChunkLink *next, *prev;
  g_mutex_lock (allocator->magazine_mutex);
  /* add magazine at head */
  next = allocator->magazines[ix];
  if (next)
    prev = magazine_chain_prev (next);
  else
    next = prev = current;
  magazine_chain_next (prev) = current;
  magazine_chain_prev (next) = current;
  magazine_chain_prev (current) = prev;
  magazine_chain_next (current) = next;
  magazine_chain_count (current) = (gpointer) count;
  /* stamp magazine */
  magazine_chain_stamp (current) = GUINT_TO_POINTER (allocator->last_stamp);
  magazine_cache_update_stamp();
  allocator->magazines[ix] = current;
  /* free old magazines beyond a certain threshold */
  magazine_cache_trim (allocator, ix, allocator->last_stamp);
  /* g_mutex_unlock (allocator->mutex); was done by magazine_cache_trim() */
}

static ChunkLink*
magazine_cache_pop_magazine (guint  ix,
                             gsize *countp)
{
  g_mutex_lock_a (allocator->magazine_mutex, &allocator->contention_counters[ix]);
  if (!allocator->magazines[ix])
    {
      guint magazine_threshold = allocator_get_magazine_threshold (allocator, ix);
      gsize i, chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      ChunkLink *chunk, *head;
      g_mutex_unlock (allocator->magazine_mutex);
      g_mutex_lock (allocator->slab_mutex);
      head = slab_allocator_alloc_chunk (chunk_size);
      head->data = NULL;
      chunk = head;
      for (i = 1; i < magazine_threshold; i++)
        {
          chunk->next = slab_allocator_alloc_chunk (chunk_size);
          chunk = chunk->next;
          chunk->data = NULL;
        }
      chunk->next = NULL;
      g_mutex_unlock (allocator->slab_mutex);
      *countp = i;
      return head;
    }
  else
    {
      ChunkLink *current = allocator->magazines[ix];
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      /* unlink */
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      allocator->magazines[ix] = next == current ? NULL : next;
      g_mutex_unlock (allocator->magazine_mutex);
      /* clear special fields and hand out */
      *countp = (gsize) magazine_chain_count (current);
      magazine_chain_prev (current) = NULL;
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      return current;
    }
}

/* --- thread magazines --- */
static void
private_thread_memory_cleanup (gpointer data)
{
  ThreadMemory *tmem = data;
  const guint n_magazines = MAX_SLAB_INDEX (allocator);
  guint ix;
  for (ix = 0; ix < n_magazines; ix++)
    {
      Magazine *mags[2];
      guint j;
      mags[0] = &tmem->magazine1[ix];
      mags[1] = &tmem->magazine2[ix];
      for (j = 0; j < 2; j++)
        {
          Magazine *mag = mags[j];
          if (mag->count >= MIN_MAGAZINE_SIZE)
            magazine_cache_push_magazine (ix, mag->chunks, mag->count);
          else
            {
              const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
              g_mutex_lock (allocator->slab_mutex);
              while (mag->chunks)
                {
                  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
                  slab_allocator_free_chunk (chunk_size, chunk);
                }
              g_mutex_unlock (allocator->slab_mutex);
            }
        }
    }
  g_free (tmem);
}

static void
thread_memory_magazine1_reload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  mem_assert (mag->chunks == NULL); /* ensure that we may reset mag->count */
  mag->count = 0;
  mag->chunks = magazine_cache_pop_magazine (ix, &mag->count);
}

static void
thread_memory_magazine2_unload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine2[ix];
  magazine_cache_push_magazine (ix, mag->chunks, mag->count);
  mag->chunks = NULL;
  mag->count = 0;
}

static inline void
thread_memory_swap_magazines (ThreadMemory *tmem,
                              guint         ix)
{
  Magazine xmag = tmem->magazine1[ix];
  tmem->magazine1[ix] = tmem->magazine2[ix];
  tmem->magazine2[ix] = xmag;
}

static inline gboolean
thread_memory_magazine1_is_empty (ThreadMemory *tmem,
                                  guint         ix)
{
  return tmem->magazine1[ix].chunks == NULL;
}

static inline gboolean
thread_memory_magazine2_is_full (ThreadMemory *tmem,
                                 guint         ix)
{
  return tmem->magazine2[ix].count >= allocator_get_magazine_threshold (allocator, ix);
}

static inline gpointer
thread_memory_magazine1_alloc (ThreadMemory *tmem,
                               guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
  if (G_LIKELY (mag->count > 0))
    mag->count--;
  return chunk;
}

static inline void
thread_memory_magazine2_free (ThreadMemory *tmem,
                              guint         ix,
                              gpointer      mem)
{
  Magazine *mag = &tmem->magazine2[ix];
  ChunkLink *chunk = mem;
  chunk->data = NULL;
  chunk->next = mag->chunks;
  mag->chunks = chunk;
  mag->count++;
}
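
/* note on the two-magazine scheme ([Bonwick01]): magazine1 only serves
 * allocations and magazine2 only collects frees.  when magazine1 runs empty
 * (or magazine2 fills up), the two are swapped first; only if that does not
 * help is the depot consulted via the reload/unload functions above.  thus
 * a thread touches the depot lock only about once per magazine_threshold
 * alloc/free operations.
 */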
       
/* --- API functions --- */
EXPORT_C gpointer
g_slice_alloc (gsize mem_size)
{
  gsize chunk_size;
  gpointer mem;
  guint acat;
  chunk_size = P2ALIGN (mem_size);
  acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1))     /* allocate through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
            thread_memory_magazine1_reload (tmem, ix);
        }
      mem = thread_memory_magazine1_alloc (tmem, ix);
    }
  else if (acat == 2)           /* allocate through slab allocator */
    {
      g_mutex_lock (allocator->slab_mutex);
      mem = slab_allocator_alloc_chunk (chunk_size);
      g_mutex_unlock (allocator->slab_mutex);
    }
  else                          /* delegate to system malloc */
    mem = g_malloc (mem_size);

  return mem;
}

EXPORT_C gpointer
g_slice_alloc0 (gsize mem_size)
{
  gpointer mem = g_slice_alloc (mem_size);
  if (mem)
    memset (mem, 0, mem_size);
  return mem;
}

EXPORT_C void
g_slice_free1 (gsize    mem_size,
               gpointer mem_block)
{
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_UNLIKELY (!mem_block))
    return;
  if (G_LIKELY (acat == 1))             /* free through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
            thread_memory_magazine2_unload (tmem, ix);
        }
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, chunk_size);
      thread_memory_magazine2_free (tmem, ix, mem_block);
    }
  else if (acat == 2)                   /* free through slab allocator */
    {
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, chunk_size);
      g_mutex_lock (allocator->slab_mutex);
      slab_allocator_free_chunk (chunk_size, mem_block);
      g_mutex_unlock (allocator->slab_mutex);
    }
  else                                  /* delegate to system malloc */
    {
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, mem_size);
      g_free (mem_block);
    }
}

EXPORT_C void
g_slice_free_chain_with_offset (gsize    mem_size,
                                gpointer mem_chain,
                                gsize    next_offset)
{
  gpointer slice = mem_chain;
  /* while the thread magazines and the magazine cache are implemented so that
   * they can easily be extended to allow for free lists containing more free
   * lists for the first level nodes, which would allow O(1) freeing in this
   * function, the benefit of such an extension is questionable, because:
   * - the magazine size counts will become mere lower bounds which confuses
   *   the code adapting to lock contention;
   * - freeing a single node to the thread magazines is very fast, so this
   *   O(list_length) operation is multiplied by a fairly small factor;
   * - memory usage histograms on larger applications seem to indicate that
   *   the amount of released multi node lists is negligible in comparison
   *   to single node releases;
   * - the major performance bottleneck, namely g_private_get() or
   *   g_mutex_lock()/g_mutex_unlock(), has already been moved out of the
   *   inner loop for freeing chained slices.
   */
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1))             /* free through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      while (slice)
        {
          guint8 *current = slice;
          slice = *(gpointer*) (current + next_offset);
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
            {
              thread_memory_swap_magazines (tmem, ix);
              if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
                thread_memory_magazine2_unload (tmem, ix);
            }
          if (G_UNLIKELY (g_mem_gc_friendly))
            memset (current, 0, chunk_size);
          thread_memory_magazine2_free (tmem, ix, current);
        }
    }
  else if (acat == 2)                   /* free through slab allocator */
    {
      g_mutex_lock (allocator->slab_mutex);
      while (slice)
        {
          guint8 *current = slice;
          slice = *(gpointer*) (current + next_offset);
          if (G_UNLIKELY (g_mem_gc_friendly))
            memset (current, 0, chunk_size);
          slab_allocator_free_chunk (chunk_size, current);
        }
      g_mutex_unlock (allocator->slab_mutex);
    }
  else                                  /* delegate to system malloc */
    while (slice)
      {
        guint8 *current = slice;
        slice = *(gpointer*) (current + next_offset);
        if (G_UNLIKELY (g_mem_gc_friendly))
          memset (current, 0, mem_size);
        g_free (current);
      }
}

/* --- single page allocator --- */
#ifdef __SYMBIAN32__

static void
allocator_slab_stack_push (Allocator *allocator1,
                           guint      ix,
                           SlabInfo  *sinfo)

#else

static void
allocator_slab_stack_push (Allocator *allocator1,
                           guint      ix,
                           SlabInfo  *sinfo)

#endif /* __SYMBIAN32__ */
{
  /* insert slab at slab ring head */
  if (!allocator1->slab_stack[ix])
    {
      sinfo->next = sinfo;
      sinfo->prev = sinfo;
    }
  else
    {
      SlabInfo *next = allocator1->slab_stack[ix], *prev = next->prev;
      next->prev = sinfo;
      prev->next = sinfo;
      sinfo->next = next;
      sinfo->prev = prev;
    }
  allocator1->slab_stack[ix] = sinfo;
}

#ifdef __SYMBIAN32__

static gsize
allocator_aligned_page_size (Allocator *allocator1,
                             gsize      n_bytes)

#else
static gsize
allocator_aligned_page_size (Allocator *allocator1,
                             gsize      n_bytes)
#endif /* __SYMBIAN32__ */
{
  gsize val = 1 << g_bit_storage (n_bytes - 1);
  val = MAX (val, allocator1->min_page_size);
  return val;
}

#ifdef __SYMBIAN32__

static void
allocator_add_slab (Allocator *allocator1,
                    guint      ix,
                    gsize      chunk_size)

#else
static void
allocator_add_slab (Allocator *allocator1,
                    guint      ix,
                    gsize      chunk_size)
#endif /* __SYMBIAN32__ */
{
  ChunkLink *chunk;
  SlabInfo *sinfo;
  gsize addr, padding, n_chunks, color = 0;
  gsize page_size = allocator_aligned_page_size (allocator1, SLAB_BPAGE_SIZE (allocator1, chunk_size));
  /* allocate 1 page for the chunks and the slab */
  gpointer aligned_memory = allocator_memalign (page_size, page_size - NATIVE_MALLOC_PADDING);
  guint8 *mem = aligned_memory;
  guint i;
  if (!mem)
    {
      const gchar *syserr = "unknown error";
#if HAVE_STRERROR
      syserr = strerror (errno);
#endif
      mem_error ("failed to allocate %u bytes (alignment: %u): %s\n",
                 (guint) (page_size - NATIVE_MALLOC_PADDING), (guint) page_size, syserr);
    }
  /* mask page address */
  addr = ((gsize) mem / page_size) * page_size;
  /* assert alignment */
  mem_assert (aligned_memory == (gpointer) addr);
  /* basic slab info setup */
  sinfo = (SlabInfo*) (mem + page_size - SLAB_INFO_SIZE);
  sinfo->n_allocated = 0;
  sinfo->chunks = NULL;
  /* figure cache colorization */
  n_chunks = ((guint8*) sinfo - mem) / chunk_size;
  padding = ((guint8*) sinfo - mem) - n_chunks * chunk_size;
  if (padding)
    {
      color = (allocator1->color_accu * P2ALIGNMENT) % padding;
      allocator1->color_accu += allocator1->config.color_increment;
    }
  /* add chunks to free list */
  chunk = (ChunkLink*) (mem + color);
  sinfo->chunks = chunk;
  for (i = 0; i < n_chunks - 1; i++)
    {
      chunk->next = (ChunkLink*) ((guint8*) chunk + chunk_size);
      chunk = chunk->next;
    }
  chunk->next = NULL;   /* last chunk */
  /* add slab to slab ring */
  allocator_slab_stack_push (allocator1, ix, sinfo);
}
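
/* schematic layout of a slab page as produced by allocator_add_slab()
 * (field widths not to scale):
 *
 *   | color offset | chunk 0 | chunk 1 | ... | chunk n-1 | padding | SlabInfo |
 *   ^ page start                                                    page end ^
 *
 * the color offset advances by color_increment * P2ALIGNMENT per new slab
 * (modulo the available padding), so equally-indexed chunks of different
 * slabs do not all map to the same processor cache lines.
 */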
       
static gpointer
slab_allocator_alloc_chunk (gsize chunk_size)
{
  ChunkLink *chunk;
  guint ix = SLAB_INDEX (allocator, chunk_size);
  /* ensure non-empty slab */
  if (!allocator->slab_stack[ix] || !allocator->slab_stack[ix]->chunks)
    allocator_add_slab (allocator, ix, chunk_size);
  /* allocate chunk */
  chunk = allocator->slab_stack[ix]->chunks;
  allocator->slab_stack[ix]->chunks = chunk->next;
  allocator->slab_stack[ix]->n_allocated++;
  /* rotate empty slabs */
  if (!allocator->slab_stack[ix]->chunks)
    allocator->slab_stack[ix] = allocator->slab_stack[ix]->next;
  return chunk;
}

static void
slab_allocator_free_chunk (gsize    chunk_size,
                           gpointer mem)
{
  ChunkLink *chunk;
  gboolean was_empty;
#ifdef MOBILE_PORT
  guint16 offset;
#endif /* MOBILE_PORT */
  guint ix = SLAB_INDEX (allocator, chunk_size);
  gsize page_size = allocator_aligned_page_size (allocator, SLAB_BPAGE_SIZE (allocator, chunk_size));
  gsize addr = ((gsize) mem / page_size) * page_size;
  /* mask page address */
  guint8 *page = (guint8*) addr;
  SlabInfo *sinfo = (SlabInfo*) (page + page_size - SLAB_INFO_SIZE);
  /* assert valid chunk count */
  mem_assert (sinfo->n_allocated > 0);
#ifdef MOBILE_PORT
  offset = *((guint16*)(page + page_size - NATIVE_MALLOC_PADDING));
#endif /* MOBILE_PORT */
  /* add chunk to free list */
  was_empty = sinfo->chunks == NULL;
  chunk = (ChunkLink*) mem;
  chunk->next = sinfo->chunks;
  sinfo->chunks = chunk;
  sinfo->n_allocated--;
  /* keep slab ring partially sorted, empty slabs at end */
  if (was_empty)
    {
      /* unlink slab */
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      next->prev = prev;
      prev->next = next;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      /* insert slab at head */
      allocator_slab_stack_push (allocator, ix, sinfo);
    }
  /* eagerly free completely unused slabs */
  if (!sinfo->n_allocated)
    {
      /* unlink slab */
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      next->prev = prev;
      prev->next = next;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      /* free slab */
#ifndef MOBILE_PORT
      allocator_memfree (page_size, page);
#else /* MOBILE_PORT */
      allocator_memfree (page_size, page - offset);
#endif /* MOBILE_PORT */
    }
}

/* --- memalign implementation --- */
#ifdef HAVE_MALLOC_H
#include <malloc.h>             /* memalign() */
#endif

/* from config.h:
 * define HAVE_POSIX_MEMALIGN           1 // if free(posix_memalign(3)) works, <stdlib.h>
 * define HAVE_COMPLIANT_POSIX_MEMALIGN 1 // if free(posix_memalign(3)) works for sizes != 2^n, <stdlib.h>
 * define HAVE_MEMALIGN                 1 // if free(memalign(3)) works, <malloc.h>
 * define HAVE_VALLOC                   1 // if free(valloc(3)) works, <stdlib.h> or <malloc.h>
 * if none is provided, we implement malloc(3)-based alloc-only page alignment
 */

#if !(HAVE_COMPLIANT_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC)
#if EMULATOR

PLS(compat_valloc_trash,gslice,GTrashStack *)
#define compat_valloc_trash  (*FUNCTION_NAME(compat_valloc_trash,gslice)())

#else

static GTrashStack *compat_valloc_trash = NULL;

#endif /* EMULATOR */

#endif

static gpointer
allocator_memalign (gsize alignment,
                    gsize memsize)
{
  gpointer aligned_memory = NULL;
  gint err = ENOMEM;
#if     HAVE_COMPLIANT_POSIX_MEMALIGN
  err = posix_memalign (&aligned_memory, alignment, memsize);
#elif   HAVE_MEMALIGN
  errno = 0;
  aligned_memory = memalign (alignment, memsize);
  err = errno;
#elif   MOBILE_PORT
  const guint n_pages = 2;
  guint8 *mem = malloc (n_pages * sys_page_size);
  err = errno;
  if (mem)
    {
      guint8 *amem = (guint8*) ALIGN ((gsize) mem, sys_page_size);
      guint16 offset = amem - mem;
      guint16 *p = (guint16*)(amem + sys_page_size - NATIVE_MALLOC_PADDING);
      *p = offset;
      aligned_memory = amem;
    }
#elif   HAVE_VALLOC
  errno = 0;
  aligned_memory = valloc (memsize);
  err = errno;
#else
  /* simplistic non-freeing page allocator */
  mem_assert (alignment == sys_page_size);
  mem_assert (memsize <= sys_page_size);
  if (!compat_valloc_trash)
    {
      const guint n_pages = 16;
      guint8 *mem = malloc (n_pages * sys_page_size);
      err = errno;
      if (mem)
        {
          gint i = n_pages;
          guint8 *amem = (guint8*) ALIGN ((gsize) mem, sys_page_size);
          if (amem != mem)
            i--;        /* mem wasn't page aligned */
          while (--i >= 0)
            g_trash_stack_push (&compat_valloc_trash, amem + i * sys_page_size);
        }
    }
  aligned_memory = g_trash_stack_pop (&compat_valloc_trash);
#endif
  if (!aligned_memory)
    errno = err;
  return aligned_memory;
}

static void
allocator_memfree (gsize    memsize,
                   gpointer mem)
{
#ifdef MOBILE_PORT
  free (mem);
#elif HAVE_COMPLIANT_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC
  free (mem);
#else
  mem_assert (memsize <= sys_page_size);
  g_trash_stack_push (&compat_valloc_trash, mem);
#endif
}

#include <stdio.h>

static void
mem_error (const char *format,
           ...)
{
  const char *pname;
  va_list args;
  /* at least, put out "MEMORY-ERROR", in case we segfault during the rest of the function */
  fputs ("\n***MEMORY-ERROR***: ", stderr);
  pname = g_get_prgname();
  fprintf (stderr, "%s[%u]: GSlice: ", pname ? pname : "", getpid());
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  fputs ("\n", stderr);
  _exit (1);
}

#define __G_SLICE_C__
#include "galiasdef.c"