utilities/standaloneallocator/newallocator.cpp
author hgs
Fri, 15 Oct 2010 17:30:59 -0400
changeset 16 3c88a81ff781
permissions -rw-r--r--
201041
Ignore whitespace changes - Everywhere: Within whitespace: At end of lines:
16
hgs
parents:
diff changeset
     1
/*
hgs
parents:
diff changeset
     2
 * Copyright (c) 2010 Nokia Corporation and/or its subsidiary(-ies).
hgs
parents:
diff changeset
     3
 *
hgs
parents:
diff changeset
     4
 * This file is part of Qt Web Runtime.
hgs
parents:
diff changeset
     5
 *
hgs
parents:
diff changeset
     6
 * This library is free software; you can redistribute it and/or
hgs
parents:
diff changeset
     7
 * modify it under the terms of the GNU Lesser General Public License
hgs
parents:
diff changeset
     8
 * version 2.1 as published by the Free Software Foundation.
hgs
parents:
diff changeset
     9
 *
hgs
parents:
diff changeset
    10
 * This library is distributed in the hope that it will be useful,
hgs
parents:
diff changeset
    11
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
hgs
parents:
diff changeset
    12
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
hgs
parents:
diff changeset
    13
 * Lesser General Public License for more details.
hgs
parents:
diff changeset
    14
 *
hgs
parents:
diff changeset
    15
 * You should have received a copy of the GNU Lesser General Public
hgs
parents:
diff changeset
    16
 * License along with this library; if not, write to the Free Software
hgs
parents:
diff changeset
    17
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
hgs
parents:
diff changeset
    18
 *
hgs
parents:
diff changeset
    19
 */
hgs
parents:
diff changeset
    20
hgs
parents:
diff changeset
    21
hgs
parents:
diff changeset
    22
/****************************************************************************
hgs
parents:
diff changeset
    23
 *
hgs
parents:
diff changeset
    24
 * This file is part of the Symbian application wrapper of the Qt Toolkit.
hgs
parents:
diff changeset
    25
 *
hgs
parents:
diff changeset
    26
 * The memory allocator is backported from Symbian OS, and can eventually
hgs
parents:
diff changeset
    27
 * be removed from Qt once it is built in to all supported OS versions.
hgs
parents:
diff changeset
    28
 * The allocator is a composite of three allocators:
hgs
parents:
diff changeset
    29
 *  - A page allocator, for large allocations
hgs
parents:
diff changeset
    30
 *  - A slab allocator, for small allocations
hgs
parents:
diff changeset
    31
 *  - Doug Lea's allocator, for medium size allocations
hgs
parents:
diff changeset
    32
 *
hgs
parents:
diff changeset
    33
 ***************************************************************************/
hgs
parents:
diff changeset
    34
hgs
parents:
diff changeset
    35
hgs
parents:
diff changeset
    36
#include <e32std.h>
hgs
parents:
diff changeset
    37
#include <e32cmn.h>
hgs
parents:
diff changeset
    38
#include <hal.h>
hgs
parents:
diff changeset
    39
#include <e32panic.h>
hgs
parents:
diff changeset
    40
hgs
parents:
diff changeset
    41
#ifndef QT_SYMBIAN_HAVE_U32STD_H
hgs
parents:
diff changeset
    42
// Stand-in for the kernel's SThreadCreateInfo, used only when u32std.h is
// not available (see the surrounding #ifndef QT_SYMBIAN_HAVE_U32STD_H).
// Member order and types must match the kernel-side layout exactly, so the
// fields must not be reordered or retyped.
struct SThreadCreateInfo
    {
    TAny* iHandle;                  // kernel-side handle slot
    TInt iType;                     // thread type discriminator
    TThreadFunction iFunction;      // thread entry point
    TAny* iPtr;                     // argument passed to iFunction
    TAny* iSupervisorStack;         // supervisor-mode stack base
    TInt iSupervisorStackSize;
    TAny* iUserStack;               // user-mode stack base
    TInt iUserStackSize;
    TInt iInitialThreadPriority;
    TPtrC iName;                    // thread name (non-owning descriptor)
    TInt iTotalSize;    // Size including any extras (must be a multiple of 8 bytes)
    };
hgs
parents:
diff changeset
    56
hgs
parents:
diff changeset
    57
// Stand-in for the kernel's SStdEpocThreadCreateInfo (again only compiled
// when u32std.h is unavailable). Extends the base create-info with the
// heap parameters a standard EPOC thread is created with.
struct SStdEpocThreadCreateInfo : public SThreadCreateInfo
    {
    RAllocator* iAllocator;         // allocator the new thread should use
    TInt iHeapInitialSize;          // initial committed heap size
    TInt iHeapMaxSize;              // maximum heap size
    TInt iPadding;      // Make structure size a multiple of 8 bytes
    };
hgs
parents:
diff changeset
    64
#else
hgs
parents:
diff changeset
    65
#include <u32std.h>
hgs
parents:
diff changeset
    66
#endif
hgs
parents:
diff changeset
    67
#include <e32svr.h>
hgs
parents:
diff changeset
    68
hgs
parents:
diff changeset
    69
//Named local chunks require support from the kernel, which depends on Symbian^3
hgs
parents:
diff changeset
    70
#define NO_NAMED_LOCAL_CHUNKS
hgs
parents:
diff changeset
    71
//Reserving a minimum heap size is not supported, because the implementation does not know what type of
hgs
parents:
diff changeset
    72
//memory to use. DLA memory grows upwards, slab and page allocators grow downwards.
hgs
parents:
diff changeset
    73
//This would need kernel support to do properly.
hgs
parents:
diff changeset
    74
#define NO_RESERVE_MEMORY
hgs
parents:
diff changeset
    75
hgs
parents:
diff changeset
    76
//The BTRACE debug framework requires Symbian OS 9.4 or higher.
hgs
parents:
diff changeset
    77
//Required header files are not included in S60 5.0 SDKs, but
hgs
parents:
diff changeset
    78
//they are available for open source versions of Symbian OS.
hgs
parents:
diff changeset
    79
//Note that although Symbian OS 9.3 supports BTRACE, the usage in this file
hgs
parents:
diff changeset
    80
//depends on 9.4 header files.
hgs
parents:
diff changeset
    81
hgs
parents:
diff changeset
    82
//This debug flag uses BTRACE to emit debug traces to identify the heaps.
hgs
parents:
diff changeset
    83
//Note that it uses the ETest1 trace category which is not reserved
hgs
parents:
diff changeset
    84
//#define TRACING_HEAPS
hgs
parents:
diff changeset
    85
//This debug flag uses BTRACE to emit debug traces to aid with debugging
hgs
parents:
diff changeset
    86
//allocs, frees & reallocs. It should be used together with the KUSERHEAPTRACE
hgs
parents:
diff changeset
    87
//kernel trace flag to enable heap tracing.
hgs
parents:
diff changeset
    88
//#define TRACING_ALLOCS
hgs
parents:
diff changeset
    89
//This debug flag turns on tracing of the call stack for each alloc trace.
hgs
parents:
diff changeset
    90
//It is dependent on TRACING_ALLOCS.
hgs
parents:
diff changeset
    91
//#define TRACING_CALLSTACKS
hgs
parents:
diff changeset
    92
hgs
parents:
diff changeset
    93
#if defined(TRACING_ALLOCS) || defined(TRACING_HEAPS)
hgs
parents:
diff changeset
    94
#include <e32btrace.h>
hgs
parents:
diff changeset
    95
#endif
hgs
parents:
diff changeset
    96
hgs
parents:
diff changeset
    97
// Memory logging routines inherited from webkit allocator 9.2TB.
hgs
parents:
diff changeset
    98
// #define OOM_LOGGING
hgs
parents:
diff changeset
    99
// This debug flag logs error conditions when memory is unmapped/mapped from the system.
hgs
parents:
diff changeset
   100
// Also, exports routines to dump the internal state and memory usage of the DL allocator.
hgs
parents:
diff changeset
   101
// #define DL_CHUNK_MEM_DEBUG
hgs
parents:
diff changeset
   102
// Exports debug rouintes to assert/trace chunked memory access.
hgs
parents:
diff changeset
   103
#if defined(OOM_LOGGING) || defined(DL_CHUNK_MEM_DEBUG)
hgs
parents:
diff changeset
   104
#include "MemoryLogger.h"
hgs
parents:
diff changeset
   105
#endif
hgs
parents:
diff changeset
   106
hgs
parents:
diff changeset
   107
hgs
parents:
diff changeset
   108
#ifndef __WINS__
hgs
parents:
diff changeset
   109
#pragma push
hgs
parents:
diff changeset
   110
#pragma arm
hgs
parents:
diff changeset
   111
#endif
hgs
parents:
diff changeset
   112
hgs
parents:
diff changeset
   113
#include "dla_p.h"
hgs
parents:
diff changeset
   114
#include "newallocator_p.h"
hgs
parents:
diff changeset
   115
hgs
parents:
diff changeset
   116
// if non zero this causes the slabs to be configured only when the chunk size exceeds this level
hgs
parents:
diff changeset
   117
#define DELAYED_SLAB_THRESHOLD (64*1024)        // 64KB seems about right based on trace data
hgs
parents:
diff changeset
   118
#define SLAB_CONFIG (0xabe)
hgs
parents:
diff changeset
   119
hgs
parents:
diff changeset
   120
_LIT(KDLHeapPanicCategory, "DL Heap");
hgs
parents:
diff changeset
   121
#define GET_PAGE_SIZE(x)            HAL::Get(HALData::EMemoryPageSize, x)
hgs
parents:
diff changeset
   122
#define __CHECK_CELL(p)
hgs
parents:
diff changeset
   123
#define __POWER_OF_2(x)             ((TUint32)((x)^((x)-1))>=(TUint32)(x))
hgs
parents:
diff changeset
   124
#define HEAP_PANIC(r)               Panic(r)
hgs
parents:
diff changeset
   125
hgs
parents:
diff changeset
   126
// Panic the current thread with the standard "USER" category and the given
// panic reason code. Used via the HEAP_PANIC macro for fatal heap errors.
LOCAL_C void Panic(TCdtPanic aPanic)
// Panic the process with USER as the category.
    {
    User::Panic(_L("USER"),aPanic);
    }
hgs
parents:
diff changeset
   131
hgs
parents:
diff changeset
   132
    /* Purpose:     Map chunk memory pages from system RAM
hgs
parents:
diff changeset
   133
 * Arguments:   tp - tchunkptr in which memmory should be mapped
hgs
parents:
diff changeset
   134
 *              psize - incoming tchunk size
hgs
parents:
diff changeset
   135
 * Return:      KErrNone if successful, else KErrNoMemory
hgs
parents:
diff changeset
   136
 * Note:
hgs
parents:
diff changeset
   137
 */
hgs
parents:
diff changeset
   138
TInt RNewAllocator::map_chunk_pages(tchunkptr tp, size_t psize)
hgs
parents:
diff changeset
   139
{
hgs
parents:
diff changeset
   140
    if (page_not_in_memory(tp, psize)) {
hgs
parents:
diff changeset
   141
        char *a_addr = tchunk_page_align(tp);
hgs
parents:
diff changeset
   142
        size_t npages = tp->npages;
hgs
parents:
diff changeset
   143
hgs
parents:
diff changeset
   144
#ifdef OOM_LOGGING
hgs
parents:
diff changeset
   145
        // check that npages matches the psize
hgs
parents:
diff changeset
   146
        size_t offset = address_offset(a_addr,tp);
hgs
parents:
diff changeset
   147
        if (offset < psize && (psize - offset) >= mparams.page_size )
hgs
parents:
diff changeset
   148
        {
hgs
parents:
diff changeset
   149
            size_t tpages = ( psize - offset) >> pageshift;
hgs
parents:
diff changeset
   150
            if (tpages != tp->npages) //assert condition
hgs
parents:
diff changeset
   151
                MEM_LOG("CHUNK_PAGE_ERROR:map_chunk_pages, error in npages");
hgs
parents:
diff changeset
   152
        }
hgs
parents:
diff changeset
   153
        else
hgs
parents:
diff changeset
   154
            MEM_LOG("CHUNK_PAGE_ERROR::map_chunk_pages: - Incorrect page-in-memmory flag");
hgs
parents:
diff changeset
   155
#endif
hgs
parents:
diff changeset
   156
hgs
parents:
diff changeset
   157
        if (map(a_addr, npages*mparams.page_size)) {
hgs
parents:
diff changeset
   158
            TRACE_DL_CHUNK_MAP(tp, psize, a_addr, npages*mparams.page_size);
hgs
parents:
diff changeset
   159
            ASSERT_RCHUNK_SIZE();
hgs
parents:
diff changeset
   160
            TRACE_UNMAPPED_CHUNK(-1*npages*mparams.page_size);
hgs
parents:
diff changeset
   161
            return KErrNone;
hgs
parents:
diff changeset
   162
        }
hgs
parents:
diff changeset
   163
        else {
hgs
parents:
diff changeset
   164
#ifdef OOM_LOGGING
hgs
parents:
diff changeset
   165
hgs
parents:
diff changeset
   166
            MEM_LOGF(_L8("CHUNK_PAGE_ERROR:: map_chunk_pages - Failed to Commit RAM, page_addr=%x, npages=%d, chunk_size=%d"), a_addr, npages, psize);
hgs
parents:
diff changeset
   167
            MEM_DUMP_OOM_LOGS(psize, "RSymbianDLHeap::map_chunk_pages - Failed to Commit RAM");
hgs
parents:
diff changeset
   168
#endif
hgs
parents:
diff changeset
   169
            return KErrNoMemory;
hgs
parents:
diff changeset
   170
        }
hgs
parents:
diff changeset
   171
    }
hgs
parents:
diff changeset
   172
    return KErrNone;
hgs
parents:
diff changeset
   173
}
hgs
parents:
diff changeset
   174
hgs
parents:
diff changeset
   175
/* Purpose:     Map (commit) a partial range of a chunk's pages from system RAM.
 *              Commits only the pages of tp that lie before the remainder
 *              chunk r; r's pages stay decommitted.
 * Arguments:   tp - tchunkptr in which memory should be mapped
 *              psize - incoming tchunk size
 *              r - remainder chunk pointer
 *              rsize - remainder chunk size
 * Return:      Number of pages still unmapped in the remainder chunk if
 *              successful (0 or more), else KErrNoMemory
 * Note:        Remainder chunk should be large enough to be mapped out
 *              (checked before invoking this function).
 *              pageout headers will be set from insert_large_chunk(), not here.
 */
TInt RNewAllocator::map_chunk_pages_partial(tchunkptr tp, size_t psize, tchunkptr r, size_t rsize)
{
    if (page_not_in_memory(tp, psize)) {
        size_t npages = tp->npages; // total no of pages unmapped in this chunk
        char *page_addr_map = tchunk_page_align(tp); // address to begin page map
        char *page_addr_rem = tchunk_page_align(r);  // address in remainder chunk to remain unmapped
        // The remainder's first page boundary must fall inside the remainder.
        assert(address_offset(page_addr_rem, r) < rsize);
        size_t npages_map = address_offset(page_addr_rem, page_addr_map) >> pageshift; // no of pages to be mapped
        if (npages_map > 0) {
            if (map(page_addr_map, npages_map*mparams.page_size)) {
#ifdef DL_CHUNK_MEM_DEBUG
                TRACE_DL_CHUNK_MAP(tp, psize, page_addr_map, npages_map*mparams.page_size);
                ASSERT_RCHUNK_SIZE();
                TRACE_UNMAPPED_CHUNK(-1*npages_map*mparams.page_size);
#endif
                // Pages left unmapped (they now belong to the remainder chunk).
                return (npages - npages_map);
            }
            else {
#ifdef OOM_LOGGING
                MEM_LOGF(_L8("CHUNK_PAGE_ERROR:: map_chunk_pages_partial - Failed to Commit RAM, page_addr=%x, npages=%d, chunk_size=%d"), page_addr_map, npages_map, psize);
                MEM_DUMP_OOM_LOGS(psize, "RSymbianDLHeap::map_chunk_pages_partial - Failed to Commit RAM");
#endif
                return KErrNoMemory;
            }
        }
        else {
             // map not needed, first page is already mapped
             return npages;
        }
    }

    // Chunk fully in memory: nothing was unmapped, nothing remains unmapped.
    return 0;
}
hgs
parents:
diff changeset
   217
hgs
parents:
diff changeset
   218
hgs
parents:
diff changeset
   219
/* Purpose:     Release (unmap/decommit) whole pages within a chunk back to
 *              the system.
 * Arguments:   tp - tchunkptr from which memory may be released
 *              psize - incoming tchunk size
 *              prev_npages - number of pages that have already been unmapped
 *                            from this chunk
 * Return:      total number of pages unmapped from this chunk (includes the
 *              previously unmapped prev_npages)
 * Note:        pageout headers will be set from insert_large_chunk(), not here.
 */
TInt RNewAllocator::unmap_chunk_pages(tchunkptr tp, size_t psize, size_t prev_npages)
{
    size_t npages = 0;
    // First page-aligned address at or after the chunk header; only whole
    // pages that lie entirely inside the chunk payload can be decommitted.
    char *a_addr = tchunk_page_align(tp);
    size_t offset = address_offset(a_addr,tp);
    if (offset < psize && (psize - offset) >= mparams.page_size)
    { /* check for new pages to decommit */
        npages = ( psize - offset) >> pageshift;
        if (npages > prev_npages) {
            unmap(a_addr, npages*mparams.page_size);    // assuming kernel takes care of already unmapped pages
            TRACE_DL_CHUNK_UNMAP(tp, psize, a_addr, npages*mparams.page_size);
            // NOTE(review): compensates for prev_npages pages that were
            // already unmapped before this call — presumably unmap() adjusts
            // iChunkSize by the full npages; confirm against unmap().
            iChunkSize += prev_npages*mparams.page_size; //adjust actual chunk size
            ASSERT_RCHUNK_SIZE();
            TRACE_UNMAPPED_CHUNK((npages-prev_npages)*mparams.page_size);
            // Decommitted range must not run into the next chunk.
            assert((a_addr + npages*mparams.page_size - 1) < (char*)next_chunk(tp));
        }
    }

#ifdef OOM_LOGGING
    // A shrinking page count would indicate corrupted bookkeeping.
    if (npages && (npages < prev_npages))
        MEM_LOG("CHUNK_PAGE_ERROR:unmap_chunk_pages, error in npages");
    if (npages > prev_npages) {
        /* check that end of decommited address lie within this chunk */
        if ((a_addr + npages*mparams.page_size - 1) >= (char*)next_chunk(tp))
            MEM_LOG("CHUNK_PAGE_ERROR:unmap_chunk_pages, error chunk boundary");
    }
#endif
#ifdef DL_CHUNK_MEM_DEBUG
    // Verify the neighbouring chunk (and this chunk, if nothing was
    // decommitted) is still accessible.
    mchunkptr next = next_chunk(tp);
    do_check_any_chunk_access(next, chunksize(next));
    if (!npages)  do_check_any_chunk_access((mchunkptr)tp, psize);
#endif

    return (npages);
}
hgs
parents:
diff changeset
   261
hgs
parents:
diff changeset
   262
/* Purpose:     Unmap all pages between previously unmapped and end of top chunk
 *              and reset top to beginning of prev chunk
 * Arguments:   fm - global malloc state
 *              prev - previous chunk which has unmapped pages
 *              psize - size of previous chunk
 *              prev_npages - number of unmapped pages from previous chunk
 * Return:      nonzero if successful, else 0
 * Note:        Variant of dlmalloc's sys_trim that trims back to a preceding
 *              chunk rather than only shrinking the top chunk.
 */
TInt RNewAllocator::sys_trim_partial(mstate m, mchunkptr prev, size_t psize, size_t prev_npages)
{
    size_t released = 0;
    size_t extra = 0;
    if (is_initialized(m)) {
      // Fold the top chunk into prev: everything from prev to the end of top
      // is the candidate region to give back.
      psize += m->topsize;
      char *a_addr = tchunk_page_align(prev); // includes space for TOP footer
      size_t addr_offset = address_offset(a_addr, prev);
      assert(addr_offset > TOP_FOOT_SIZE); //always assert?
      assert((char*)iTop >= a_addr); //always assert?
      // Bytes above the first page boundary of prev, i.e. how much the heap
      // break can retreat.
      if ((char*)iTop > a_addr)
          extra = address_offset(iTop, a_addr);

#ifdef OOM_LOGGING
      if ((char*)iTop < a_addr)
          MEM_LOGF(_L8("RSymbianDLHeap::sys_trim_partial - incorrect iTop value, top=%x, iTop=%x"), m->top, iTop);
#endif
        msegmentptr sp = segment_holding(m, (TUint8*)prev);
        if (!is_extern_segment(sp)) {
          if (is_mmapped_segment(sp)) {
            if (HAVE_MMAP &&  sp->size >= extra && !has_segment_link(m, sp)) { /* can't shrink if pinned */
             // size_t newsize = sp->size - extra;
              /* Prefer mremap, fall back to munmap */
              if ((CALL_MREMAP(sp->base, sp->size, sp->size - extra, 0) != MFAIL) ||
                  (CALL_MUNMAP(sp->base + sp->size - extra, extra) == 0)) {
                released = extra;
              }
            }
          }
          else if (HAVE_MORECORE) {
            if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
                extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - mparams.granularity;
            ACQUIRE_MORECORE_LOCK(m);
            {
              /* Make sure end of memory is where we last set it. */
              TUint8* old_br = (TUint8*)(CALL_MORECORE(0));
              if (old_br == sp->base + sp->size) {
                // Shrink the break, then re-read it to see how much was
                // actually released.
                TUint8* rel_br = (TUint8*)(CALL_MORECORE(-extra));
                TUint8* new_br = (TUint8*)(CALL_MORECORE(0));
                if (rel_br != CMFAIL && new_br < old_br)
                  released = old_br - new_br;
              }
            }
            RELEASE_MORECORE_LOCK(m);
          }
        }

        if (released != 0) {
          TRACE_DL_CHUNK_UNMAP(prev, psize, a_addr, released);
          // NOTE(review): compensates for pages of prev that were already
          // unmapped before this call — confirm against unmap()/iChunkSize
          // bookkeeping elsewhere.
          iChunkSize += prev_npages*mparams.page_size; // prev_unmapped was already unmapped
          TRACE_UNMAPPED_CHUNK(-1*prev_npages*mparams.page_size);
          ASSERT_RCHUNK_SIZE();
          sp->size -= released;
          m->footprint -= released;
        }

        /* reset top to prev chunk */
        init_top(m, prev, addr_offset - TOP_FOOT_SIZE);
        check_top_chunk(m, m->top);
    }

    // DL region not initalized, do not reset top here
    return (released != 0)? 1 : 0;
}
hgs
parents:
diff changeset
   335
hgs
parents:
diff changeset
   336
hgs
parents:
diff changeset
   337
// Maximum number of call-stack entries emitted per trace record.
#define STACKSIZE 32
// Debug-only (TRACING_CALLSTACKS): walk the current thread's stack and emit
// up to STACKSIZE probable return addresses via BTRACE. Words whose top
// nibble is 0x7 or 0x8 are treated as code addresses — presumably the RAM
// and ROM code ranges on the target; TODO confirm for the platform in use.
inline void RNewAllocator::TraceCallStack()
{
#ifdef TRACING_CALLSTACKS
    TUint32 filteredStack[STACKSIZE];
    TThreadStackInfo info;
    // Address of a local variable = current stack pointer (approximately).
    TUint32 *sp = (TUint32*)&sp;
    RThread().StackInfo(info);
    Lock();
    TInt i;
    for (i=0;i<STACKSIZE;i++) {
        if ((TLinAddr)sp>=info.iBase) break; // reached the stack base
        // Scan upward for the next word that looks like a code address.
        while ((TLinAddr)sp < info.iBase) {
            TUint32 cur = *sp++;
            TUint32 range = cur & 0xF0000000;
            if (range == 0x80000000 || range == 0x70000000) {
                filteredStack[i] = cur;
                break;
            }
        }
        // NOTE(review): if the inner scan hits the stack base without a
        // match, filteredStack[i] is left unset but may still be counted in
        // the trace length below — benign for a debug trace, but worth
        // confirming.
    }
    Unlock();
    BTraceContextBig(BTrace::EHeap, BTrace::EHeapCallStack, (TUint32)this, filteredStack, i * 4);
#endif
}
hgs
parents:
diff changeset
   362
hgs
parents:
diff changeset
   363
// Return the MMU page size reported by the HAL, falling back to 4KB
// (0x1000) if the HAL query fails.
size_t getpagesize()
{
    TInt pageSize = 0;
    if (GET_PAGE_SIZE(pageSize) != KErrNone)
        return (size_t)0x1000;
    return (size_t)pageSize;
}
hgs
parents:
diff changeset
   371
hgs
parents:
diff changeset
   372
#define gm  (&iGlobalMallocState)
hgs
parents:
diff changeset
   373
hgs
parents:
diff changeset
   374
// Constructor for a fixed-size heap (aMinLength == aMaxLength == aMaxLength).
// Slab and page sub-allocators are disabled (Init(0, 0, 0)); only the DL
// allocator is used.
//   aMaxLength    - total (fixed) length of the heap
//   aAlign        - requested cell alignment; must be >= sizeof(TAny*) and a
//                   power of two, otherwise the default of 4 is used
//   aSingleThread - if true, no locking is performed on this heap
RNewAllocator::RNewAllocator(TInt aMaxLength, TInt aAlign, TBool aSingleThread)
// constructor for a fixed heap. Just use DL allocator
    :iMinLength(aMaxLength), iMaxLength(aMaxLength), iOffset(0), iGrowBy(0), iChunkHandle(0),
    iNestingLevel(0), iAllocCount(0), iFailType(ENone), iTestData(NULL), iChunkSize(aMaxLength)
    {

    // Validate the caller's alignment argument. (Bug fix: this previously
    // tested __POWER_OF_2(iAlign), but iAlign is not in the initializer list
    // and so was still uninitialized here; the argument is what must be
    // checked.)
    if ((TUint32)aAlign>=sizeof(TAny*) && __POWER_OF_2(aAlign))
        {
        iAlign = aAlign;
        }
    else
        {
        iAlign = 4; // default alignment
        }
    iPageSize = 0; // fixed heaps never grow, so the page size is unused
    iFlags = aSingleThread ? (ESingleThreaded|EFixedSize) : EFixedSize;

    // Disable slab and page allocators: a fixed heap cannot grow to host them.
    Init(0, 0, 0);
    }
hgs
parents:
diff changeset
   393
hgs
parents:
diff changeset
   394
// Constructor for a growable heap living inside an existing chunk.
//   aChunkHandle  - handle of the chunk hosting the heap
//   aOffset       - offset of the heap within the chunk (must be >= 0)
//   aMinLength    - initial committed length (also the initial high-water mark)
//   aMaxLength    - maximum length the heap may grow to
//   aGrowBy       - growth increment, rounded up to a whole page
//   aAlign        - cell alignment
//   aSingleThread - if true, no locking is performed on this heap
RNewAllocator::RNewAllocator(TInt aChunkHandle, TInt aOffset, TInt aMinLength, TInt aMaxLength, TInt aGrowBy,
            TInt aAlign, TBool aSingleThread)
        : iMinLength(aMinLength), iMaxLength(aMaxLength), iOffset(aOffset), iChunkHandle(aChunkHandle), iAlign(aAlign), iNestingLevel(0), iAllocCount(0),
            iFailType(ENone), iTestData(NULL), iChunkSize(aMinLength),iHighWaterMark(aMinLength)
    {
    iPageSize = malloc_getpagesize;
    __ASSERT_ALWAYS(aOffset >=0, User::Panic(KDLHeapPanicCategory, ETHeapNewBadOffset));
    iGrowBy = _ALIGN_UP(aGrowBy, iPageSize); // grow in whole pages
    iFlags = aSingleThread ? ESingleThreaded : 0;

    // Initialise
    // if the heap is created with aMinLength==aMaxLength then it cannot allocate slab or page memory
    // so these sub-allocators should be disabled. Otherwise initialise with default values
    if (aMinLength == aMaxLength)
        Init(0, 0, 0);
    else
        Init(0x3fff, 15, 0x10000);  // all slabs, page {32KB}, trim {64KB} // Andrew: Adopting Webkit config?
        //Init(0xabe, 16, iPageSize*4); // slabs {48, 40, 32, 24, 20, 16, 12, 8}, page {64KB}, trim {16KB}
#ifdef TRACING_HEAPS
    // Emit the hosting chunk's name and the heap's creation parameters so
    // traces can identify this heap instance.
    RChunk chunk;
    chunk.SetHandle(iChunkHandle);
    TKName chunk_name;
    chunk.FullName(chunk_name);
    BTraceContextBig(BTrace::ETest1, 2, 22, chunk_name.Ptr(), chunk_name.Size());

    TUint32 traceData[4];
    traceData[0] = iChunkHandle;
    traceData[1] = iMinLength;
    traceData[2] = iMaxLength;
    traceData[3] = iAlign;
    BTraceContextN(BTrace::ETest1, 1, (TUint32)this, 11, traceData, sizeof(traceData));
#endif

    }
hgs
parents:
diff changeset
   428
hgs
parents:
diff changeset
   429
// Placement new: construct the allocator object at aBase (the start of its
// chunk). aSize must cover the RNewAllocator object itself; the heap's cell
// area (iBase) begins immediately after it.
TAny* RNewAllocator::operator new(TUint aSize, TAny* aBase) __NO_THROW
    {
    __ASSERT_ALWAYS(aSize>=sizeof(RNewAllocator), HEAP_PANIC(ETHeapNewBadSize));
    RNewAllocator* h = (RNewAllocator*)aBase;
    h->iAlign = 0x80000000; // garbage value, replaced by the constructor
    h->iBase = ((TUint8*)aBase) + aSize; // cells start right after the object
    return aBase;
    }
hgs
parents:
diff changeset
   437
hgs
parents:
diff changeset
   438
// Second-phase initialisation of the hybrid (slab / dlmalloc / paged) heap.
//  aBitmapSlab    - bitmap selecting which slab cell-size classes to enable,
//                   forwarded to slab_init()
//  aPagePower     - log2 page-allocator threshold, forwarded to paged_init()
//                   (see size table comment below)
//  aTrimThreshold - dlmalloc trim threshold, forwarded to Init_Dlmalloc()
// Must run after the constructor has set iAlign/iBase/iMinLength.
void RNewAllocator::Init(TInt aBitmapSlab, TInt aPagePower, size_t aTrimThreshold)
    {
    // iAlign must be a power of two at least the size of a pointer.
    __ASSERT_ALWAYS((TUint32)iAlign>=sizeof(TAny*) && __POWER_OF_2(iAlign), HEAP_PANIC(ETHeapNewBadAlignment));

    /*Moved code which does initialization */
    // Top of the initial committed region.
    iTop = (TUint8*)this + iMinLength;
    spare_page = 0;
    iAllocCount = 0; // FIXME  -- not used anywhere - already initialized to 0 in constructor anyway
    // mparams must be zeroed so init_mparams() (page_size == 0 check) runs once.
    memset(&mparams,0,sizeof(mparams));

    // Hand the region between iBase and iTop to the dlmalloc arena.
    Init_Dlmalloc(iTop - iBase, 0, aTrimThreshold);

    slab_init(aBitmapSlab);

    /*10-1K,11-2K,12-4k,13-8K,14-16K,15-32K,16-64K*/
    paged_init(aPagePower);

#ifdef TRACING_ALLOCS
        // Emit the configuration so trace tools can reconstruct heap layout.
        TUint32 traceData[3];
        traceData[0] = aBitmapSlab;
        traceData[1] = aPagePower;
        traceData[2] = aTrimThreshold;
        BTraceContextN(BTrace::ETest1, BTrace::EHeapAlloc, (TUint32)this, 0, traceData, sizeof(traceData));
#endif

    }
hgs
parents:
diff changeset
   464
hgs
parents:
diff changeset
   465
RNewAllocator::SCell* RNewAllocator::GetAddress(const TAny* aCell) const
//
// As much as possible, check a cell address and backspace it
// to point at the cell header.
//
    {
    // The user pointer must be aligned to the heap's alignment; anything else
    // cannot be a cell this heap handed out.
    TLinAddr m = TLinAddr(iAlign - 1);
    __ASSERT_ALWAYS(!(TLinAddr(aCell)&m), HEAP_PANIC(ETHeapBadCellAddress));

    // Step back over the cell header that precedes every allocation.
    SCell* pC = (SCell*)(((TUint8*)aCell)-EAllocCellSize);
    __CHECK_CELL(pC);

    return pC;
    }
hgs
parents:
diff changeset
   480
hgs
parents:
diff changeset
   481
// Returns the usable length of an allocated cell, dispatching on which of the
// three zones the pointer belongs to:
//  - addresses at/above 'this'                    -> dlmalloc zone
//  - page-offset  > cellalign                     -> slab zone
//  - page-offset == cellalign                     -> size stored just before cell
//  - page-offset  < cellalign (page-aligned-ish)  -> paged zone descriptor
TInt RNewAllocator::AllocLen(const TAny* aCell) const
{
    if (ptrdiff(aCell, this) >= 0)
    {
        // dlmalloc cell: size comes from the chunk header.
        mchunkptr m = mem2chunk(aCell);
        return chunksize(m) - CHUNK_OVERHEAD; // Andrew: Picking up webkit change.
    }
    if (lowbits(aCell, pagesize) > cellalign)
        // Slab cell: the slab header records the (fixed) cell size.
        return header_size(slab::slabfor(aCell)->header);
    if (lowbits(aCell, pagesize) == cellalign)
        // Size word stored immediately before the cell.
        return *(unsigned*)(offset(aCell,-int(cellalign)))-cellalign;
    // Paged cell: size lives in the page descriptor.
    return paged_descriptor(aCell)->size;
}
hgs
parents:
diff changeset
   494
hgs
parents:
diff changeset
   495
// Allocate aSize bytes, dispatching to one of three back-ends by size:
//  - aSize <  slab_threshold          -> fixed-size slab allocator
//  - aSize <  2^page_threshold        -> dlmalloc
//  - otherwise                        -> page allocator, falling back to
//                                        dlmalloc if the paged zone is full
// Returns NULL on failure. Thread-safe via Lock()/Unlock().
TAny* RNewAllocator::Alloc(TInt aSize)
{
    __ASSERT_ALWAYS((TUint)aSize<(KMaxTInt/2),HEAP_PANIC(ETHeapBadAllocatedCellSize));

    TAny* addr;

#ifdef TRACING_ALLOCS
    TInt aCnt=0;  // records which back-end served the request (0 slab, 1 dl, 2 paged)
#endif
    Lock();
    if (aSize < slab_threshold)
    {
        // sizemap maps the request (rounded up to 4 bytes) to a slab class index.
        TInt ix = sizemap[(aSize+3)>>2];
        ASSERT(ix != 0xff);
        addr = slab_allocate(slaballoc[ix]);
        // Account the full slab cell size, not the requested size.
        if (addr) iTotalAllocSize += slaballoc[ix].size;
    }else if ((aSize >> page_threshold)==0)
        {
#ifdef TRACING_ALLOCS
        aCnt=1;
#endif
        addr = dlmalloc(aSize);
        }
    else
        {
#ifdef TRACING_ALLOCS
        aCnt=2;
#endif
        addr = paged_allocate(aSize);
        //attempt dlmalloc() if paged_allocate() fails. This can improve allocation chances if fragmentation is high in the heap.
        if (!addr) { // paged_allocator failed, try in dlmalloc
            addr = dlmalloc(aSize);
        }
    }

    if (addr) {
        iCellCount++;
        // Increment iTotalAllocSize in memory segment specific code for more accuracy
        //iTotalAllocSize += aSize;
    }
    Unlock();

#ifdef TRACING_ALLOCS
    if (iFlags & ETraceAllocs)
        {
        // NOTE(review): AllocLen(addr) is called even when addr is NULL
        // (allocation failure); AllocLen does not guard against NULL -- verify
        // tracing is never enabled where failed allocations can occur.
        TUint32 traceData[3];
        traceData[0] = AllocLen(addr);
        traceData[1] = aSize;
        traceData[2] = aCnt;
        BTraceContextN(BTrace::EHeap, BTrace::EHeapAlloc, (TUint32)this, (TUint32)addr, traceData, sizeof(traceData));
        TraceCallStack();
        }
#endif

    return addr;
}
hgs
parents:
diff changeset
   551
hgs
parents:
diff changeset
   552
// Release unused memory back to the system: trims the dlmalloc arena and
// unmaps the cached spare page, if any. Fixed-size heaps cannot shrink and
// are left untouched. Always returns 0 (no bytes-recovered reporting).
TInt RNewAllocator::Compress()
    {
    if (!(iFlags & EFixedSize))
        {
        Lock();
        dlmalloc_trim(0);           // give free dlmalloc pages back
        if (spare_page != 0)
            {
            unmap(spare_page, pagesize);
            spare_page = 0;         // forget the now-unmapped page
            }
        Unlock();
        }
    return 0;
    }
hgs
parents:
diff changeset
   567
hgs
parents:
diff changeset
   568
// Free a cell previously returned by Alloc()/ReAlloc(). NULL is a no-op.
// Dispatches on the pointer's zone, mirroring AllocLen():
//  - addresses at/above 'this'       -> dlmalloc zone (dlfree)
//  - page-offset <= cellalign        -> paged zone    (paged_free)
//  - otherwise                       -> slab zone     (slab_free)
void RNewAllocator::Free(TAny* aPtr)
{

#ifdef TRACING_ALLOCS
    TInt aCnt=0;  // which back-end freed the cell (1 dl, 2 paged, 0 slab)
#endif
#ifdef ENABLE_DEBUG_TRACE
    RThread me;
    TBuf<100> thName;
    me.FullName(thName);
#endif
    //if (!aPtr) return; //return in case of NULL pointer

    Lock();

    if (!aPtr)
        ;  // freeing NULL is a no-op
    else if (ptrdiff(aPtr, this) >= 0)
        {
#ifdef TRACING_ALLOCS
        aCnt = 1;
#endif
        dlfree( aPtr);
        }
    else if (lowbits(aPtr, pagesize) <= cellalign)
        {
#ifdef TRACING_ALLOCS
        aCnt = 2;
#endif
        paged_free(aPtr);
        }
    else
        {
#ifdef TRACING_ALLOCS
        aCnt = 0;
#endif
        slab_free(aPtr);
        }
    // BUG FIX: previously iCellCount was decremented unconditionally, so
    // Free(NULL) silently corrupted the live-cell count reported by
    // AllocSize(). Only count real frees.
    if (aPtr)
        iCellCount--;
    Unlock();

#ifdef TRACING_ALLOCS
    if (iFlags & ETraceAllocs)
        {
        TUint32 traceData;
        traceData = aCnt;
        BTraceContextN(BTrace::EHeap, BTrace::EHeapFree, (TUint32)this, (TUint32)aPtr, &traceData, sizeof(traceData));
        TraceCallStack();
        }
#endif
}
hgs
parents:
diff changeset
   619
hgs
parents:
diff changeset
   620
hgs
parents:
diff changeset
   621
// Reset is intentionally unimplemented for this allocator: callers must not
// use it, so it panics unconditionally rather than silently doing nothing.
void RNewAllocator::Reset()
    {
    // TODO free everything
    User::Panic(_L("RNewAllocator"), 1); //this should never be called
    }
hgs
parents:
diff changeset
   626
hgs
parents:
diff changeset
   627
#ifdef TRACING_ALLOCS
// Emit a BTrace realloc event for a successful reallocation.
//  aPtr    - original cell (may equal aNewPtr if reallocated in place)
//  aSize   - requested new size
//  aNewPtr - resulting cell; no event is emitted if NULL (failed realloc)
//  aZone   - zone tag; when >= 0 and the cell moved, an extra EHeapFree event
//            is emitted for the old address (SAW workaround below)
inline void RNewAllocator::TraceReAlloc(TAny* aPtr, TInt aSize, TAny* aNewPtr, TInt aZone)
{
    if (aNewPtr && (iFlags & ETraceAllocs)) {
        TUint32 traceData[3];
        traceData[0] = AllocLen(aNewPtr);
        traceData[1] = aSize;
        traceData[2] = (TUint32) aPtr;
        BTraceContextN(BTrace::EHeap, BTrace::EHeapReAlloc, (TUint32) this, (TUint32) aNewPtr,
            traceData, sizeof(traceData));
        TraceCallStack();
        //workaround for SAW not handling reallocs properly
        if (aZone >= 0 && aPtr != aNewPtr) {
            BTraceContextN(BTrace::EHeap, BTrace::EHeapFree, (TUint32) this, (TUint32) aPtr,
                &aZone, sizeof(aZone));
            TraceCallStack();
        }
    }
}
#else
//Q_UNUSED generates code that prevents the compiler optimising out the empty inline function
// No-op stub when tracing is compiled out; keeps call sites unconditional.
inline void RNewAllocator::TraceReAlloc(TAny* , TInt , TAny* , TInt )
{}
#endif
hgs
parents:
diff changeset
   651
hgs
parents:
diff changeset
   652
// Resize a cell, reallocating in place within its current zone when possible
// and falling back to allocate/copy/free otherwise. aMode is ignored.
// NULL aPtr behaves like Alloc(aSize) (it lands in the paged-zone branch
// because lowbits(NULL, pagesize) == 0).
TAny* RNewAllocator::ReAlloc(TAny* aPtr, TInt aSize, TInt /*aMode = 0*/)
    {
    if (ptrdiff(aPtr,this)>=0)
    {
        // original cell is in DL zone
        if ((aSize>>page_threshold)==0 || aSize <= chunksize(mem2chunk(aPtr)) - CHUNK_OVERHEAD)
            {
            // new one is below page limit or smaller than old one (so can't be moved)
            Lock();
            TAny* addr = dlrealloc(aPtr,aSize);
            Unlock();
            TraceReAlloc(aPtr, aSize, addr, 2);
            return addr;
            }
        // else: growing past the page limit -- fall through to copy path below.
    }
    else if (lowbits(aPtr,pagesize)<=cellalign)
    {
        // original cell is either NULL or in paged zone
        if (!aPtr)
            return Alloc(aSize);

        // either the new size is larger (in which case it will still be in paged zone)
        // or it is smaller, but we will never move a shrinking cell so in paged zone
        // must handle [rare] case that aSize == 0, as paged_[re]allocate() will panic
        if (aSize == 0)
            aSize = 1;
        Lock();
        TAny* addr = paged_reallocate(aPtr,aSize);
        Unlock();
        TraceReAlloc(aPtr, aSize, addr, 2);
        return addr;
    }
    else
    {
        // original cell is in slab zone
        // return original if new one smaller
        if (aSize <= header_size(slab::slabfor(aPtr)->header))
            return aPtr;
        // else: fall through to copy path -- slab cells are fixed-size.
    }
    // can't do better than allocate/copy/free
    TAny* newp = Alloc(aSize);
    if (newp)
    {
        // Copy the smaller of old and new sizes, then release the old cell.
        TInt oldsize = AllocLen(aPtr);
        memcpy(newp,aPtr,oldsize<aSize?oldsize:aSize);
        Free(aPtr);
    }
    return newp;
    }
hgs
parents:
diff changeset
   701
hgs
parents:
diff changeset
   702
// Report free space in the heap: returns the total number of free bytes and
// writes the largest single free block into aBiggestBlock.
// Only the dlmalloc region is inspected; the slab and page allocators are not
// yet accounted for (see TODO).
TInt RNewAllocator::Available(TInt& aBiggestBlock) const
{
    //TODO: consider page and slab allocators

    // dlmallinfo() is ported C code that is not const-correct, so cast away
    // const to call it.
    RNewAllocator* mutableSelf = const_cast<RNewAllocator*>(this);
    mallinfo stats = mutableSelf->dlmallinfo();
    aBiggestBlock = stats.largestBlock;
    return stats.fordblks;
}
hgs
parents:
diff changeset
   712
// Returns the number of live cells; writes the total allocated byte count
// (as tracked by the segment-specific accounting) into aTotalAllocSize.
TInt RNewAllocator::AllocSize(TInt& aTotalAllocSize) const
{
    aTotalAllocSize = iTotalAllocSize;
    return iCellCount;
}
hgs
parents:
diff changeset
   717
hgs
parents:
diff changeset
   718
// Debug hook (RAllocator interface). Supported operations:
//  ECount - writes free-byte count (fordblks) to *a1, returns in-use bytes
//           (uordblks); dlmalloc region only.
//  EMarkStart/EMarkEnd/ESetFail/ECheck - accepted but no-ops (KErrNone).
// Anything else returns KErrNotSupported.
TInt RNewAllocator::DebugFunction(TInt aFunc, TAny* a1, TAny* /*a2*/)
    {
    TInt r = KErrNotSupported;
    TInt* a1int = reinterpret_cast<TInt*>(a1);
    switch (aFunc) {
    case RAllocator::ECount:
    {
        struct mallinfo mi = dlmallinfo();
        *a1int = mi.fordblks;
        r = mi.uordblks;
    }
        break;
    case RAllocator::EMarkStart:
    case RAllocator::EMarkEnd:
    case RAllocator::ESetFail:
    case RAllocator::ECheck:
        // Accepted silently so debug-build callers don't fail.
        r = KErrNone;
        break;
    }
    return r;
    }
hgs
parents:
diff changeset
   739
hgs
parents:
diff changeset
   740
// No allocator extensions are implemented.
TInt RNewAllocator::Extension_(TUint /* aExtensionId */, TAny*& /* a0 */, TAny* /* a1 */)
    {
    return KErrNotSupported;
    }
hgs
parents:
diff changeset
   744
hgs
parents:
diff changeset
   745
///////////////////////////////////////////////////////////////////////////////
hgs
parents:
diff changeset
   746
// imported from dla.cpp
hgs
parents:
diff changeset
   747
///////////////////////////////////////////////////////////////////////////////
hgs
parents:
diff changeset
   748
hgs
parents:
diff changeset
   749
//#include <unistd.h>
hgs
parents:
diff changeset
   750
//#define DEBUG_REALLOC
hgs
parents:
diff changeset
   751
#ifdef DEBUG_REALLOC
hgs
parents:
diff changeset
   752
#include <e32debug.h>
hgs
parents:
diff changeset
   753
#endif
hgs
parents:
diff changeset
   754
// One-time initialisation of the global dlmalloc parameters (mparams).
// Guarded by mparams.page_size == 0 so repeat calls are no-ops.
// aTrimThreshold becomes the dlmalloc trim threshold for this heap.
// Always returns 0.
int RNewAllocator::init_mparams(size_t aTrimThreshold /*= DEFAULT_TRIM_THRESHOLD*/)
{
    if (mparams.page_size == 0)
    {
        size_t s;
        mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
        mparams.trim_threshold = aTrimThreshold;
        #if MORECORE_CONTIGUOUS
            mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
        #else  /* MORECORE_CONTIGUOUS */
            mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
        #endif /* MORECORE_CONTIGUOUS */

            // NOTE(review): upstream dlmalloc derives the magic from a random
            // source; here it is a fixed constant, so the magic check offers
            // no protection against forged chunk headers -- confirm intended.
            s = (size_t)0x58585858U;
        ACQUIRE_MAGIC_INIT_LOCK(&mparams);
        if (mparams.magic == 0) {
          mparams.magic = s;
          /* Set up lock for main malloc area */
          INITIAL_LOCK(&gm->mutex);
          gm->mflags = mparams.default_mflags;
        }
        RELEASE_MAGIC_INIT_LOCK(&mparams);

        mparams.page_size = malloc_getpagesize;

        mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
                               DEFAULT_GRANULARITY : mparams.page_size);

        /* Sanity-check configuration:
           size_t must be unsigned and as wide as pointer type.
           ints must be at least 4 bytes.
           alignment must be at least 8.
           Alignment, min chunk size, and page size must all be powers of 2.
        */

        if ((sizeof(size_t) != sizeof(TUint8*)) ||
            (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
            (sizeof(int) < 4)  ||
            (MALLOC_ALIGNMENT < (size_t)8U) ||
            ((MALLOC_ALIGNMENT    & (MALLOC_ALIGNMENT-SIZE_T_ONE))    != 0) ||
            ((MCHUNK_SIZE         & (MCHUNK_SIZE-SIZE_T_ONE))         != 0) ||
            ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
            ((mparams.page_size   & (mparams.page_size-SIZE_T_ONE))   != 0))
          ABORT;
    }
    return 0;
}
hgs
parents:
diff changeset
   801
hgs
parents:
diff changeset
   802
/* Establish circular links for smallbins: each bin starts out as a
   one-node circular doubly-linked list pointing at itself. */
void RNewAllocator::init_bins(mstate m) {
  for (bindex_t ix = 0; ix < NSMALLBINS; ++ix) {
    sbinptr b = smallbin_at(m, ix);
    b->fd = b->bk = b;
  }
}
hgs
parents:
diff changeset
   810
/* ---------------------------- malloc support --------------------------- */
hgs
parents:
diff changeset
   811
hgs
parents:
diff changeset
   812
/* allocate a large request from the best fitting chunk in a treebin */
hgs
parents:
diff changeset
   813
/* allocate a large request from the best fitting chunk in a treebin */
// Searches the tree bin for nb and larger bins for the best-fitting free
// chunk, pages its memory back in if demand-paged out, then splits it.
// Returns NULL if no fit is found, if the remainder would beat dv (caller
// falls back to dv), or if committing RAM for a paged-out chunk fails.
void* RNewAllocator::tmalloc_large(mstate m, size_t nb) {
  tchunkptr v = 0;
  size_t rsize = -nb; /* Unsigned negation */
  tchunkptr t;
  bindex_t idx;
  compute_tree_index(nb, idx);

  if ((t = *treebin_at(m, idx)) != 0) {
    /* Traverse tree for this bin looking for node with size == nb */
    size_t sizebits =
    nb <<
    leftshift_for_tree_index(idx);
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
    for (;;) {
      tchunkptr rt;
      size_t trem = chunksize(t) - nb;
      if (trem < rsize) {
        // Better (smaller) remainder found; exact fit ends the search.
        v = t;
        if ((rsize = trem) == 0)
          break;
      }
      rt = t->child[1];
      // Descend left/right following the bits of nb, remembering the deepest
      // right subtree we skipped (it holds larger sizes).
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
      if (rt != 0 && rt != t)
        rst = rt;
      if (t == 0) {
        t = rst; /* set t to least subtree holding sizes > nb */
        break;
      }
      sizebits <<= 1;
    }
  }
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
    if (leftbits != 0) {
      bindex_t i;
      binmap_t leastbit = least_bit(leftbits);
      compute_bit2idx(leastbit, i);
      t = *treebin_at(m, i);
    }
  }
  while (t != 0) { /* find smallest of tree or subtree */
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
    t = leftmost_child(t);
  }
  /*  If dv is a better fit, return 0 so malloc will use it */
   if (v != 0) {
   if (RTCHECK(ok_address(m, v))) { /* split */
      mchunkptr r = chunk_plus_offset(v, nb);
      assert(chunksize(v) == rsize + nb);

      /* check for chunk memory page-in */
      size_t npages_out = 0;
      if (page_not_in_memory(v, chunksize(v))) {
          if (!is_small(rsize) && rsize>=CHUNK_PAGEOUT_THESHOLD) {
              // partial chunk page mapping
              // Only commit RAM for the allocated front part; the remainder
              // may stay paged out (npages_out records how many pages are).
              TInt result = map_chunk_pages_partial(v, chunksize(v), (tchunkptr)r, rsize);
              if (result < 0) return 0; // Failed to Commit RAM
              else npages_out = (size_t)result;
          }
          else {
              // full chunk page map needed
              TInt err = map_chunk_pages(v, chunksize(v));
              if (err != KErrNone)  return 0; // Failed to Commit RAM
          }
      }

      if (RTCHECK(ok_next(v, r))) {
        unlink_large_chunk(m, v);
        if (rsize < free_chunk_threshold) // exaust if less than slab threshold
          set_inuse_and_pinuse(m, v, (rsize + nb));
        else {
          // Split: mark the front nb bytes in-use, re-insert the remainder.
          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          insert_chunk(m, r, rsize, npages_out);
        }
        return chunk2mem(v);
      }
    }
#if !INSECURE // conditional statement to keep compiler happy. code is reachable if RTCHECK evaluates to False
    CORRUPTION_ERROR_ACTION(m);
#endif
  }
  return 0;
}
hgs
parents:
diff changeset
   902
hgs
parents:
diff changeset
   903
/* allocate a small request from the best fitting chunk in a treebin */
hgs
parents:
diff changeset
   904
/* allocate a small request from the best fitting chunk in a treebin */
// Used when no small bin can satisfy the request: takes the smallest chunk
// in the least non-empty tree bin, pages its memory in if needed, and splits
// off the remainder. Returns NULL if committing RAM fails.
void* RNewAllocator::tmalloc_small(mstate m, size_t nb) {
  tchunkptr t, v;
  size_t rsize;
  bindex_t i;
  binmap_t leastbit = least_bit(m->treemap);
  compute_bit2idx(leastbit, i);

  v = t = *treebin_at(m, i);
  rsize = chunksize(t) - nb;

  // Walk down to the smallest chunk in this tree.
  while ((t = leftmost_child(t)) != 0) {
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
  }

  if (RTCHECK(ok_address(m, v))) {
    mchunkptr r = chunk_plus_offset(v, nb);
    assert(chunksize(v) == rsize + nb);

    /* check for chunk memory page-in */
      if (page_not_in_memory(v, chunksize(v))) {
          TInt err = map_chunk_pages(v, chunksize(v));
          if (err != KErrNone)  return 0; // Failed to Commit RAM
      }

    if (RTCHECK(ok_next(v, r))) {
      unlink_large_chunk(m, v);
      if (rsize < free_chunk_threshold) // exaust if less than slab threshold
        set_inuse_and_pinuse(m, v, (rsize + nb));
      else {
        // Split: mark the front nb bytes in-use, re-insert the remainder.
        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
        set_size_and_pinuse_of_free_chunk(r, rsize);
        insert_chunk(m, r, rsize, 0);
      }
      return chunk2mem(v);
    }
  }
#if !INSECURE // conditional statement to keep compiler happy. code is reachable if RTCHECK evaluates to False
  CORRUPTION_ERROR_ACTION(m);
  return 0;
#endif
}
hgs
parents:
diff changeset
   949
hgs
parents:
diff changeset
   950
/*
 * (Re)establish 'p' as the top (wilderness) chunk of malloc state 'm'.
 * Aligns the chunk's payload, records it as m->top, and writes a fake
 * trailing chunk header so the bookkeeping overhead past the top is
 * never handed out.  Also re-arms the trim threshold check.
 */
void RNewAllocator::init_top(mstate m, mchunkptr p, size_t psize)
{
    /* Align the payload address; the slack consumed reduces psize. */
    const size_t alignSlack = align_offset(chunk2mem(p));
    p = (mchunkptr)((TUint8*)p + alignSlack);
    psize -= alignSlack;

    /* Install the new top chunk; PINUSE marks its predecessor in use. */
    m->top = p;
    m->topsize = psize;
    p->head = psize | PINUSE_BIT;

    /* Fake trailing chunk holding overhead space only; written once. */
    chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;

    m->trim_check = mparams.trim_threshold; /* reset on each update */
}
hgs
parents:
diff changeset
   964
hgs
parents:
diff changeset
   965
/*
 * Resize the allocation at 'oldmem' to hold at least 'bytes'.
 * Strategy, tried in order under the allocator lock:
 *   1. mmapped chunk            -> delegate to mmap_resize
 *   2. chunk already big enough -> keep in place; split off the tail as a
 *      free chunk only if the tail is at least free_chunk_threshold
 *   3. chunk adjacent to top    -> grow in place by absorbing top space
 *   4. otherwise                -> malloc new block, copy payload, free old
 * Returns the (possibly moved) payload pointer, or 0 on failure.
 * NOTE(review): assumes oldmem != 0 and bytes != 0 -- caller is expected
 * to route the free/plain-malloc degenerate cases; confirm at call sites.
 */
void* RNewAllocator::internal_realloc(mstate m, void* oldmem, size_t bytes)
{
  if (bytes >= MAX_REQUEST) {
    MALLOC_FAILURE_ACTION;
    return 0;
  }
  /* PREACTION acquires the allocator lock; 0 means acquired OK. */
  if (!PREACTION(m)) {
    mchunkptr oldp = mem2chunk(oldmem);
    size_t oldsize = chunksize(oldp);
    mchunkptr next = chunk_plus_offset(oldp, oldsize);
    mchunkptr newp = 0;     /* chunk reused/extended in place, if any */
    void* extra = 0;        /* trailing split to free after POSTACTION */

    /* Try to either shrink or extend into top. Else malloc-copy-free */

    /* Sanity-check the old chunk and its successor before touching them. */
    if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
                ok_next(oldp, next) && ok_pinuse(next))) {
      size_t nb = request2size(bytes);
      if (is_mmapped(oldp))
        newp = mmap_resize(m, oldp, nb);
      else
      if (oldsize >= nb) { /* already big enough */
        size_t rsize = oldsize - nb;
        newp = oldp;
        /* Only split off the remainder if it is big enough to be a
           useful free chunk; smaller tails stay attached (exhausted). */
        if (rsize >= free_chunk_threshold) {
          mchunkptr remainder = chunk_plus_offset(newp, nb);
          set_inuse(m, newp, nb);
          set_inuse(m, remainder, rsize);
          extra = chunk2mem(remainder);
          /* iTotalAllocSize tracks bytes handed out; the tail returns. */
          iTotalAllocSize -= rsize;
  }
      }
        /*AMOD: Modified to optimized*/
        else if (next == m->top && oldsize + m->topsize > nb)
        {
            /* Expand into top */
            /* NOTE(review): this inner test repeats the else-if condition
               verbatim and is therefore always true here. */
            if (oldsize + m->topsize > nb)
            {
                size_t newsize = oldsize + m->topsize;
                size_t newtopsize = newsize - nb;
                mchunkptr newtop = chunk_plus_offset(oldp, nb);
                set_inuse(m, oldp, nb);
                newtop->head = newtopsize |PINUSE_BIT;
                m->top = newtop;
                m->topsize = newtopsize;
                iTotalAllocSize += nb - oldsize;
                newp = oldp;
            }
      }
    }
    else {
      USAGE_ERROR_ACTION(m, oldmem);
      POSTACTION(m);  /* release lock before bailing out */
      return 0;
    }

    POSTACTION(m);

    if (newp != 0) {
      /* Resized in place; release the split-off tail (if any) now that
         the lock has been dropped. */
      if (extra != 0) {
        internal_free(m, extra);
      }
      check_inuse_chunk(m, newp);
      return chunk2mem(newp);
    }
    else {
      /* Fall back: allocate fresh, copy the smaller of old payload /
         requested size, then free the original block. */
      void* newmem = internal_malloc(m, bytes);
      if (newmem != 0) {
        size_t oc = oldsize - overhead_for(oldp);
        memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
        internal_free(m, oldmem);
      }
      return newmem;
    }
  }
#if USE_LOCKS // keep the compiler happy
  return 0;
#endif
}
hgs
parents:
diff changeset
  1044
/* ----------------------------- statistics ------------------------------ */
hgs
parents:
diff changeset
  1045
/*
 * Build a mallinfo snapshot for state 'm' by walking every segment and
 * every chunk in it (stopping at the top chunk or a fencepost header).
 * Computes: total arena bytes (sum), number of free chunks (nfree),
 * free bytes (mfree), largest free block, and per-segment chunk count.
 * Returns a zeroed struct if the state is uninitialized or the lock
 * cannot be taken.
 */
mallinfo RNewAllocator::internal_mallinfo(mstate m) {
  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  TInt chunkCnt = 0;  /* number of segments visited (one count per segment) */
  if (!PREACTION(m)) {
    check_malloc_state(m);
    if (is_initialized(m)) {
      size_t nfree = SIZE_T_ONE; /* top always free */
      size_t mfree = m->topsize + TOP_FOOT_SIZE;
      size_t sum = mfree;
      msegmentptr s = &m->seg;
      /* Outer loop: each memory segment owned by this state. */
      while (s != 0) {
        mchunkptr q = align_as_chunk(s->base);
        chunkCnt++;
        /* Inner loop: every chunk in the segment up to top/fencepost. */
        while (segment_holds(s, q) &&
               q != m->top && q->head != FENCEPOST_HEAD) {
          size_t sz = chunksize(q);
          sum += sz;
          if (!cinuse(q)) {
            /* Track the largest single free block seen. */
            if (sz > nm.largestBlock)
              nm.largestBlock = sz;
            mfree += sz;
            ++nfree;
          }
          q = next_chunk(q);
        }
        s = s->next;
      }
      nm.arena    = sum;                    /* total space in walked chunks */
      nm.ordblks  = nfree;                  /* count of free chunks */
      nm.hblkhd   = m->footprint - sum;     /* footprint not seen in walk */
      nm.usmblks  = m->max_footprint;       /* high-water footprint */
      nm.uordblks = m->footprint - mfree;   /* bytes in use */
      nm.fordblks = mfree;                  /* bytes free */
      nm.keepcost = m->topsize;             /* releasable top space */
      nm.cellCount= chunkCnt;/*number of chunks allocated*/
    }
    POSTACTION(m);
  }
  return nm;
}
hgs
parents:
diff changeset
  1085
hgs
parents:
diff changeset
  1086
/*
 * Walk all segments of 'm' computing footprint and in-use byte totals
 * (same traversal as internal_mallinfo).
 * NOTE(review): 'fp' and 'used' are computed but never reported here --
 * the upstream dlmalloc version printed them; any output path appears
 * to have been removed, leaving this as a consistency walk only (plus
 * check_malloc_state in debug builds). Confirm whether a stats sink
 * was intended.
 */
void  RNewAllocator::internal_malloc_stats(mstate m) {
if (!PREACTION(m)) {
  size_t fp = 0;     /* total footprint */
  size_t used = 0;   /* footprint minus free chunks and top space */
  check_malloc_state(m);
  if (is_initialized(m)) {
    msegmentptr s = &m->seg;
    //size_t maxfp = m->max_footprint;
    fp = m->footprint;
    used = fp - (m->topsize + TOP_FOOT_SIZE);

    /* Subtract every free chunk found in every segment from 'used'. */
    while (s != 0) {
      mchunkptr q = align_as_chunk(s->base);
      while (segment_holds(s, q) &&
             q != m->top && q->head != FENCEPOST_HEAD) {
        if (!cinuse(q))
          used -= chunksize(q);
        q = next_chunk(q);
      }
      s = s->next;
    }
  }
  POSTACTION(m);
}
}
hgs
parents:
diff changeset
  1111
/* support for mallopt */
hgs
parents:
diff changeset
  1112
int RNewAllocator::change_mparam(int param_number, int value) {
hgs
parents:
diff changeset
  1113
  size_t val = (size_t)value;
hgs
parents:
diff changeset
  1114
  init_mparams(DEFAULT_TRIM_THRESHOLD);
hgs
parents:
diff changeset
  1115
  switch (param_number) {
hgs
parents:
diff changeset
  1116
  case M_TRIM_THRESHOLD:
hgs
parents:
diff changeset
  1117
    mparams.trim_threshold = val;
hgs
parents:
diff changeset
  1118
    return 1;
hgs
parents:
diff changeset
  1119
  case M_GRANULARITY:
hgs
parents:
diff changeset
  1120
    if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
hgs
parents:
diff changeset
  1121
      mparams.granularity = val;
hgs
parents:
diff changeset
  1122
      return 1;
hgs
parents:
diff changeset
  1123
    }
hgs
parents:
diff changeset
  1124
    else
hgs
parents:
diff changeset
  1125
      return 0;
hgs
parents:
diff changeset
  1126
  case M_MMAP_THRESHOLD:
hgs
parents:
diff changeset
  1127
    mparams.mmap_threshold = val;
hgs
parents:
diff changeset
  1128
    return 1;
hgs
parents:
diff changeset
  1129
  default:
hgs
parents:
diff changeset
  1130
    return 0;
hgs
parents:
diff changeset
  1131
  }
hgs
parents:
diff changeset
  1132
}
hgs
parents:
diff changeset
  1133
/* Get memory from system using MORECORE or MMAP */
hgs
parents:
diff changeset
  1134
/*
 * Obtain at least 'nb' more usable bytes for state 'm' from the system.
 * Order of attempts:
 *   0. Very large requests (>= mmap_threshold) are mmapped directly.
 *   1. Contiguous MORECORE extension of the segment holding top.
 *   2. MMAP of a fresh region.
 *   3. Non-contiguous MORECORE as a last resort.
 * Newly obtained memory is merged with an adjacent segment when possible
 * (appended after it, or prepended before it via prepend_alloc), else
 * registered as a new segment; the request is then satisfied from the
 * new/extended top chunk.  Returns the payload pointer, or 0 on failure.
 */
void* RNewAllocator::sys_alloc(mstate m, size_t nb)
{
    TUint8* tbase = CMFAIL;   /* base of memory obtained, CMFAIL if none */
    size_t tsize = 0;         /* size of memory obtained */
    flag_t mmap_flag = 0;     /* IS_MMAPPED_BIT if obtained via mmap */
    //init_mparams();/*No need to do init_params here*/
    /* Directly map large chunks */
    if (use_mmap(m) && nb >= mparams.mmap_threshold)
    {
        void* mem = mmap_alloc(m, nb);
        if (mem != 0)
            return mem;
    }
  /*
    Try getting memory in any of three ways (in most-preferred to
    least-preferred order):
    1. A call to MORECORE that can normally contiguously extend memory.
       (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
       or main space is mmapped or a previous contiguous call failed)
    2. A call to MMAP new space (disabled if not HAVE_MMAP).
       Note that under the default settings, if MORECORE is unable to
       fulfill a request, and HAVE_MMAP is true, then mmap is
       used as a noncontiguous system allocator. This is a useful backup
       strategy for systems with holes in address spaces -- in this case
       sbrk cannot contiguously expand the heap, but mmap may be able to
       find space.
    3. A call to MORECORE that cannot usually contiguously extend memory.
       (disabled if not HAVE_MORECORE)
  */
  /*Trying to allocate the memory*/
    if (MORECORE_CONTIGUOUS && !use_noncontiguous(m))
    {
    TUint8* br = CMFAIL;
    msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (TUint8*)m->top);
    size_t asize = 0;
    ACQUIRE_MORECORE_LOCK(m);
    if (ss == 0)
    {  /* First time through or recovery */
        TUint8* base = (TUint8*)CALL_MORECORE(0);
        if (base != CMFAIL)
        {
            asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
            /* Adjust to end on a page boundary */
            if (!is_page_aligned(base))
                asize += (page_align((size_t)base) - (size_t)base);
            /* Can't call MORECORE if size is negative when treated as signed */
            if (asize < HALF_MAX_SIZE_T &&(br = (TUint8*)(CALL_MORECORE(asize))) == base)
            {
                tbase = base;
                tsize = asize;
            }
        }
    }
    else
    {
      /* Subtract out existing available top space from MORECORE request. */
        asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
    /* Use mem here only if it did continuously extend old space */
      if (asize < HALF_MAX_SIZE_T &&
          (br = (TUint8*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
        tbase = br;
        tsize = asize;
      }
    }
    if (tbase == CMFAIL) {    /* Cope with partial failure */
      if (br != CMFAIL) {    /* Try to use/extend the space we did get */
        if (asize < HALF_MAX_SIZE_T &&
            asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
          /* We got something, but not enough: ask for the difference. */
          size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
          if (esize < HALF_MAX_SIZE_T) {
            TUint8* end = (TUint8*)CALL_MORECORE(esize);
            if (end != CMFAIL)
              asize += esize;
            else {            /* Can't use; try to release */
              CALL_MORECORE(-asize);
              br = CMFAIL;
            }
          }
        }
      }
      if (br != CMFAIL) {    /* Use the space we did get */
        tbase = br;
        tsize = asize;
      }
      else
        disable_contiguous(m); /* Don't try contiguous path in the future */
    }
    RELEASE_MORECORE_LOCK(m);
  }
  if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
    size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
    size_t rsize = granularity_align(req);
    if (rsize > nb) { /* Fail if wraps around zero */
      TUint8* mp = (TUint8*)(CALL_MMAP(rsize));
      if (mp != CMFAIL) {
        tbase = mp;
        tsize = rsize;
        mmap_flag = IS_MMAPPED_BIT;
      }
    }
  }
  if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
    size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
    if (asize < HALF_MAX_SIZE_T) {
      TUint8* br = CMFAIL;
      TUint8* end = CMFAIL;
      ACQUIRE_MORECORE_LOCK(m);
      br = (TUint8*)(CALL_MORECORE(asize));
      end = (TUint8*)(CALL_MORECORE(0));
      RELEASE_MORECORE_LOCK(m);
      if (br != CMFAIL && end != CMFAIL && br < end) {
        size_t ssize = end - br;
        if (ssize > nb + TOP_FOOT_SIZE) {
          tbase = br;
          tsize = ssize;
        }
      }
    }
  }
  if (tbase != CMFAIL) {
    /* Account the new memory in the footprint high-water mark. */
    if ((m->footprint += tsize) > m->max_footprint)
      m->max_footprint = m->footprint;
    if (!is_initialized(m)) { /* first-time initialization */
      m->seg.base = m->least_addr = tbase;
      m->seg.size = tsize;
      m->seg.sflags = mmap_flag;
      m->magic = mparams.magic;
      init_bins(m);
      if (is_global(m))
        init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
      else {
        /* Offset top by embedded malloc_state */
        mchunkptr mn = next_chunk(mem2chunk(m));
        init_top(m, mn, (size_t)((tbase + tsize) - (TUint8*)mn) -TOP_FOOT_SIZE);
      }
    }else {
      /* Try to merge with an existing segment */
      msegmentptr sp = &m->seg;
      /* Look for a segment that ends exactly where the new memory begins. */
      while (sp != 0 && tbase != sp->base + sp->size)
        sp = sp->next;
      if (sp != 0 && !is_extern_segment(sp) &&
          (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
          segment_holds(sp, m->top))
          { /* append */
        sp->size += tsize;
        init_top(m, m->top, m->topsize + tsize);
      }
      else {
        if (tbase < m->least_addr)
          m->least_addr = tbase;
        sp = &m->seg;
        /* Look for a segment that begins exactly where the new memory ends. */
        while (sp != 0 && sp->base != tbase + tsize)
          sp = sp->next;
        if (sp != 0 &&
            !is_extern_segment(sp) &&
            (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
          TUint8* oldbase = sp->base;
          sp->base = tbase;
          sp->size += tsize;
          /* Carve the request out of the front of the grown segment. */
          return prepend_alloc(m, tbase, oldbase, nb);
        }
        else
          add_segment(m, tbase, tsize, mmap_flag);
      }
    }
    if (nb < m->topsize) { /* Allocate from new or extended top space */
      size_t rsize = m->topsize -= nb;
      mchunkptr p = m->top;
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
      check_top_chunk(m, m->top);
      check_malloced_chunk(m, chunk2mem(p), nb);
      return chunk2mem(p);
    }
  }
  /*need to check this*/
  MEM_DUMP_OOM_LOGS(nb, "sys_alloc:: FAILED to get more memory");

  //errno = -1;
  return 0;
}
hgs
parents:
diff changeset
  1316
/*
 * Return the segment of 'm' whose [base, base + size) range contains
 * 'addr', or 0 if no segment holds it.  Walks the singly linked
 * segment list starting at the embedded head segment.
 */
msegmentptr RNewAllocator::segment_holding(mstate m, TUint8* addr) {
  for (msegmentptr seg = &m->seg; seg != 0; seg = seg->next) {
    if (seg->base <= addr && addr < seg->base + seg->size)
      return seg;
  }
  return 0;
}
hgs
parents:
diff changeset
  1325
/* Unlink the first chunk from a smallbin */
hgs
parents:
diff changeset
  1326
/*
 * Remove chunk P -- known to be the first chunk -- from smallbin B
 * (bin index I).  Clears the bin's bitmap flag when P was the only
 * chunk; otherwise relinks the bin header to P's successor after an
 * address sanity check.
 */
inline void RNewAllocator::unlink_first_small_chunk(mstate M,mchunkptr B,mchunkptr P,bindex_t& I)
{
  mchunkptr successor = P->fd;
  assert(P != B);
  assert(P != successor);
  assert(chunksize(P) == small_index2size(I));
  if (successor == B) {
    /* P was the bin's only chunk: the bin is now empty. */
    clear_smallmap(M, I);
  }
  else if (RTCHECK(ok_address(M, successor))) {
    /* Splice P out: header now points at P's successor. */
    B->fd = successor;
    successor->bk = B;
  }
  else {
    CORRUPTION_ERROR_ACTION(M);
  }
}
hgs
parents:
diff changeset
  1342
/* Link a free chunk into a smallbin  */
hgs
parents:
diff changeset
  1343
/*
 * Push free chunk P of size S onto the front of its smallbin's
 * circular doubly linked list, marking the bin's bitmap flag if the
 * bin was previously empty.
 */
inline void RNewAllocator::insert_small_chunk(mstate M,mchunkptr P, size_t S)
{
  bindex_t binIdx = small_index(S);
  mchunkptr binHead = smallbin_at(M, binIdx);
  mchunkptr front = binHead;  /* the chunk that will follow P */
  assert(S >= MIN_CHUNK_SIZE);
  if (!smallmap_is_marked(M, binIdx)) {
    /* Bin was empty: flag it populated; P links only to the header. */
    mark_smallmap(M, binIdx);
  }
  else if (RTCHECK(ok_address(M, binHead->fd))) {
    front = binHead->fd;
  }
  else {
    CORRUPTION_ERROR_ACTION(M);
  }
  /* Splice P in between the bin header and the old first chunk. */
  binHead->fd = P;
  front->bk = P;
  P->fd = front;
  P->bk = binHead;
}
hgs
parents:
diff changeset
  1361
hgs
parents:
diff changeset
  1362
hgs
parents:
diff changeset
  1363
/*
 * Dispatch a freed chunk of size S into the appropriate free structure:
 * a smallbin list when S is small, otherwise the tree bins.  NPAGES is
 * forwarded to insert_large_chunk (used there for page bookkeeping).
 */
inline void RNewAllocator::insert_chunk(mstate M,mchunkptr P,size_t S,size_t NPAGES)
{
    if (!is_small(S)) {
        insert_large_chunk(M, (tchunkptr)P, S, NPAGES);
        return;
    }
    insert_small_chunk(M, P, S);
}
hgs
parents:
diff changeset
  1371
hgs
parents:
diff changeset
  1372
/*
 * Remove chunk X from its tree bin.
 * Two phases:
 *   1. Find a replacement R for X: if X has same-size siblings
 *      (X->bk != X), R is taken from that circular list; otherwise R is
 *      the rightmost leaf of X's subtree (detached via RP, the pointer
 *      slot that held it).
 *   2. Splice R into X's position: update the bin root or X's parent
 *      link, then adopt X's children.
 * Every followed pointer is validated with RTCHECK/ok_address; any
 * failure triggers CORRUPTION_ERROR_ACTION.
 */
inline void RNewAllocator::unlink_large_chunk(mstate M,tchunkptr X)
{
  tchunkptr XP = X->parent;
  tchunkptr R;
  reset_tchunk_mem_pageout(X); // clear chunk pageout flag
  if (X->bk != X) {
    /* X has list siblings of the same size: unlink from the circular
       list and use the previous sibling as the replacement. */
    tchunkptr F = X->fd;
    R = X->bk;
    if (RTCHECK(ok_address(M, F))) {
      F->bk = R;
      R->fd = F;
    }
    else {
      CORRUPTION_ERROR_ACTION(M);
    }
  }
  else {
    /* X is alone at its size: replace it with the rightmost descendant
       of its subtree (or 0 if it is a leaf).  RP always points at the
       slot that holds R, so the leaf can be detached in place. */
    tchunkptr* RP;
    if (((R = *(RP = &(X->child[1]))) != 0) ||
        ((R = *(RP = &(X->child[0]))) != 0)) {
      tchunkptr* CP;
      while ((*(CP = &(R->child[1])) != 0) ||
             (*(CP = &(R->child[0])) != 0)) {
        R = *(RP = CP);
      }
      if (RTCHECK(ok_address(M, RP)))
        *RP = 0;
      else {
        CORRUPTION_ERROR_ACTION(M);
      }
    }
  }
  /* Phase 2: hook R (possibly 0) into X's former position. */
  if (XP != 0) {
    tbinptr* H = treebin_at(M, X->index);
    if (X == *H) {
      /* X was the bin root: install R, clearing the treemap bit if the
         bin becomes empty. */
      if ((*H = R) == 0)
        clear_treemap(M, X->index);
    }
    else if (RTCHECK(ok_address(M, XP))) {
      if (XP->child[0] == X)
        XP->child[0] = R;
      else
        XP->child[1] = R;
    }
    else
      CORRUPTION_ERROR_ACTION(M);
    if (R != 0) {
      /* R adopts X's parent and children. */
      if (RTCHECK(ok_address(M, R))) {
        tchunkptr C0, C1;
        R->parent = XP;
        if ((C0 = X->child[0]) != 0) {
          if (RTCHECK(ok_address(M, C0))) {
            R->child[0] = C0;
            C0->parent = R;
          }
          else
            CORRUPTION_ERROR_ACTION(M);
        }
        if ((C1 = X->child[1]) != 0) {
          if (RTCHECK(ok_address(M, C1))) {
            R->child[1] = C1;
            C1->parent = R;
          }
          else
            CORRUPTION_ERROR_ACTION(M);
        }
      }
      else
        CORRUPTION_ERROR_ACTION(M);
    }
  }
}
hgs
parents:
diff changeset
  1444
hgs
parents:
diff changeset
  1445
/* Unlink a chunk from a smallbin  */
/* Removes chunk P (of small size S) from the doubly-linked circular list of
   its smallbin, clearing the bin's bit in the smallmap when the bin becomes
   empty.  Corrupted neighbour links trigger CORRUPTION_ERROR_ACTION. */
inline void RNewAllocator::unlink_small_chunk(mstate M, mchunkptr P,size_t S)
{
  mchunkptr F = P->fd;              /* forward neighbour in the bin list */
  mchunkptr B = P->bk;              /* backward neighbour in the bin list */
  bindex_t I = small_index(S);      /* which smallbin S belongs to */
  assert(P != B);
  assert(P != F);
  assert(chunksize(P) == small_index2size(I));
  /* F == B means P's only neighbours are each other, i.e. P was the sole
     chunk in the bin: mark the bin empty. */
  if (F == B)
    clear_smallmap(M, I);
  /* Otherwise splice P out, but only after checking that each neighbour is
     either the bin header itself or a plausible in-heap address. */
  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&
                   (B == smallbin_at(M,I) || ok_address(M, B)))) {
    F->bk = B;
    B->fd = F;
  }
  else {
    CORRUPTION_ERROR_ACTION(M);
  }
}
hgs
parents:
diff changeset
  1465
hgs
parents:
diff changeset
  1466
/* Remove chunk P of size S from whichever bin holds it: the smallbin
   list for small sizes, or the size-indexed tree for large sizes. */
inline void RNewAllocator::unlink_chunk(mstate M, mchunkptr P, size_t S)
{
  if (!is_small(S)) {
    /* Large chunks live in tree bins and carry the extended tchunk layout. */
    tchunkptr treeChunk = (tchunkptr)(P);
    unlink_large_chunk(M, treeChunk);
  }
  else {
    unlink_small_chunk(M, P, S);
  }
}
hgs
parents:
diff changeset
  1475
hgs
parents:
diff changeset
  1476
/* Map a (large) chunk size S to its tree-bin index I.  There are two bins
   per power of two: the low bit of I distinguishes the lower and upper half
   of each size range.  The middle branch computes floor(log2) of the
   shifted size without loops or branches. */
inline void RNewAllocator::compute_tree_index(size_t S, bindex_t& I)
{
  size_t X = S >> TREEBIN_SHIFT;
  if (X == 0)
    I = 0;                     /* smaller than the first tree range */
  else if (X > 0xFFFF)
    I = NTREEBINS-1;           /* clamp oversized requests to the last bin */
  else {
    /* Branch-free binary search for the highest set bit of the 16-bit
       value Y: each step conditionally shifts Y and accumulates the shift
       amount in N.  K ends up as the bit position used for the bin pair. */
    unsigned int Y = (unsigned int)X;
    unsigned int N = ((Y - 0x100) >> 16) & 8;
    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;
    N += K;
    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;
    K = 14 - N + ((Y <<= K) >> 15);
    /* Two bins per power of two; the extra bit of S picks upper/lower half. */
    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));
  }
}
hgs
parents:
diff changeset
  1493
hgs
parents:
diff changeset
  1494
/* ------------------------- Operations on trees ------------------------- */
hgs
parents:
diff changeset
  1495
hgs
parents:
diff changeset
  1496
/* Insert chunk into tree */
/* Inserts free chunk X of size S into the tree bin for S.  NPAGES != 0
   records that X's pages have been decommitted (paged out); 0 clears that
   state.  If the bin is empty X becomes the root; if a same-sized node
   already exists X is chained into that node's circular fd/bk list;
   otherwise X becomes a new leaf, descending left/right by successive
   size bits. */
inline void RNewAllocator::insert_large_chunk(mstate M,tchunkptr X,size_t S,size_t NPAGES)
{
  tbinptr* H;
  bindex_t I;
  compute_tree_index(S, I);
  H = treebin_at(M, I);
  X->index = I;
  X->child[0] = X->child[1] = 0;

  /* Record/clear page-out state on the chunk.  NOTE(review): these macros
     appear to expand to complete statements (no trailing ';' here) —
     confirm against their definitions. */
  if (NPAGES) { set_tchunk_mem_pageout(X, NPAGES) }
  else  { reset_tchunk_mem_pageout(X) }

  if (!treemap_is_marked(M, I)) {
    /* Bin was empty: X becomes the root; parent points at the bin head. */
    mark_treemap(M, I);
    *H = X;
    X->parent = (tchunkptr)H;
    X->fd = X->bk = X;
  }
  else {
    tchunkptr T = *H;
    /* K holds S pre-shifted so its top bit selects the child at each level. */
    size_t K = S << leftshift_for_tree_index(I);
    for (;;) {
      if (chunksize(T) != S) {
        /* Different size: descend toward the correct leaf position. */
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);
        K <<= 1;
        if (*C != 0)
          T = *C;
        else if (RTCHECK(ok_address(M, C))) {
          /* Empty slot: attach X as a new leaf. */
          *C = X;
          X->parent = T;
          X->fd = X->bk = X;
          break;
        }
        else {
          CORRUPTION_ERROR_ACTION(M);
          break;
        }
      }
      else {
        /* Same size: chain X into T's circular list; chained nodes have
           a null parent. */
        tchunkptr F = T->fd;
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {
          T->fd = F->bk = X;
          X->fd = F;
          X->bk = T;
          X->parent = 0;
          break;
        }
        else {
          CORRUPTION_ERROR_ACTION(M);
          break;
        }
      }
    }
  }
}
hgs
parents:
diff changeset
  1552
hgs
parents:
diff changeset
  1553
/*
  Unlink steps:

  1. If x is a chained node, unlink it from its same-sized fd/bk links
     and choose its bk node as its replacement.
  2. If x was the last node of its size, but not a leaf node, it must
     be replaced with a leaf node (not merely one with an open left or
     right), to make sure that lefts and rights of descendants
     correspond properly to bit masks.  We use the rightmost descendant
     of x.  We could use any other leaf, but this is easy to locate and
     tends to counteract removal of leftmosts elsewhere, and so keeps
     paths shorter than minimally guaranteed.  This doesn't loop much
     because on average a node in a tree is near the bottom.
  3. If x is the base of a chain (i.e., has parent links) relink
     x's parent and children to x's replacement (or null if none).
*/
hgs
parents:
diff changeset
  1569
hgs
parents:
diff changeset
  1570
/* Replace dv node, binning the old one */
hgs
parents:
diff changeset
  1571
/* Used only when dvsize known to be small */
hgs
parents:
diff changeset
  1572
inline void RNewAllocator::replace_dv(mstate M, mchunkptr P, size_t S)
hgs
parents:
diff changeset
  1573
{
hgs
parents:
diff changeset
  1574
  size_t DVS = M->dvsize;
hgs
parents:
diff changeset
  1575
  if (DVS != 0) {
hgs
parents:
diff changeset
  1576
    mchunkptr DV = M->dv;
hgs
parents:
diff changeset
  1577
    assert(is_small(DVS));
hgs
parents:
diff changeset
  1578
    insert_small_chunk(M, DV, DVS);
hgs
parents:
diff changeset
  1579
  }
hgs
parents:
diff changeset
  1580
  M->dvsize = S;
hgs
parents:
diff changeset
  1581
  M->dv = P;
hgs
parents:
diff changeset
  1582
}
hgs
parents:
diff changeset
  1583
hgs
parents:
diff changeset
  1584
/* Compute the bit index I of the set bit in bitmap X.  Assumes X has a
   single bit set (callers pass the isolated least bit of a binmap) —
   TODO confirm; for other inputs the result is not meaningful.  The
   cascade is a branch-free binary search accumulating shift counts. */
inline void RNewAllocator::compute_bit2idx(binmap_t X,bindex_t& I)
{
    unsigned int Y = X - 1;
    unsigned int K = Y >> (16-4) & 16;
    unsigned int N = K;        Y >>= K;
    N += K = Y >> (8-3) &  8;  Y >>= K;
    N += K = Y >> (4-2) &  4;  Y >>= K;
    N += K = Y >> (2-1) &  2;  Y >>= K;
    N += K = Y >> (1-0) &  1;  Y >>= K;
    I = (bindex_t)(N + Y);
}
hgs
parents:
diff changeset
  1595
hgs
parents:
diff changeset
  1596
/* Add a new non-contiguous segment [tbase, tbase+tsize) to allocator state
   m.  The new space becomes the top chunk; the old top is capped with a
   segment record plus fencepost headers, and any usable remainder of the
   old top is binned as a free chunk.  'mmapped' is stored as the segment's
   flags. */
void RNewAllocator::add_segment(mstate m, TUint8* tbase, size_t tsize, flag_t mmapped) {
  /* Determine locations and sizes of segment, fenceposts, old top */
  TUint8* old_top = (TUint8*)m->top;
  msegmentptr oldsp = segment_holding(m, old_top);
  TUint8* old_end = oldsp->base + oldsp->size;
  size_t ssize = pad_request(sizeof(struct malloc_segment));
  /* Carve the segment record out of the END of the old segment, leaving
     room for fenceposts and alignment slack. */
  TUint8* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  size_t offset = align_offset(chunk2mem(rawsp));
  TUint8* asp = rawsp + offset;
  /* If the aligned spot would leave less than a minimal chunk of old top,
     place the record at old_top itself. */
  TUint8* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  mchunkptr sp = (mchunkptr)csp;
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
  mchunkptr p = tnext;
  int nfences = 0;

  /* reset top to new space */
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);

  /* Set up segment record */
  assert(is_aligned(ss));
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  *ss = m->seg; /* Push current record */
  m->seg.base = tbase;
  m->seg.size = tsize;
  m->seg.sflags = mmapped;
  m->seg.next = ss;

  /* Insert trailing fenceposts */
  /* Fill the tail of the old segment with FENCEPOST_HEAD markers so
     coalescing can never run past the segment end. */
  for (;;) {
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
    p->head = FENCEPOST_HEAD;
    ++nfences;
    if ((TUint8*)(&(nextp->head)) < old_end)
      p = nextp;
    else
      break;
  }
  assert(nfences >= 2);

  /* Insert the rest of old top into a bin as an ordinary free chunk */
  if (csp != old_top) {
    mchunkptr q = (mchunkptr)old_top;
    size_t psize = csp - old_top;
    mchunkptr tn = chunk_plus_offset(q, psize);
    set_free_with_pinuse(q, psize, tn);
    insert_chunk(m, q, psize, 0);
  }

  check_top_chunk(m, m->top);
}
hgs
parents:
diff changeset
  1647
hgs
parents:
diff changeset
  1648
hgs
parents:
diff changeset
  1649
/* Allocate nb bytes from a newly obtained region 'newbase' that sits
   immediately BELOW the existing region starting at 'oldbase'.  The
   request chunk p is carved from the front of newbase; the remainder q is
   coalesced with the first chunk of the old region (extending top, or
   merging with a free chunk, as appropriate).  Returns the user pointer
   for p. */
void* RNewAllocator::prepend_alloc(mstate m, TUint8* newbase, TUint8* oldbase,
                           size_t nb) {
  mchunkptr p = align_as_chunk(newbase);
  mchunkptr oldfirst = align_as_chunk(oldbase);
  size_t psize = (TUint8*)oldfirst - (TUint8*)p;   /* total new space */
  mchunkptr q = chunk_plus_offset(p, nb);          /* remainder chunk */
  size_t qsize = psize - nb;
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);

  assert((TUint8*)oldfirst > (TUint8*)q);
  assert(pinuse(oldfirst));
  assert(qsize >= MIN_CHUNK_SIZE);

  /* consolidate remainder with first chunk of old base */
  if (oldfirst == m->top) {
    /* Remainder abuts top: simply grow top downward over q. */
    size_t tsize = m->topsize += qsize;
    m->top = q;
    q->head = tsize | PINUSE_BIT;
    check_top_chunk(m, q);
  }
  else {
    if (!cinuse(oldfirst)) {
      /* Old first chunk is free: absorb it into the remainder. */
      size_t nsize = chunksize(oldfirst);

      /* check for chunk memory page-in */
      if (page_not_in_memory(oldfirst, nsize))
        map_chunk_pages((tchunkptr)oldfirst, nsize);       //Err Ignored, branch not reachable.

      unlink_chunk(m, oldfirst, nsize);
      oldfirst = chunk_plus_offset(oldfirst, nsize);
      qsize += nsize;
    }
    set_free_with_pinuse(q, qsize, oldfirst);
    insert_chunk(m, q, qsize, 0);
    check_free_chunk(m, q);
  }

  check_malloced_chunk(m, chunk2mem(p), nb);
  return chunk2mem(p);
}
hgs
parents:
diff changeset
  1689
hgs
parents:
diff changeset
  1690
/* Satisfy a request of nb bytes with its own directly-mmapped region.
   The chunk is marked IS_MMAPPED (its prev_foot stores the alignment
   offset) and terminated with fencepost headers.  Returns the user
   pointer, or 0 on overflow or mmap failure. */
void* RNewAllocator::mmap_alloc(mstate m, size_t nb) {
  size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  if (mmsize > nb) {     /* Check for wrap around 0 */
    TUint8* mm = (TUint8*)(DIRECT_MMAP(mmsize));
    if (mm != CMFAIL) {
      size_t offset = align_offset(chunk2mem(mm));   /* alignment slack */
      size_t psize = mmsize - offset - MMAP_FOOT_PAD;
      mchunkptr p = (mchunkptr)(mm + offset);
      /* prev_foot doubles as the offset back to the mapping start. */
      p->prev_foot = offset | IS_MMAPPED_BIT;
      (p)->head = (psize|CINUSE_BIT);
      mark_inuse_foot(m, p, psize);
      /* Two trailing fencepost words terminate the region. */
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;

      /* Update bookkeeping: lowest address seen and footprint high-water. */
      if (mm < m->least_addr)
        m->least_addr = mm;
      if ((m->footprint += mmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      assert(is_aligned(chunk2mem(p)));
      check_mmapped_chunk(m, p);
      return chunk2mem(p);
    }
  }
  return 0;
}
hgs
parents:
diff changeset
  1715
hgs
parents:
diff changeset
  1716
    /* Release unused memory back to the system, keeping at least 'pad'
       bytes of headroom in the top chunk.  Shrinks the top segment via
       MREMAP/MUNMAP (mmapped segments) or a negative MORECORE, then
       unmaps any entirely-free mmapped segments.  Returns 1 if any memory
       was released, else 0 (and disables auto-trim on failure). */
    int RNewAllocator::sys_trim(mstate m, size_t pad)
    {
      size_t released = 0;
      if (pad < MAX_REQUEST && is_initialized(m)) {
        pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */

        if (m->topsize > pad) {
          /* Shrink top space in granularity-size units, keeping at least one */
          size_t unit = mparams.granularity;
                size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit;
          msegmentptr sp = segment_holding(m, (TUint8*)m->top);

          if (!is_extern_segment(sp)) {
            if (is_mmapped_segment(sp)) {
              if (HAVE_MMAP &&
                  sp->size >= extra &&
                  !has_segment_link(m, sp)) { /* can't shrink if pinned */
                /*size_t newsize = sp->size - extra; */
                /* Prefer mremap, fall back to munmap */
                if ((CALL_MREMAP(sp->base, sp->size, sp->size - extra, 0) != MFAIL) ||
                    (CALL_MUNMAP(sp->base + sp->size - extra, extra) == 0)) {
                  released = extra;
                }
              }
            }
            else if (HAVE_MORECORE) {
              if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
                extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
              ACQUIRE_MORECORE_LOCK(m);
              {
                /* Make sure end of memory is where we last set it. */
                TUint8* old_br = (TUint8*)(CALL_MORECORE(0));
                if (old_br == sp->base + sp->size) {
                  TUint8* rel_br = (TUint8*)(CALL_MORECORE(-extra));
                  TUint8* new_br = (TUint8*)(CALL_MORECORE(0));
                  if (rel_br != CMFAIL && new_br < old_br)
                    released = old_br - new_br;
                }
              }
              RELEASE_MORECORE_LOCK(m);
            }
          }

          if (released != 0) {
            /* Shrink the segment record and rebuild the (smaller) top. */
            sp->size -= released;
            m->footprint -= released;
            init_top(m, m->top, m->topsize - released);
            check_top_chunk(m, m->top);
          }
        }

        /* Unmap any unused mmapped segments */
        if (HAVE_MMAP)
          released += release_unused_segments(m);

        /* On failure, disable autotrim to avoid repeated failed future calls */
        if (released == 0)
          m->trim_check = MAX_SIZE_T;
      }

      return (released != 0)? 1 : 0;
    }
hgs
parents:
diff changeset
  1778
hgs
parents:
diff changeset
  1779
    /* Return 1 if any segment record in m's segment list is itself stored
       inside segment ss (so ss is pinned by bookkeeping data), else 0. */
    inline int RNewAllocator::has_segment_link(mstate m, msegmentptr ss)
    {
      TUint8* const lo = ss->base;
      TUint8* const hi = ss->base + ss->size;
      for (msegmentptr cur = &m->seg; cur != 0; cur = cur->next) {
        TUint8* recAddr = (TUint8*)cur;
        if (recAddr >= lo && recAddr < hi)
          return 1;
      }
      return 0;
    }
hgs
parents:
diff changeset
  1789
hgs
parents:
diff changeset
  1790
    /* Unmap and unlink any mmapped segments that don't contain used chunks */
    /* Walks the segment list; for each non-extern mmapped segment whose
       single free chunk spans the whole segment, unlinks the chunk from
       its tree bin and munmaps the segment.  If munmap fails the chunk is
       re-inserted (preserving its page-out count).  Returns the total
       number of bytes released. */
    size_t RNewAllocator::release_unused_segments(mstate m)
    {
      size_t released = 0;
      msegmentptr pred = &m->seg;
      msegmentptr sp = pred->next;
      while (sp != 0) {
        TUint8* base = sp->base;
        size_t size = sp->size;
        msegmentptr next = sp->next;
        if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
          mchunkptr p = align_as_chunk(base);
          size_t psize = chunksize(p);
          /* Can unmap if first chunk holds entire segment and not pinned */
          if (!cinuse(p) && (TUint8*)p + psize >= base + size - TOP_FOOT_SIZE) {
            tchunkptr tp = (tchunkptr)p;
            /* Remember the page-out count so a failed unmap can restore it. */
            size_t npages_out = tp->npages;
            assert(segment_holds(sp, (TUint8*)sp));
            unlink_large_chunk(m, tp);
            if (CALL_MUNMAP(base, size) == 0) {
              released += size;
              m->footprint -= size;
              /* unlink obsoleted record */
              sp = pred;
              sp->next = next;
            }
            else { /* back out if cannot unmap */
              insert_large_chunk(m, tp, psize, npages_out);
            }
          }
        }
        pred = sp;
        sp = next;
      }/*End of while*/
      return released;
    }
hgs
parents:
diff changeset
  1826
    /* Realloc using mmap */
    /* Resize the directly-mmapped chunk oldp to hold nb bytes.  Keeps the
       old mapping if it is already big enough (but not wastefully large),
       otherwise tries MREMAP with moving allowed.  Returns the new chunk,
       or 0 if nb is small (mmap regions can't shrink below small size) or
       remapping fails. */
    inline  mchunkptr RNewAllocator::mmap_resize(mstate m, mchunkptr oldp, size_t nb)
    {
      size_t oldsize = chunksize(oldp);
      if (is_small(nb)) /* Can't shrink mmap regions below small size */
        return 0;
      /* Keep old chunk if big enough but not too big */
      if (oldsize >= nb + SIZE_T_SIZE &&
          (oldsize - nb) <= (mparams.granularity << 1))
        return oldp;
      else {
        /* prev_foot stores the alignment offset back to the mapping start. */
        size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
        size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
        size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
                                             CHUNK_ALIGN_MASK);
        TUint8* cp = (TUint8*)CALL_MREMAP((char*)oldp - offset,
                                      oldmmsize, newmmsize, 1);
        if (cp != CMFAIL) {
          mchunkptr newp = (mchunkptr)(cp + offset);
          size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
          newp->head = (psize|CINUSE_BIT);
          mark_inuse_foot(m, newp, psize);
          /* Re-create the trailing fencepost words at the new end. */
          chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
          chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;

          /* Update lowest-address and footprint statistics. */
          if (cp < m->least_addr)
            m->least_addr = cp;
          if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
            m->max_footprint = m->footprint;
          check_mmapped_chunk(m, newp);
          return newp;
        }
      }
      return 0;
    }
hgs
parents:
diff changeset
  1861
hgs
parents:
diff changeset
  1862
hgs
parents:
diff changeset
  1863
void RNewAllocator::Init_Dlmalloc(size_t capacity, int locked, size_t aTrimThreshold)
    {
        // Reset the global malloc state and rebuild it over the single
        // contiguous region starting at iBase, 'capacity' bytes long.
        // 'locked' selects per-heap locking; 'aTrimThreshold' tunes when
        // sys_trim releases memory back to the system.
        memset(gm,0,sizeof(malloc_state));
        init_mparams(aTrimThreshold); /* Ensure pagesize etc initialized */
        // The maximum amount that can be allocated can be calculated as:-
        // 2^sizeof(size_t) - sizeof(malloc_state) - TOP_FOOT_SIZE - page size (all accordingly padded)
        // If the capacity exceeds this, no allocation will be done.
        gm->seg.base = gm->least_addr = iBase;   // one segment covering the whole heap
        gm->seg.size = capacity;
        gm->seg.sflags = !IS_MMAPPED_BIT;        // segment is not an mmapped extension
        set_lock(gm, locked);
        gm->magic = mparams.magic;               // marks the state as initialised/valid
        init_bins(gm);
        // All usable space initially lives in the top chunk.
        init_top(gm, (mchunkptr)iBase, capacity - TOP_FOOT_SIZE);
    }
hgs
parents:
diff changeset
  1878
hgs
parents:
diff changeset
  1879
void* RNewAllocator::dlmalloc(size_t bytes) {
  /*
     Basic algorithm:
     If a small request (< 256 bytes minus per-chunk overhead):
       1. If one exists, use a remainderless chunk in associated smallbin.
          (Remainderless means that there are too few excess bytes to represent as a chunk.)
       2. If one exists, split the smallest available chunk in a bin, saving remainder in bin.
       3. If it is big enough, use the top chunk.
       4. If available, get memory from system and use it
     Otherwise, for a large request:
       1. Find the smallest available binned chunk that fits, splitting if necessary.
       2. If it is big enough, use the top chunk.
       3. If request size >= mmap threshold, try to directly mmap this chunk.
       4. If available, get memory from system and use it

     The ugly goto's here ensure that postaction occurs along all paths.
  */
  if (!PREACTION(gm)) {  /* take the heap lock; no-op unless USE_LOCKS */
    void* mem;
    size_t nb;           /* padded (granted) request size */
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      /* bitmap of nonempty smallbins at or above the exact-fit index */
      smallbits = gm->smallmap >> idx;

      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(gm, idx);
        p = b->fd;                   /* first chunk in that bin */
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(gm, b, p, idx);
        set_inuse_and_pinuse(gm, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      } else {
        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          /* isolate the lowest set bit above idx, i.e. the smallest bin that fits */
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(gm, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(gm, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (rsize < free_chunk_threshold)
            /* remainder too small to keep as a free chunk: grant it all */
            set_inuse_and_pinuse(gm, p, small_index2size(i));
          else {
            /* split: front part is granted, tail goes back into the bins */
            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            insert_chunk(gm, r, rsize, 0);
          }
          mem = chunk2mem(p);
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }

        /* no usable smallbin: try a best-fit from the tree bins */
        else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }
      }
    } /* else - large alloc request */
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else {
      nb = pad_request(bytes);
      if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }
    }

    if (nb < gm->topsize) { /* Split top */
      size_t rsize = gm->topsize -= nb;
      mchunkptr p = gm->top;
      mchunkptr r = gm->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(gm, gm->top);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }

    /* last resort: extend the heap via the system allocator */
    mem = sys_alloc(gm, nb);

  postaction:
    POSTACTION(gm);   /* release the heap lock */
#ifdef DL_CHUNK_MEM_DEBUG
    if (mem) {
        mchunkptr pp = mem2chunk(mem);
        do_check_any_chunk_access(pp, chunksize(pp));
    }
#endif

    /* book-keeping: track total bytes handed out (chunk sizes, not request sizes) */
    if (mem) {
        mchunkptr pp = mem2chunk(mem);
        iTotalAllocSize += chunksize(pp);
    }

    return mem;
  }
#if USE_LOCKS // keep the compiler happy
  return 0;
#endif
}
hgs
parents:
diff changeset
  1994
hgs
parents:
diff changeset
  1995
void RNewAllocator::dlfree(void* mem) {
  /*
     Consolidate freed chunks with preceding or succeeding bordering
     free chunks, if they exist, and then place in a bin.  Intermixed
     with special cases for top, dv, mmapped chunks, and usage errors.

     This port additionally tracks pages that have been decommitted
     (page_not_in_memory / npages) so merges can account for unmapped
     spans and, for large chunks, release whole pages back to the OS.
  */

    if (mem != 0)
    {
        size_t unmapped_pages = 0;      // pages already out of memory in merged neighbours
        int prev_chunk_unmapped = 0;    // set when the preceding free chunk has unmapped pages
        mchunkptr p  = mem2chunk(mem);
#if FOOTERS
        // Recover the owning mstate from the chunk footer and validate it.
        mstate fm = get_mstate_for(p);
        if (!ok_magic(fm))
        {
            USAGE_ERROR_ACTION(fm, p);
            return;
        }
#else /* FOOTERS */
#define fm gm
#endif /* FOOTERS */

        if (!PREACTION(fm))   /* take the heap lock */
        {
            check_inuse_chunk(fm, p);
            if (RTCHECK(ok_address(fm, p) && ok_cinuse(p)))
            {
                size_t psize = chunksize(p);
                iTotalAllocSize -= psize;   // keep the allocated-bytes counter in sync with dlmalloc
                mchunkptr next = chunk_plus_offset(p, psize);
                if (!pinuse(p))
                {
                    /* previous chunk is free: merge backwards (or handle mmapped case) */
                    size_t prevsize = p->prev_foot;
                    if ((prevsize & IS_MMAPPED_BIT) != 0)
                    {
                        /* chunk was individually mmapped: unmap the whole region */
                        prevsize &= ~IS_MMAPPED_BIT;
                        psize += prevsize + MMAP_FOOT_PAD;
                            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
                                fm->footprint -= psize;
                            goto postaction;
                    }
                    else
                    {
                        mchunkptr prev = chunk_minus_offset(p, prevsize);
                        if (page_not_in_memory(prev, prevsize)) {
                            prev_chunk_unmapped = 1;
                            unmapped_pages = ((tchunkptr)prev)->npages;
                        }

                        psize += prevsize;
                        p = prev;
                        if (RTCHECK(ok_address(fm, prev)))
                        { /* consolidate backward */
                            unlink_chunk(fm, p, prevsize);
                        }
                        else
                            goto erroraction;
                    }
                }

                if (RTCHECK(ok_next(p, next) && ok_pinuse(next)))
                {
                    if (!cinuse(next))
                    {  /* consolidate forward */
                        if (next == fm->top)
                        {
                            if (prev_chunk_unmapped) { // previous chunk is unmapped
                            /* unmap all pages between previously unmapped and end of top chunk
                               and reset top to beginning of prev chunk - done in sys_trim_partial() */
                                sys_trim_partial(fm, p, psize, unmapped_pages);
                                do_check_any_chunk_access(fm->top, fm->topsize);
                                goto postaction;
                            }
                            else { // forward merge to top
                                size_t tsize = fm->topsize += psize;
                                fm->top = p;
                                p->head = tsize | PINUSE_BIT;
                                if (should_trim(fm, tsize))
                                    sys_trim(fm, 0);
                                do_check_any_chunk_access(fm->top, fm->topsize);
                                goto postaction;
                            }
                        }
                        else
                        {
                            size_t nsize = chunksize(next);
                            //int next_chunk_unmapped = 0;
                            if ( page_not_in_memory(next, nsize) ) {
                                //next_chunk_unmapped = 1;
                                unmapped_pages += ((tchunkptr)next)->npages;
                            }

                            psize += nsize;
                            unlink_chunk(fm, next, nsize);
                            set_size_and_pinuse_of_free_chunk(p, psize);
                        }
                    }
                    else
                        set_free_with_pinuse(p, psize, next);

                    /* check if chunk memory can be released back to the OS */
                    size_t npages_out = 0;
                    if (!is_small(psize) && psize>=CHUNK_PAGEOUT_THESHOLD)
                        npages_out = unmap_chunk_pages((tchunkptr)p, psize, unmapped_pages);

                    insert_chunk(fm, p, psize, npages_out);
                    check_free_chunk(fm, p);
                    do_chunk_page_release_check(p, psize, fm, npages_out);
                    goto postaction;
                }
            }
erroraction:
        USAGE_ERROR_ACTION(fm, p);
postaction:
        POSTACTION(fm);   /* release the heap lock */
        }
    }
#if !FOOTERS
#undef fm
#endif /* FOOTERS */
}
hgs
parents:
diff changeset
  2117
hgs
parents:
diff changeset
  2118
void* RNewAllocator::dlrealloc(void* oldmem, size_t bytes) {
hgs
parents:
diff changeset
  2119
  if (oldmem == 0)
hgs
parents:
diff changeset
  2120
    return dlmalloc(bytes);
hgs
parents:
diff changeset
  2121
#ifdef REALLOC_ZERO_BYTES_FREES
hgs
parents:
diff changeset
  2122
  if (bytes == 0) {
hgs
parents:
diff changeset
  2123
    dlfree(oldmem);
hgs
parents:
diff changeset
  2124
    return 0;
hgs
parents:
diff changeset
  2125
  }
hgs
parents:
diff changeset
  2126
#endif /* REALLOC_ZERO_BYTES_FREES */
hgs
parents:
diff changeset
  2127
  else {
hgs
parents:
diff changeset
  2128
#if ! FOOTERS
hgs
parents:
diff changeset
  2129
    mstate m = gm;
hgs
parents:
diff changeset
  2130
#else /* FOOTERS */
hgs
parents:
diff changeset
  2131
    mstate m = get_mstate_for(mem2chunk(oldmem));
hgs
parents:
diff changeset
  2132
    if (!ok_magic(m)) {
hgs
parents:
diff changeset
  2133
      USAGE_ERROR_ACTION(m, oldmem);
hgs
parents:
diff changeset
  2134
      return 0;
hgs
parents:
diff changeset
  2135
    }
hgs
parents:
diff changeset
  2136
#endif /* FOOTERS */
hgs
parents:
diff changeset
  2137
    return internal_realloc(m, oldmem, bytes);
hgs
parents:
diff changeset
  2138
  }
hgs
parents:
diff changeset
  2139
}
hgs
parents:
diff changeset
  2140
hgs
parents:
diff changeset
  2141
hgs
parents:
diff changeset
  2142
int RNewAllocator::dlmalloc_trim(size_t pad) {
  // Release as much unused heap memory as possible while keeping at
  // least 'pad' bytes of slack. Returns sys_trim()'s result, or 0 when
  // the heap lock cannot be taken.
  if (PREACTION(gm))
    return 0;
  int trimmed = sys_trim(gm, pad);
  POSTACTION(gm);
  return trimmed;
}
hgs
parents:
diff changeset
  2150
hgs
parents:
diff changeset
  2151
size_t RNewAllocator::dlmalloc_footprint(void) {
  // Current number of bytes obtained from the system for this heap.
  return gm->footprint;
}
hgs
parents:
diff changeset
  2154
hgs
parents:
diff changeset
  2155
size_t RNewAllocator::dlmalloc_max_footprint(void) {
  // High-water mark of the heap's system footprint.
  return gm->max_footprint;
}
hgs
parents:
diff changeset
  2158
hgs
parents:
diff changeset
  2159
#if !NO_MALLINFO
// Return a mallinfo-style usage snapshot for the global malloc state.
struct mallinfo RNewAllocator::dlmallinfo(void) {
  return internal_mallinfo(gm);
}
#endif /* NO_MALLINFO */
hgs
parents:
diff changeset
  2164
hgs
parents:
diff changeset
  2165
void RNewAllocator::dlmalloc_stats() {
  // Print heap statistics for the global malloc state (debug aid).
  internal_malloc_stats(gm);
}
hgs
parents:
diff changeset
  2168
hgs
parents:
diff changeset
  2169
int RNewAllocator::dlmallopt(int param_number, int value) {
  // mallopt() equivalent: adjust a tunable malloc parameter
  // (trim threshold, granularity, ...) via change_mparam.
  return change_mparam(param_number, value);
}
hgs
parents:
diff changeset
  2172
hgs
parents:
diff changeset
  2173
//inline slab* slab::slabfor(void* p)
// Map an arbitrary cell pointer to the slab containing it by rounding
// down to the slab-size boundary (slabs are slabsize-aligned).
slab* slab::slabfor( const void* p)
{
return (slab*)(floor(p, slabsize));
}
hgs
parents:
diff changeset
  2178
hgs
parents:
diff changeset
  2179
hgs
parents:
diff changeset
  2180
void RNewAllocator::tree_remove(slab* s)
//
// Unlink slab s from the address-ordered heap it is in (nodes are linked
// via parent/child1/child2). The hole left by s is filled by repeatedly
// promoting the smaller-addressed child, walking down the tree.
//
{
    slab** r = s->parent;     // slot in the parent that points at s
    slab* c1 = s->child1;
    slab* c2 = s->child2;
    for (;;)
    {
        if (!c2)
        {
            // At most one child left: splice it straight into the hole.
            *r = c1;
            if (c1)
                c1->parent = r;
            return;
        }
        if (!c1)
        {
            *r = c2;
            c2->parent = r;
            return;
        }
        // Ensure c1 is the lower-addressed child before promoting it.
        if (c1 > c2)
        {
            slab* c3 = c1;
            c1 = c2;
            c2 = c3;
        }
        // Promote c1 into the hole; its old child2 slot becomes the next
        // hole to fill, with c2 re-attached as c1's child2.
        slab* newc2 = c1->child2;
        *r = c1;
        c1->parent = r;
        c1->child2 = c2;
        c2->parent = &c1->child2;
        s = c1;
        c1 = s->child1;
        c2 = newc2;
        r = &s->child1;
    }
}
hgs
parents:
diff changeset
  2217
void RNewAllocator::tree_insert(slab* s,slab** r)
//
// Insert slab s into the address-ordered heap rooted at *r: descend until
// either an empty slot or a node with a higher address is found, keeping
// lower addresses nearer the root.
//
    {
        slab* n = *r;
        for (;;)
        {
            if (!n)
            {   // tree empty
                *r = s;
                s->parent = r;
                s->child1 = s->child2 = 0;
                break;
            }
            if (s < n)
            {   // insert between parent and n
                *r = s;
                s->parent = r;
                s->child1 = n;
                s->child2 = 0;
                n->parent = &s->child1;
                break;
            }
            slab* c1 = n->child1;
            slab* c2 = n->child2;
            // NOTE(review): the -1 offsets appear intended to make a null
            // child wrap around and compare as "largest", steering the
            // descent toward the null/lower branch. Subtracting from a null
            // pointer is technically UB — inherited from the original
            // allocator; confirm before restructuring.
            if ((c1 - 1) > (c2 - 1))
            {
                r = &n->child1;
                n = c1;
            }
            else
            {
                r = &n->child2;
                n = c2;
            }
        }
    }
hgs
parents:
diff changeset
  2252
void* RNewAllocator::allocnewslab(slabset& allocator)
//
// Acquire and initialise a new slab, returning a cell from the slab
// The strategy is:
// 1. Use the lowest address free slab, if available. This is done by using the lowest slab
//    in the page at the root of the partial_page heap (which is address ordered). If the page
//    is now fully used, remove it from the partial_page heap.
// 2. Allocate a new page for slabs if no empty slabs are available
//
{
    page* p = page::pagefor(partial_page);
    if (!p)
        return allocnewpage(allocator);   // no partially-used page: grab a fresh one

    // The first slab's header carries the page's free-slab bitmap (pagemap).
    unsigned h = p->slabs[0].header;
    unsigned pagemap = header_pagemap(h);
    // partial_page must be linked through the highest-addressed free slab.
    ASSERT(&p->slabs[hibit(pagemap)] == partial_page);

    unsigned slabix = lowbit(pagemap);            // lowest-addressed free slab in page
    p->slabs[0].header = h &~ (0x100<<slabix);    // clear its bit in the pagemap
    if (!(pagemap &~ (1<<slabix)))
    {
        tree_remove(partial_page);  // last free slab in page
    }
    return allocator.initslab(&p->slabs[slabix]);
}
hgs
parents:
diff changeset
  2278
hgs
parents:
diff changeset
  2279
/* Definition of this function is not present in the prototype code. */
#if 0
void RNewAllocator::partial_insert(slab* s)
    {
        // slab has had first cell freed and needs to be linked back into partial tree
        slabset& ss = slaballoc[sizemap[s->clz]];

        ASSERT(s->used == slabfull);
        s->used = ss.fulluse - s->clz;      // full-1 loading
        tree_insert(s,&ss.partial);
        checktree(ss.partial);
    }
/* Definition of this function is not present in the prototype code. */
#endif
hgs
parents:
diff changeset
  2293
hgs
parents:
diff changeset
  2294
void* RNewAllocator::allocnewpage(slabset& allocator)
//
// Acquire and initialise a new page, returning a cell from a new slab
// The partial_page tree is empty (otherwise we'd have used a slab from there)
// The partial_page link is put in the highest addressed slab in the page, and the
// lowest addressed slab is used to fulfill the allocation request
//
{
    page* p  = spare_page;           // reuse the single cached page if present
    if (p)
        spare_page = 0;
    else
    {
        p = static_cast<page*>(map(0,pagesize));
        if (!p)
            return 0;                // out of memory
    }
    ASSERT(p == floor(p,pagesize));
    // Mark slabs 1..3 free in the pagemap (slab 0 is handed out below).
    p->slabs[0].header = ((1<<3) + (1<<2) + (1<<1))<<8;     // set pagemap
    // Link the page into the partial_page heap via its highest slab.
    p->slabs[3].parent = &partial_page;
    p->slabs[3].child1 = p->slabs[3].child2 = 0;
    partial_page = &p->slabs[3];
    return allocator.initslab(&p->slabs[0]);
}
hgs
parents:
diff changeset
  2318
hgs
parents:
diff changeset
  2319
void RNewAllocator::freepage(page* p)
//
// Return an unused page. A single page is kept cached in spare_page to
// reduce thrashing of the OS allocator; any further page is unmapped
// immediately.
//
{
    ASSERT(ceiling(p,pagesize) == p);
    if (spare_page)
        unmap(p,pagesize);
    else
        spare_page = p;
}
hgs
parents:
diff changeset
  2334
hgs
parents:
diff changeset
  2335
void RNewAllocator::freeslab(slab* s)
//
// Release an empty slab to the slab manager
// The strategy is:
// 1. The page containing the slab is checked to see the state of the other slabs in the page by
//    inspecting the pagemap field in the header of the first slab in the page.
// 2. The pagemap is updated to indicate the new unused slab
// 3. If this is the only unused slab in the page then the slab header is used to add the page to
//    the partial_page tree/heap
// 4. If all the slabs in the page are now unused the page is release back to the OS
// 5. If this slab has a higher address than the one currently used to track this page in
//    the partial_page heap, the linkage is moved to the new unused slab
//
{
    // Detach s from whichever partial tree it currently sits in.
    tree_remove(s);
    checktree(*s->parent);
    // usedm4 == size-4 means exactly one cell was in use: the one just freed.
    ASSERT(header_usedm4(s->header) == header_size(s->header)-4);
    CHECK(s->header |= 0xFF00000);          // illegal value for debug purposes
    page* p = page::pagefor(s);             // round down to the containing page
    unsigned h = p->slabs[0].header;        // header of slab 0 carries the pagemap
    int slabix = s - &p->slabs[0];          // index (0..3) of s within the page
    unsigned pagemap = header_pagemap(h);   // bitmap of already-unused slabs
    // Mark this slab as unused in the pagemap (pagemap lives at bits 8..11).
    p->slabs[0].header = h | (0x100<<slabix);
    if (pagemap == 0)
    {   // page was full before, use this slab as link in empty heap
        tree_insert(s, &partial_page);
    }
    else
    {   // find the current empty-link slab
        // The highest-addressed unused slab carries the page's tree linkage.
        slab* sl = &p->slabs[hibit(pagemap)];
        pagemap ^= (1<<slabix);             // include s in the unused set
        if (pagemap == 0xf)
        {   // page is now empty so recycle page to os
            tree_remove(sl);
            freepage(p);
            return;
        }
        // ensure the free list link is in highest address slab in page
        if (s > sl)
        {   // replace current link with new one. Address-order tree so position stays the same
            slab** r = sl->parent;
            slab* c1 = sl->child1;
            slab* c2 = sl->child2;
            s->parent = r;
            s->child1 = c1;
            s->child2 = c2;
            *r = s;
            // Re-point the children's parent links at the new node's slots.
            if (c1)
                c1->parent = &s->child1;
            if (c2)
                c2->parent = &s->child2;
        }
        CHECK(if (s < sl) s=sl);            // debug builds: s tracks the link slab for the asserts below
    }
    // Post-conditions: the page has at least one unused slab recorded, and the
    // tree linkage lives in the highest-addressed unused slab.
    ASSERT(header_pagemap(p->slabs[0].header) != 0);
    ASSERT(hibit(header_pagemap(p->slabs[0].header)) == unsigned(s - &p->slabs[0]));
}
hgs
parents:
diff changeset
  2392
hgs
parents:
diff changeset
  2393
void RNewAllocator::slab_init(unsigned slabbitmap)
//
// Configure the slab allocator from a bitmap of enabled cell sizes.
// Bit i of slabbitmap enables slabs of cell size 4*(i+1). Builds the
// sizemap[] lookup (request size -> slabset index) and initialises the
// enabled slabsets. Sizes above the largest enabled one fall outside
// slab_threshold and are served elsewhere.
//
{
    ASSERT((slabbitmap & ~okbits) == 0);
    ASSERT(maxslabsize <= 60);

    slab_threshold=0;
    partial_page = 0;
    // ix wraps 0xff -> 0 on the first enabled size (largest), so the
    // first ++ix both detects "largest size" and starts indexing at 0.
    unsigned char ix = 0xff;
    unsigned bit = 1<<((maxslabsize>>2)-1);
    // Walk sizes from largest to smallest, one bit per 4-byte step.
    for (int sz = maxslabsize; sz >= 0; sz -= 4, bit >>= 1)
    {
        if (slabbitmap & bit)
        {
            if (++ix == 0)
                slab_threshold=sz+1;    // requests <= sz (i.e. < sz+1) go to slabs
            slabset& c = slaballoc[ix];
            c.size = sz;
            c.partial = 0;              // no partial slabs yet
        }
        // Every size maps to the smallest enabled slabset that fits it
        // (sizes above the largest enabled one keep ix == 0xff).
        sizemap[sz>>2] = ix;
    }

    free_chunk_threshold = pad_request(slab_threshold);
}
hgs
parents:
diff changeset
  2417
hgs
parents:
diff changeset
  2418
void* RNewAllocator::slab_allocate(slabset& ss)
//
// Allocate a cell from the given slabset
// Strategy:
// 1. Take the partially full slab at the top of the heap (lowest address).
// 2. If there is no such slab, allocate from a new slab
// 3. If the slab has a non-empty freelist, pop the cell from the front of the list and update the slab
// 4. Otherwise, if the slab is not full, return the cell at the end of the currently used region of
//    the slab, updating the slab
// 5. Otherwise, release the slab from the partial tree/heap, marking it as 'floating' and go back to
//    step 1
//
// Header layout used below (see slabhdr): bits 0..7 freelist head (in words),
// bits 8..11 pagemap, bits 12..17 cell size, bits 18.. used-minus-4 count.
//
{
    for (;;)
    {
        slab *s = ss.partial;
        if (!s)
            break;                      // no partial slab - fall through to allocnewslab
        unsigned h = s->header;
        unsigned free = h & 0xff;       // extract free cell position (word offset into slab)
        if (free)
        {
            // Freelist path: the head cell must lie on a cell boundary.
            ASSERT(((free<<2)-sizeof(slabhdr))%header_size(h) == 0);
            void* p = offset(s,free<<2);
            free = *(unsigned char*)p;  // get next pos in free list (stored in the cell itself)
            h += (h&0x3C000)<<6;        // update usedm4: add cell size (bits 14..17 shifted to bit 18+)
            h &= ~0xff;
            h |= free;                  // update freelist head
            s->header = h;
            ASSERT(header_free(h) == 0 || ((header_free(h)<<2)-sizeof(slabhdr))%header_size(h) == 0);
            ASSERT(header_usedm4(h) <= 0x3F8u);
            ASSERT((header_usedm4(h)+4)%header_size(h) == 0);
            return p;
        }
        // Bump path: try to extend the used region by one cell.
        unsigned h2 = h + ((h&0x3C000)<<6);
        if (h2 < 0xfc00000)             // still below the "slab full" usedm4 limit
        {
            ASSERT((header_usedm4(h2)+4)%header_size(h2) == 0);
            s->header = h2;
            // New cell sits just past the previously used region
            // (h>>18 is the old usedm4, in bytes).
            return offset(s,(h>>18) + sizeof(unsigned) + sizeof(slabhdr));
        }
        // Slab is full: float it (top bit set) and drop it from the partial tree.
        h |= 0x80000000;                // mark the slab as full-floating
        s->header = h;
        tree_remove(s);
        checktree(ss.partial);
        // go back and try the next slab...
    }
    // no partial slabs found, so allocate from a new slab
    return allocnewslab(ss);
}
hgs
parents:
diff changeset
  2468
hgs
parents:
diff changeset
  2469
void RNewAllocator::slab_free(void* p)
//
// Free a cell from the slab allocator
// Strategy:
// 1. Find the containing slab (round down to nearest 1KB boundary)
// 2. Push the cell into the slab's freelist, and update the slab usage count
// 3. If this is the last allocated cell, free the slab to the main slab manager
// 4. If the slab was full-floating then insert the slab in it's respective partial tree
//
{
    ASSERT(lowbits(p,3)==0);            // cells are at least 8-byte aligned
    slab* s = slab::slabfor(p);

    unsigned pos = lowbits(p, slabsize);    // byte offset of the cell inside the slab
    unsigned h = s->header;
    ASSERT(header_usedm4(h) != 0x3fC);      // slab is empty already
    ASSERT((pos-sizeof(slabhdr))%header_size(h) == 0);
    // Push the cell on the freelist: cell stores old head, header stores new head.
    *(unsigned char*)p = (unsigned char)h;
    h &= ~0xFF;
    h |= (pos>>2);
    // Cell size field occupies bits 12..17; sizes are multiples of 4, so
    // bits 12..13 are always zero and the 0x3C000 mask is equivalent here.
    unsigned size = h & 0x3C000;
    unsigned allocSize = (h & 0x3F000) >> 12; // size is stored in bits 12...17 in slabhdr
    iTotalAllocSize -= allocSize;
    if (int(h) >= 0)
    {   // slab was in a partial tree (not full-floating)
        h -= size<<6;                   // usedm4 -= cell size (size<<12 shifted to bit 18+)
        if (int(h)>=0)
        {   // still has live cells - just store the updated header
            s->header = h;
            return;
        }
        // usedm4 went negative: that was the last live cell, recycle the slab.
        freeslab(s);
        return;
    }
    // Slab was full-floating: it now has one free cell, so clear the
    // floating bit and re-insert it into its slabset's partial tree.
    h -= size<<6;
    h &= ~0x80000000;
    s->header = h;
    slabset& ss = slaballoc[sizemap[size>>14]];  // size>>14 == cellsize>>2
    tree_insert(s,&ss.partial);
    checktree(ss.partial);
}
hgs
parents:
diff changeset
  2510
hgs
parents:
diff changeset
  2511
void* slabset::initslab(slab* s)
//
// initialise an empty slab for this allocator and return the fist cell
// pre-condition: the slabset has no partial slabs for allocation
//
{
    ASSERT(partial==0);
    unsigned h = s->header & 0xF00; // preserve pagemap only (bits 8..11)
    h |= (size<<12);                    // set size (bits 12..17)
    h |= (size-4)<<18;                  // set usedminus4 to one object minus 4
    s->header = h;
    // This slab becomes the sole entry in the slabset's partial tree.
    partial = s;
    s->parent = &partial;
    s->child1 = s->child2 = 0;
    // First cell starts immediately after the slab header.
    return offset(s,sizeof(slabhdr));
}
hgs
parents:
diff changeset
  2527
hgs
parents:
diff changeset
  2528
TAny* RNewAllocator::SetBrk(TInt32 aDelta)
//
// Grow or shrink the sbrk-style region by aDelta bytes.
// Returns the previous top of the region on success, or MFAIL when
// the heap is fixed-size or the growth could not be committed.
//
{
    if (iFlags & EFixedSize)
        return MFAIL;                   // fixed heaps never move the break

    if (aDelta > 0)
    {   // growing: commit the new pages above the current top
        if (!map(iTop, aDelta))
            return MFAIL;
    }
    else if (aDelta < 0)
    {   // shrinking: decommit the pages below the current top
        unmap(offset(iTop, aDelta), -aDelta);
    }

    TAny* oldTop = iTop;
    iTop = offset(iTop, aDelta);
    return oldTop;
}
hgs
parents:
diff changeset
  2546
hgs
parents:
diff changeset
  2547
void* RNewAllocator::map(void* p,unsigned sz)
//
// allocate pages in the chunk
// if p is NULL, find and allocate the required number of pages (which must lie in the lower half)
// otherwise commit the pages specified
//
// Returns the committed address, or 0 on failure (chunk full, commit
// refused, or the found region lies in the DL zone).
//
{
    ASSERT(p == floor(p, pagesize));
    ASSERT(sz == ceiling(sz, pagesize));
    ASSERT(sz > 0);

    // Refuse to grow past the configured maximum heap length.
    if (iChunkSize + sz > iMaxLength)
        return 0;

    RChunk chunk;
    chunk.SetHandle(iChunkHandle);
    if (p)
    {
        // Commit the explicitly requested range (offset is relative to
        // the chunk base, hence iOffset + distance from this heap object).
        TInt r = chunk.Commit(iOffset + ptrdiff(p, this),sz);
        if (r < 0)
            return 0;
        iChunkSize += sz;
        return p;
    }

    // No address supplied: let the chunk find a free disconnected region.
    TInt r = chunk.Allocate(sz);
    if (r < 0)
        return 0;
    if (r > iOffset)
    {
        // can't allow page allocations in DL zone
        chunk.Decommit(r, sz);
        return 0;
    }
    iChunkSize += sz;
#ifdef TRACING_HEAPS
    if (iChunkSize > iHighWaterMark)
        {
            iHighWaterMark = ceiling(iChunkSize,16*pagesize);


            RChunk chunk;
            chunk.SetHandle(iChunkHandle);
            TKName chunk_name;
            chunk.FullName(chunk_name);
            BTraceContextBig(BTrace::ETest1, 4, 44, chunk_name.Ptr(), chunk_name.Size());

            TUint32 traceData[6];
            traceData[0] = iChunkHandle;
            traceData[1] = iMinLength;
            traceData[2] = iMaxLength;
            traceData[3] = sz;
            traceData[4] = iChunkSize;
            traceData[5] = iHighWaterMark;
            BTraceContextN(BTrace::ETest1, 3, (TUint32)this, 33, traceData, sizeof(traceData));
        }
#endif

    // Convert the chunk-relative offset back to a heap-relative address.
    return offset(this, r - iOffset);
    // code below does delayed initialisation of the slabs.
    /*
    if (iChunkSize >= slab_init_threshold)
    {   // set up slab system now that heap is large enough
        slab_config(slab_config_bits);
        slab_init_threshold = KMaxTUint;
    }
    return p;
    */
}
hgs
parents:
diff changeset
  2616
hgs
parents:
diff changeset
  2617
void* RNewAllocator::remap(void* p,unsigned oldsz,unsigned sz)
hgs
parents:
diff changeset
  2618
{
hgs
parents:
diff changeset
  2619
    if (oldsz > sz)
hgs
parents:
diff changeset
  2620
        {   // shrink
hgs
parents:
diff changeset
  2621
        unmap(offset(p,sz), oldsz-sz);
hgs
parents:
diff changeset
  2622
        }
hgs
parents:
diff changeset
  2623
    else if (oldsz < sz)
hgs
parents:
diff changeset
  2624
        {   // grow, try and do this in place first
hgs
parents:
diff changeset
  2625
        if (!map(offset(p, oldsz), sz-oldsz))
hgs
parents:
diff changeset
  2626
            {
hgs
parents:
diff changeset
  2627
            // need to allocate-copy-free
hgs
parents:
diff changeset
  2628
            void* newp = map(0, sz);
hgs
parents:
diff changeset
  2629
            if (newp) {
hgs
parents:
diff changeset
  2630
                memcpy(newp, p, oldsz);
hgs
parents:
diff changeset
  2631
                unmap(p,oldsz);
hgs
parents:
diff changeset
  2632
            }
hgs
parents:
diff changeset
  2633
            return newp;
hgs
parents:
diff changeset
  2634
            }
hgs
parents:
diff changeset
  2635
        }
hgs
parents:
diff changeset
  2636
    return p;
hgs
parents:
diff changeset
  2637
}
hgs
parents:
diff changeset
  2638
hgs
parents:
diff changeset
  2639
void RNewAllocator::unmap(void* p,unsigned sz)
//
// Decommit a page-aligned, page-sized region back to the hosting chunk
// and shrink the running iChunkSize accounting.
//
{
    ASSERT(p == floor(p, pagesize));
    ASSERT(sz == ceiling(sz, pagesize));
    ASSERT(sz > 0);

    RChunk chunk;
    chunk.SetHandle(iChunkHandle);
    // Decommit takes a chunk-relative offset; offset(this,-iOffset) is the chunk base.
    TInt r = chunk.Decommit(ptrdiff(p, offset(this,-iOffset)), sz);
    //TInt offset = (TUint8*)p-(TUint8*)chunk.Base();
    //TInt r = chunk.Decommit(offset,sz);

    ASSERT(r >= 0);
    iChunkSize -= sz;
#ifdef TRACING_HEAPS
    // NOTE(review): this trace block tests iChunkSize > iHighWaterMark right
    // after DECREMENTING iChunkSize - it appears copy-pasted from map() and
    // can rarely (if ever) fire here. Confirm intent before relying on it.
    if (iChunkSize > iHighWaterMark)
        {
            iHighWaterMark = ceiling(iChunkSize,16*pagesize);


            RChunk chunk;
            chunk.SetHandle(iChunkHandle);
            TKName chunk_name;
            chunk.FullName(chunk_name);
            BTraceContextBig(BTrace::ETest1, 4, 44, chunk_name.Ptr(), chunk_name.Size());

            TUint32 traceData[6];
            traceData[0] = iChunkHandle;
            traceData[1] = iMinLength;
            traceData[2] = iMaxLength;
            traceData[3] = sz;
            traceData[4] = iChunkSize;
            traceData[5] = iHighWaterMark;
            BTraceContextN(BTrace::ETest1, 3, (TUint32)this, 33, traceData, sizeof(traceData));
        }
#endif
}
hgs
parents:
diff changeset
  2676
hgs
parents:
diff changeset
  2677
void RNewAllocator::paged_init(unsigned pagepower)
hgs
parents:
diff changeset
  2678
    {
hgs
parents:
diff changeset
  2679
        if (pagepower == 0)
hgs
parents:
diff changeset
  2680
            pagepower = 31;
hgs
parents:
diff changeset
  2681
        else if (pagepower < minpagepower)
hgs
parents:
diff changeset
  2682
            pagepower = minpagepower;
hgs
parents:
diff changeset
  2683
        page_threshold = pagepower;
hgs
parents:
diff changeset
  2684
        for (int i=0;i<npagecells;++i)
hgs
parents:
diff changeset
  2685
        {
hgs
parents:
diff changeset
  2686
            pagelist[i].page = 0;
hgs
parents:
diff changeset
  2687
            pagelist[i].size = 0;
hgs
parents:
diff changeset
  2688
        }
hgs
parents:
diff changeset
  2689
    }
hgs
parents:
diff changeset
  2690
hgs
parents:
diff changeset
  2691
void* RNewAllocator::paged_allocate(unsigned size)
hgs
parents:
diff changeset
  2692
{
hgs
parents:
diff changeset
  2693
    unsigned nbytes = ceiling(size, pagesize);
hgs
parents:
diff changeset
  2694
    if (nbytes < size + cellalign)
hgs
parents:
diff changeset
  2695
    {   // not enough extra space for header and alignment, try and use cell list
hgs
parents:
diff changeset
  2696
        for (pagecell *c = pagelist,*e = c + npagecells;c < e;++c)
hgs
parents:
diff changeset
  2697
            if (c->page == 0)
hgs
parents:
diff changeset
  2698
            {
hgs
parents:
diff changeset
  2699
                void* p = map(0, nbytes);
hgs
parents:
diff changeset
  2700
                if (!p)
hgs
parents:
diff changeset
  2701
                    return 0;
hgs
parents:
diff changeset
  2702
                c->page = p;
hgs
parents:
diff changeset
  2703
                c->size = nbytes;
hgs
parents:
diff changeset
  2704
                iTotalAllocSize += nbytes;
hgs
parents:
diff changeset
  2705
                return p;
hgs
parents:
diff changeset
  2706
            }
hgs
parents:
diff changeset
  2707
    }
hgs
parents:
diff changeset
  2708
    // use a cell header
hgs
parents:
diff changeset
  2709
    nbytes = ceiling(size + cellalign, pagesize);
hgs
parents:
diff changeset
  2710
    void* p = map(0, nbytes);
hgs
parents:
diff changeset
  2711
    if (!p)
hgs
parents:
diff changeset
  2712
        return 0;
hgs
parents:
diff changeset
  2713
    *static_cast<unsigned*>(p) = nbytes;
hgs
parents:
diff changeset
  2714
    iTotalAllocSize += nbytes;
hgs
parents:
diff changeset
  2715
    return offset(p, cellalign);
hgs
parents:
diff changeset
  2716
}
hgs
parents:
diff changeset
  2717
hgs
parents:
diff changeset
  2718
void* RNewAllocator::paged_reallocate(void* p, unsigned size)
hgs
parents:
diff changeset
  2719
{
hgs
parents:
diff changeset
  2720
    if (lowbits(p, pagesize) == 0)
hgs
parents:
diff changeset
  2721
    {   // continue using descriptor
hgs
parents:
diff changeset
  2722
        pagecell* c = paged_descriptor(p);
hgs
parents:
diff changeset
  2723
        unsigned nbytes = ceiling(size, pagesize);
hgs
parents:
diff changeset
  2724
        void* newp = remap(p, c->size, nbytes);
hgs
parents:
diff changeset
  2725
        if (!newp)
hgs
parents:
diff changeset
  2726
            return 0;
hgs
parents:
diff changeset
  2727
        c->page = newp;
hgs
parents:
diff changeset
  2728
        c->size = nbytes;
hgs
parents:
diff changeset
  2729
        iTotalAllocSize += nbytes-c->size;
hgs
parents:
diff changeset
  2730
        return newp;
hgs
parents:
diff changeset
  2731
    }
hgs
parents:
diff changeset
  2732
    else
hgs
parents:
diff changeset
  2733
    {   // use a cell header
hgs
parents:
diff changeset
  2734
        ASSERT(lowbits(p,pagesize) == cellalign);
hgs
parents:
diff changeset
  2735
        p = offset(p,-int(cellalign));
hgs
parents:
diff changeset
  2736
        unsigned nbytes = ceiling(size + cellalign, pagesize);
hgs
parents:
diff changeset
  2737
        unsigned obytes = *static_cast<unsigned*>(p);
hgs
parents:
diff changeset
  2738
        void* newp = remap(p, obytes, nbytes);
hgs
parents:
diff changeset
  2739
        if (!newp)
hgs
parents:
diff changeset
  2740
            return 0;
hgs
parents:
diff changeset
  2741
        *static_cast<unsigned*>(newp) = nbytes;
hgs
parents:
diff changeset
  2742
        iTotalAllocSize += nbytes-obytes;
hgs
parents:
diff changeset
  2743
        return offset(newp, cellalign);
hgs
parents:
diff changeset
  2744
    }
hgs
parents:
diff changeset
  2745
}
hgs
parents:
diff changeset
  2746
hgs
parents:
diff changeset
  2747
void RNewAllocator::paged_free(void* p)
hgs
parents:
diff changeset
  2748
{
hgs
parents:
diff changeset
  2749
    if (lowbits(p,pagesize) == 0)
hgs
parents:
diff changeset
  2750
    {   // check pagelist
hgs
parents:
diff changeset
  2751
        pagecell* c = paged_descriptor(p);
hgs
parents:
diff changeset
  2752
hgs
parents:
diff changeset
  2753
        iTotalAllocSize -= c->size;
hgs
parents:
diff changeset
  2754
hgs
parents:
diff changeset
  2755
        unmap(p, c->size);
hgs
parents:
diff changeset
  2756
        c->page = 0;
hgs
parents:
diff changeset
  2757
        c->size = 0;
hgs
parents:
diff changeset
  2758
    }
hgs
parents:
diff changeset
  2759
    else
hgs
parents:
diff changeset
  2760
    {   // check page header
hgs
parents:
diff changeset
  2761
        unsigned* page = static_cast<unsigned*>(offset(p,-int(cellalign)));
hgs
parents:
diff changeset
  2762
        unsigned size = *page;
hgs
parents:
diff changeset
  2763
hgs
parents:
diff changeset
  2764
        iTotalAllocSize -= size;
hgs
parents:
diff changeset
  2765
hgs
parents:
diff changeset
  2766
        unmap(page,size);
hgs
parents:
diff changeset
  2767
    }
hgs
parents:
diff changeset
  2768
}
hgs
parents:
diff changeset
  2769
hgs
parents:
diff changeset
  2770
pagecell* RNewAllocator::paged_descriptor(const void* p) const
//
// Linear search of the pagelist for the descriptor cell tracking p.
// Precondition: p is page-aligned and IS tracked by some cell - in
// release builds a miss would run the loop past the array (the debug
// ASSERT below is the only bounds check).
//
{
    ASSERT(lowbits(p,pagesize) == 0);
    // Double casting to keep the compiler happy. Seems to think we can trying to
    // change a non-const member (pagelist) in a const function
    pagecell* c = (pagecell*)((void*)pagelist);
#ifdef _DEBUG
    pagecell* e = c + npagecells;       // end sentinel, used only by the ASSERT
#endif
    for (;;)
    {
        ASSERT(c!=e);
        if (c->page == p)
            return c;
        ++c;
    }
}
hgs
parents:
diff changeset
  2787
hgs
parents:
diff changeset
  2788
RNewAllocator* RNewAllocator::FixedHeap(TAny* aBase, TInt aMaxLength, TInt aAlign, TBool aSingleThread)
/**
Creates a fixed length heap at a specified location.

On successful return from this function, aMaxLength bytes are committed by the chunk.
The heap cannot be extended.

@param aBase         A pointer to the location where the heap is to be constructed.
@param aMaxLength    The length of the heap. If the supplied value is less
                     than KMinHeapSize, it is discarded and the value KMinHeapSize
                     is used instead.
@param aAlign        The alignment of heap cells.
@param aSingleThread Indicates whether single threaded or not.

@return A pointer to the new heap, or NULL if the heap could not be created.

@panic USER 56 if aMaxLength is negative.
*/
//
// Force construction of the fixed memory.
//
    {
    __ASSERT_ALWAYS(aMaxLength>=0, ::Panic(ETHeapMaxLengthNegative));

    // Enforce the minimum heap size before placement-constructing.
    const TInt length = (aMaxLength < KMinHeapSize) ? KMinHeapSize : aMaxLength;
    RNewAllocator* heap = new(aBase) RNewAllocator(length, aAlign, aSingleThread);

    if (aSingleThread)
        return heap;                    // no lock needed for a single-threaded heap

    // Multi-threaded heaps need a local lock; record it as the heap's handle.
    if (heap->iLock.CreateLocal() != KErrNone)
        return NULL;
    heap->iHandles = (TInt*)&heap->iLock;
    heap->iHandleCount = 1;
    return heap;
    }
hgs
parents:
diff changeset
  2827
hgs
parents:
diff changeset
  2828
RNewAllocator* RNewAllocator::ChunkHeap(const TDesC* aName, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread)
/**
Creates a heap in a local or global chunk.

The chunk hosting the heap can be local or global.

A local chunk is one which is private to the process creating it and is not
intended for access by other user processes.
A global chunk is one which is visible to all processes.

The hosting chunk is local, if the pointer aName is NULL, otherwise
the hosting chunk is global and the descriptor *aName is assumed to contain
the name to be assigned to it.

Ownership of the host chunk is vested in the current process.

A minimum and a maximum size for the heap can be specified. On successful
return from this function, the size of the heap is at least aMinLength.
If subsequent requests for allocation of memory from the heap cannot be
satisfied by compressing the heap, the size of the heap is extended in
increments of aGrowBy until the request can be satisfied. Attempts to extend
the heap causes the size of the host chunk to be adjusted.

Note that the size of the heap cannot be adjusted by more than aMaxLength.

@param aName         If NULL, the function constructs a local chunk to host
                     the heap.
                     If not NULL, a pointer to a descriptor containing the name
                     to be assigned to the global chunk hosting the heap.
@param aMinLength    The minimum length of the heap.
@param aMaxLength    The maximum length to which the heap can grow.
                     If the supplied value is less than KMinHeapSize, then it
                     is discarded and the value KMinHeapSize used instead.
@param aGrowBy       The increments to the size of the host chunk. If a value is
                     not explicitly specified, the value KMinHeapGrowBy is taken
                     by default
@param aAlign        The alignment of heap cells.
@param aSingleThread Indicates whether single threaded or not.

@return A pointer to the new heap or NULL if the heap could not be created.

@panic USER 41 if aMinLength is greater than the supplied value of aMaxLength.
@panic USER 55 if aMinLength is negative.
@panic USER 56 if aMaxLength is negative.
*/
//
// Allocate a Chunk of the requested size and force construction.
//
    {
    // Validate lengths before clamping: a negative or inverted range is a caller bug.
    __ASSERT_ALWAYS(aMinLength>=0, ::Panic(ETHeapMinLengthNegative));
    __ASSERT_ALWAYS(aMaxLength>=aMinLength, ::Panic(ETHeapCreateMaxLessThanMin));
    if (aMaxLength<KMinHeapSize)
        aMaxLength=KMinHeapSize;
    RChunk c;
    TInt r;
    // Reserve a disconnected chunk of twice the requested maximum; the extra
    // reserve is used by the allocator itself (OffsetChunkHeap commits the
    // DL region at the top of the first half — see that function).
    if (aName)
        r = c.CreateDisconnectedGlobal(*aName, 0, 0, aMaxLength*2, aSingleThread ? EOwnerThread : EOwnerProcess);
    else
        r = c.CreateDisconnectedLocal(0, 0, aMaxLength*2, aSingleThread ? EOwnerThread : EOwnerProcess);
    if (r!=KErrNone)
        return NULL;

    // Construct the heap inside the new chunk. EChunkHeapDuplicate makes the
    // heap hold its own process-relative handle, so our local one can be closed
    // unconditionally below (the heap keeps the chunk alive on success; on
    // failure this Close() releases the reservation).
    RNewAllocator* h = ChunkHeap(c, aMinLength, aGrowBy, aMaxLength, aAlign, aSingleThread, UserHeap::EChunkHeapDuplicate);
    c.Close();
    return h;
    }
hgs
parents:
diff changeset
  2895
hgs
parents:
diff changeset
  2896
RNewAllocator* RNewAllocator::ChunkHeap(RChunk aChunk, TInt aMinLength, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
/**
Creates a heap in an existing chunk.

This function is intended to be used to create a heap in a user writable code
chunk as created by a call to RChunk::CreateLocalCode().
This type of heap can be used to hold code fragments from a JIT compiler.

The maximum length to which the heap can grow is the same as
the maximum size of the chunk.

@param aChunk        The chunk that will host the heap.
@param aMinLength    The minimum length of the heap.
@param aGrowBy       The increments to the size of the host chunk.
@param aMaxLength    The maximum length to which the heap can grow.
@param aAlign        The alignment of heap cells.
@param aSingleThread Indicates whether single threaded or not.
@param aMode         Flags controlling the reallocation. The only bit which has any
                     effect on reallocation is that defined by the enumeration
                     ENeverMove of the enum RAllocator::TReAllocMode.
                     If this is set, then any successful reallocation guarantees not
                     to have changed the start address of the cell.
                     By default, this parameter is zero.

@return A pointer to the new heap or NULL if the heap could not be created.
*/
//
// Construct a heap in an already existing chunk
//
    {
    // Thin wrapper: a heap at the very start of the chunk is just an
    // offset-heap with offset zero.
    return OffsetChunkHeap(aChunk, aMinLength, 0, aGrowBy, aMaxLength, aAlign, aSingleThread, aMode);
    }
hgs
parents:
diff changeset
  2929
hgs
parents:
diff changeset
  2930
RNewAllocator* RNewAllocator::OffsetChunkHeap(RChunk aChunk, TInt aMinLength, TInt aOffset, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
/**
Creates a heap in an existing chunk, offset from the beginning of the chunk.

This function is intended to be used to create a heap where a fixed amount of
additional data must be stored at a known location. The additional data can be
placed at the base address of the chunk, allowing it to be located without
depending on the internals of the heap structure.

The maximum length to which the heap can grow is the maximum size of the chunk,
minus the offset.

@param aChunk        The chunk that will host the heap.
@param aMinLength    The minimum length of the heap.
@param aOffset       The offset from the start of the chunk, to the start of the heap.
@param aGrowBy       The increments to the size of the host chunk.
@param aMaxLength    The maximum length to which the heap can grow.
@param aAlign        The alignment of heap cells.
@param aSingleThread Indicates whether single threaded or not.
@param aMode         Flags controlling the reallocation. The only bit which has any
                     effect on reallocation is that defined by the enumeration
                     ENeverMove of the enum RAllocator::TReAllocMode.
                     If this is set, then any successful reallocation guarantees not
                     to have changed the start address of the cell.
                     By default, this parameter is zero.

@return A pointer to the new heap or NULL if the heap could not be created.
*/
//
// Construct a heap in an already existing chunk
//
    {
    TInt page_size = malloc_getpagesize;
    if (!aAlign)
        aAlign = RNewAllocator::ECellAlignment;
    TInt maxLength = aChunk.MaxSize();
    // Commit granularity: the larger of the requested alignment and a page.
    TInt round_up = Max(aAlign, page_size);
    // Smallest usable cell: big enough for either header type, aligned.
    TInt min_cell = _ALIGN_UP(Max((TInt)RNewAllocator::EAllocCellSize, (TInt)RNewAllocator::EFreeCellSize), aAlign);
    aOffset = _ALIGN_UP(aOffset, 8);

#ifdef NO_RESERVE_MEMORY
#ifdef TRACING_HEAPS
    TKName chunk_name;
    aChunk.FullName(chunk_name);
    BTraceContextBig(BTrace::ETest1, 0xF, 0xFF, chunk_name.Ptr(), chunk_name.Size());

    TUint32 traceData[4];
    traceData[0] = aChunk.Handle();
    traceData[1] = aMinLength;
    traceData[2] = aMaxLength;
    traceData[3] = aAlign;
    BTraceContextN(BTrace::ETest1, 0xE, 0xEE, 0xEE, traceData, sizeof(traceData));
#endif
    //modifying the aMinLength because not all memory is the same in the new allocator. So it cannot reserve it properly
    if ( aMinLength<aMaxLength)
        aMinLength = 0;
#endif

    if (aMaxLength && aMaxLength+aOffset<maxLength)
        maxLength = _ALIGN_UP(aMaxLength+aOffset, round_up);
    __ASSERT_ALWAYS(aMinLength>=0, ::Panic(ETHeapMinLengthNegative));
    __ASSERT_ALWAYS(maxLength>=aMinLength, ::Panic(ETHeapCreateMaxLessThanMin));
    // Initial commit must at least hold the allocator object plus one cell.
    aMinLength = _ALIGN_UP(Max(aMinLength, (TInt)sizeof(RNewAllocator) + min_cell) + aOffset, round_up);

    // the new allocator uses a disconnected chunk so must commit the initial allocation
    // with Commit() instead of Adjust()
    //  TInt r=aChunk.Adjust(aMinLength);
    //TInt r = aChunk.Commit(aOffset, aMinLength);

    // NOTE(review): the caller-supplied offset is deliberately overwritten here;
    // the DL region is committed at 'maxLength' into the (double-sized) chunk
    // reservation, leaving the region below it for the slab/page allocators —
    // presumably; confirm against the allocator's GrowBy/map_bits logic.
    aOffset = maxLength;
    //TInt MORE_CORE_OFFSET = maxLength/2;
    //TInt r = aChunk.Commit(MORE_CORE_OFFSET, aMinLength);
    TInt r = aChunk.Commit(aOffset, aMinLength);

    if (r!=KErrNone)
        return NULL;

    // Placement-construct the allocator at the start of the committed region.
    RNewAllocator* h = new (aChunk.Base() + aOffset) RNewAllocator(aChunk.Handle(), aOffset, aMinLength, maxLength, aGrowBy, aAlign, aSingleThread);
    //RNewAllocator* h = new (aChunk.Base() + MORE_CORE_OFFSET) RNewAllocator(aChunk.Handle(), aOffset, aMinLength, maxLength, aGrowBy, aAlign, aSingleThread);

    TBool duplicateLock = EFalse;
    if (!aSingleThread)
        {
        // If we are about to switch to this heap, create the lock thread-relative
        // first; it is re-duplicated as process-relative further below.
        duplicateLock = aMode & UserHeap::EChunkHeapSwitchTo;
        if (h->iLock.CreateLocal(duplicateLock ? EOwnerThread : EOwnerProcess)!=KErrNone)
            {
            // Lock creation failed: zero the handle so the half-built heap
            // cannot close a chunk it does not own. The caller still owns aChunk.
            h->iChunkHandle = 0;
            return NULL;
            }
        }

    if (aMode & UserHeap::EChunkHeapSwitchTo)
        User::SwitchHeap(h);

    h->iHandles = &h->iChunkHandle;
    if (!aSingleThread)
        {
        // now change the thread-relative chunk/semaphore handles into process-relative handles
        h->iHandleCount = 2;
        if (duplicateLock)
            {
            RHandleBase s = h->iLock;
            r = h->iLock.Duplicate(RThread());
            s.Close();
            }
        if (r==KErrNone && (aMode & UserHeap::EChunkHeapDuplicate))
            {
            r = ((RChunk*)&h->iChunkHandle)->Duplicate(RThread());
            if (r!=KErrNone)
                h->iLock.Close(), h->iChunkHandle=0;
            }
        }
    else
        {
        h->iHandleCount = 1;
        if (aMode & UserHeap::EChunkHeapDuplicate)
            r = ((RChunk*)&h->iChunkHandle)->Duplicate(RThread(), EOwnerThread);
        }

    // return the heap address
    return (r==KErrNone) ? h : NULL;
    }
hgs
parents:
diff changeset
  3053
hgs
parents:
diff changeset
  3054
    /* Only for debugging purpose - start*/
hgs
parents:
diff changeset
  3055
#ifdef DL_CHUNK_MEM_DEBUG
hgs
parents:
diff changeset
  3056
// Debug probe: deliberately read the chunk header and the last byte of the
// chunk so that an unmapped/decommitted page faults here, close to the bug,
// rather than later inside the allocator. The locals are intentionally unused —
// only the reads matter.
void RNewAllocator::debug_check_small_chunk_access(mchunkptr p, size_t psize)
{
    size_t sz = chunksize(p);                               // touches the header
    char ch = *((char*)chunk_plus_offset(p, psize-1));      // touches the last byte
}
hgs
parents:
diff changeset
  3061
hgs
parents:
diff changeset
  3062
// Debug probe: walk the chunk one page at a time, reading a byte from each
// page, so a decommitted page inside [p, p+psize) faults at this call site.
// The read values are discarded; only the memory accesses matter.
void RNewAllocator::debug_check_any_chunk_access(mchunkptr p, size_t psize)
{
    if (p==0 || psize==0) return;

    mchunkptr chunkEnd = chunk_plus_offset(p, psize);
    char probe = *((char*)p);   // first page
    for (char* cursor = (char*)chunk_plus_offset(p, mparams.page_size);
         (size_t)cursor < (size_t)chunkEnd;
         cursor = (char*)chunk_plus_offset(cursor, mparams.page_size))
    {
        probe = *cursor;        // one read per subsequent page
    }
}
hgs
parents:
diff changeset
  3075
hgs
parents:
diff changeset
  3076
// Debug probe for tree (large) chunks: read one byte from every page covered
// by [p, p+psize) so a decommitted page faults here instead of deeper in the
// allocator. Read results are intentionally discarded.
void RNewAllocator::debug_check_large_chunk_access(tchunkptr p, size_t psize)
{
    mchunkptr chunkEnd = chunk_plus_offset(p, psize);
    char probe = *((char*)p);   // first page
    for (char* cursor = (char*)chunk_plus_offset(p, mparams.page_size);
         (size_t)cursor < (size_t)chunkEnd;
         cursor = (char*)chunk_plus_offset(cursor, mparams.page_size))
    {
        probe = *cursor;        // one read per subsequent page
    }
}
hgs
parents:
diff changeset
  3087
hgs
parents:
diff changeset
  3088
// Consistency check run after a free that released pages: verifies that the
// freed chunk's bookkeeping agrees with the heap state. Logs (does not panic)
// on inconsistency, so it is safe in release-debug builds.
void RNewAllocator::debug_chunk_page_release_check(mchunkptr p, size_t psize, mstate fm, int mem_released)
{
    if (mem_released)
    {
        // A chunk whose pages were released must carry the not-in-memory flag.
        if (!page_not_in_memory(p, psize) )
            MEM_LOG("CHUNK_PAGE_ERROR::dlfree, error - page_in_mem flag is corrupt");
        // The released chunk must lie entirely below the top chunk.
        if (chunk_plus_offset(p, psize) > fm->top)
            MEM_LOG("CHUNK_PAGE_ERROR: error Top chunk address invalid");
        // The designated-victim chunk must not point into the released range.
        if (fm->dv >= p && fm->dv < chunk_plus_offset(p, psize))
            MEM_LOG("CHUNK_PAGE_ERROR: error DV chunk address invalid");
    }
}
hgs
parents:
diff changeset
  3100
#endif
hgs
parents:
diff changeset
  3101
hgs
parents:
diff changeset
  3102
#ifdef OOM_LOGGING
hgs
parents:
diff changeset
  3103
#include <hal.h>
hgs
parents:
diff changeset
  3104
// Recursively dump one tree-bin subtree to the OOM log.
// For the node 't' and every chunk chained to it (same-sized chunks are linked
// through 'fd'), accumulates the total free bytes and chunk count, recursing
// into both children first, then emits one LARGE_BIN CSV line.
// Fix: removed the unused locals 'tindex'/'idx' and the dead
// compute_tree_index() call — their results were never read.
void RNewAllocator::dump_large_chunk(mstate m, tchunkptr t) {
    tchunkptr u = t;
    size_t tsize = chunksize(t);

    size_t free = 0;
    int nfree = 0;
    do
        {   /* traverse through chain of same-sized nodes */
        if (u->child[0] != 0)
            {
            dump_large_chunk(m, u->child[0]);
            }

        if (u->child[1] != 0)
            {
            dump_large_chunk(m, u->child[1]);
            }

        free += chunksize(u);
        nfree++;
        u = u->fd;
        }
    while (u != t);
    C_LOGF(_L8("LARGE_BIN,%d,%d,%d"), tsize, free, nfree);
}
hgs
parents:
diff changeset
  3132
hgs
parents:
diff changeset
  3133
void RNewAllocator::dump_dl_free_chunks()
hgs
parents:
diff changeset
  3134
{
hgs
parents:
diff changeset
  3135
    C_LOG("");
hgs
parents:
diff changeset
  3136
    C_LOG("------------ dump_dl_free_chunks start -------------");
hgs
parents:
diff changeset
  3137
    C_LOG("BinType,BinSize,FreeSize,FreeCount");
hgs
parents:
diff changeset
  3138
hgs
parents:
diff changeset
  3139
    // dump small bins
hgs
parents:
diff changeset
  3140
    for (int i = 0; i < NSMALLBINS; ++i)
hgs
parents:
diff changeset
  3141
        {
hgs
parents:
diff changeset
  3142
        sbinptr b = smallbin_at(gm, i);
hgs
parents:
diff changeset
  3143
        unsigned int empty = (gm->smallmap & (1 << i)) == 0;
hgs
parents:
diff changeset
  3144
        int nfree = 0;
hgs
parents:
diff changeset
  3145
        if (!empty)
hgs
parents:
diff changeset
  3146
            {
hgs
parents:
diff changeset
  3147
            int nfree = 0;
hgs
parents:
diff changeset
  3148
            size_t free = 0;
hgs
parents:
diff changeset
  3149
            mchunkptr p = b->bk;
hgs
parents:
diff changeset
  3150
            size_t size = chunksize(p);
hgs
parents:
diff changeset
  3151
            for (; p != b; p = p->bk)
hgs
parents:
diff changeset
  3152
                {
hgs
parents:
diff changeset
  3153
                free += chunksize(p);
hgs
parents:
diff changeset
  3154
                nfree++;
hgs
parents:
diff changeset
  3155
                }
hgs
parents:
diff changeset
  3156
hgs
parents:
diff changeset
  3157
            C_LOGF(_L8("SMALL_BIN,%d,%d,%d"), size, free, nfree);
hgs
parents:
diff changeset
  3158
            }
hgs
parents:
diff changeset
  3159
        }
hgs
parents:
diff changeset
  3160
hgs
parents:
diff changeset
  3161
    // dump large bins
hgs
parents:
diff changeset
  3162
    for (int i = 0; i < NTREEBINS; ++i)
hgs
parents:
diff changeset
  3163
        {
hgs
parents:
diff changeset
  3164
        tbinptr* tb = treebin_at(gm, i);
hgs
parents:
diff changeset
  3165
        tchunkptr t = *tb;
hgs
parents:
diff changeset
  3166
        int empty = (gm->treemap & (1 << i)) == 0;
hgs
parents:
diff changeset
  3167
        if (!empty)
hgs
parents:
diff changeset
  3168
            dump_large_chunk(gm, t);
hgs
parents:
diff changeset
  3169
        }
hgs
parents:
diff changeset
  3170
hgs
parents:
diff changeset
  3171
    C_LOG("------------ dump_dl_free_chunks end -------------");
hgs
parents:
diff changeset
  3172
    C_LOG("");
hgs
parents:
diff changeset
  3173
    }
hgs
parents:
diff changeset
  3174
hgs
parents:
diff changeset
  3175
// Log a snapshot of allocator and system memory state, typically on OOM.
// @param fail_size  The allocation size that just failed, or 0 for a
//                   routine (non-OOM) dump; selects the log banner.
void RNewAllocator::dump_heap_logs(size_t fail_size)
{
    MEM_LOG("");
    if (fail_size) {
        MEM_LOG("MEMDEBUG::RSymbianDLHeap OOM Log dump *************** start");
        MEM_LOGF(_L8("Failing to alloc size: %d"), fail_size);
    }
    else
        MEM_LOG("MEMDEBUG::RSymbianDLHeap Log dump *************** start");

    // DL arena is the [iBase, iTop) region; everything else committed in the
    // chunk belongs to the slab/page allocators.
    TInt dl_chunk_size = ptrdiff(iTop,iBase);
    TInt slabp_chunk_size = iChunkSize + iUnmappedChunkSize - dl_chunk_size;
    TInt freeMem = 0;
    HAL::Get(HALData::EMemoryRAMFree, freeMem);
    MEM_LOGF(_L8("System Free RAM Size: %d"), freeMem);
    MEM_LOGF(_L8("Allocator Commited Chunk Size: %d"), iChunkSize);
    MEM_LOGF(_L8("DLHeap Arena Size=%d"), dl_chunk_size);
    MEM_LOGF(_L8("DLHeap unmapped chunk size: %d"), iUnmappedChunkSize);
    MEM_LOGF(_L8("Slab-Page Allocator Chunk Size=%d"), slabp_chunk_size);

    // uordblks/fordblks: dlmalloc's allocated/free byte totals.
    mallinfo info = dlmallinfo();
    TUint heapAlloc = info.uordblks;
    TUint heapFree = info.fordblks;
    MEM_LOGF(_L8("DLHeap allocated size: %d"), heapAlloc);
    MEM_LOGF(_L8("DLHeap free size: %d"), heapFree);

    if (fail_size) {
        MEM_LOG("MEMDEBUG::RSymbianDLHeap OOM Log dump *************** end");
    }else {
        MEM_LOG("MEMDEBUG::RSymbianDLHeap Log dump *************** end");
    }
    MEM_LOG("");
}
hgs
parents:
diff changeset
  3208
hgs
parents:
diff changeset
  3209
#endif
hgs
parents:
diff changeset
  3210
/* Only for debugging purpose - end*/
hgs
parents:
diff changeset
  3211
hgs
parents:
diff changeset
  3212
hgs
parents:
diff changeset
  3213
#define UserTestDebugMaskBit(bit) (TBool)(UserSvr::DebugMask(bit>>5) & (1<<(bit&31)))
hgs
parents:
diff changeset
  3214
hgs
parents:
diff changeset
  3215
#ifndef NO_NAMED_LOCAL_CHUNKS
hgs
parents:
diff changeset
  3216
//this class requires Symbian^3 for ElocalNamed
hgs
parents:
diff changeset
  3217
hgs
parents:
diff changeset
  3218
// Hack to get access to TChunkCreateInfo internals outside of the kernel
hgs
parents:
diff changeset
  3219
// Hack: derives from TChunkCreateInfo solely to reach its protected/internal
// fields from user code, so a named, disconnected, data chunk can be created
// for the thread heap. Fragile by design — it depends on the layout of
// TChunkCreateInfo staying as it is in this OS release (requires Symbian^3
// for ELocalNamed; see the guard above).
class TFakeChunkCreateInfo: public TChunkCreateInfo
    {
public:
    // Fill in the create-info for a thread-heap chunk:
    // reserve twice aMaxSize, commit [0, aInitialSize), local named chunk,
    // handle owned by the creating thread.
    // NOTE(review): iName stores a POINTER to aName — the descriptor passed in
    // must outlive the RChunk::Create() call (the caller passes a _LIT, which is safe).
     void SetThreadNewAllocator(TInt aInitialSize, TInt aMaxSize, const TDesC& aName)
        {
        iType = TChunkCreate::ENormal | TChunkCreate::EDisconnected | TChunkCreate::EData;
        iMaxSize = aMaxSize * 2;    // double reserve, matching the allocator's chunk layout

        iInitialBottom = 0;
        iInitialTop = aInitialSize;
        iAttributes = TChunkCreate::ELocalNamed;
        iName = &aName;
        iOwnerType = EOwnerThread;
        }
    };
hgs
parents:
diff changeset
  3234
#endif
hgs
parents:
diff changeset
  3235
hgs
parents:
diff changeset
  3236
#ifndef NO_NAMED_LOCAL_CHUNKS
hgs
parents:
diff changeset
  3237
_LIT(KLitDollarHeap,"$HEAP");
hgs
parents:
diff changeset
  3238
#endif
hgs
parents:
diff changeset
  3239
TInt RNewAllocator::CreateThreadHeap(SStdEpocThreadCreateInfo& aInfo, RNewAllocator*& aHeap, TInt aAlign, TBool aSingleThread)
/**
Creates the calling thread's heap chunk and constructs an RNewAllocator in it,
switching the thread to the new heap.

@param aInfo         Thread-creation parameters; iHeapInitialSize/iHeapMaxSize
                     size the heap, and iFlags may be updated with ETraceHeapAllocs.
@param aHeap         On success, receives the new heap; untouched on failure.
@param aAlign        The alignment of heap cells (0 selects the default).
@param aSingleThread Indicates whether single threaded or not.
@return KErrNone on success; the chunk-creation error or KErrNoMemory otherwise.
@internalComponent
*/
//
// Create a user-side heap
//
    {
    TInt page_size = malloc_getpagesize;
    TInt minLength = _ALIGN_UP(aInfo.iHeapInitialSize, page_size);
    TInt maxLength = Max(aInfo.iHeapMaxSize, minLength);
#ifdef TRACING_ALLOCS
    if (UserTestDebugMaskBit(96)) // 96 == KUSERHEAPTRACE in nk_trace.h
        aInfo.iFlags |= ETraceHeapAllocs;
#endif
    // Create the thread's heap chunk.
    RChunk c;
#ifndef NO_NAMED_LOCAL_CHUNKS
    // Named "$HEAP" so tools can identify the thread heap chunk.
    TFakeChunkCreateInfo createInfo;
    createInfo.SetThreadNewAllocator(0, maxLength, KLitDollarHeap());   // Initialise with no memory committed.
    TInt r = c.Create(createInfo);
#else
    TInt r = c.CreateDisconnectedLocal(0, 0, maxLength * 2);
#endif
    if (r!=KErrNone)
        return r;
    // Build the heap, switch this thread onto it, and let the heap keep its own
    // duplicated chunk handle; the local handle can then be closed either way.
    aHeap = ChunkHeap(c, minLength, page_size, maxLength, aAlign, aSingleThread, UserHeap::EChunkHeapSwitchTo|UserHeap::EChunkHeapDuplicate);
    c.Close();
    if (!aHeap)
        return KErrNoMemory;
#ifdef TRACING_ALLOCS
    if (aInfo.iFlags & ETraceHeapAllocs)
        {
        aHeap->iFlags |= RAllocator::ETraceAllocs;
        BTraceContext8(BTrace::EHeap, BTrace::EHeapCreate,(TUint32)aHeap, RNewAllocator::EAllocCellSize);
        TInt handle = aHeap->ChunkHandle();
        // Reinterpret the raw TInt handle as an RHandleBase to obtain its trace id.
        TInt chunkId = ((RHandleBase&)handle).BTraceId();
        BTraceContext8(BTrace::EHeap, BTrace::EHeapChunkCreate, (TUint32)aHeap, chunkId);
        }
#endif
    return KErrNone;
    }
hgs
parents:
diff changeset
  3281
hgs
parents:
diff changeset
  3282
/*
 * \internal
 * Called from the qtmain.lib application wrapper.
 * Create a new heap as requested, but use the new allocator
 */
TInt _symbian_SetupThreadHeap(TBool /*aNotFirst*/, SStdEpocThreadCreateInfo& aInfo)
    {
    TInt r = KErrNone;
    if (!aInfo.iAllocator && aInfo.iHeapInitialSize>0)
        {
        // new heap required; CreateThreadHeap also switches this thread onto it,
        // so the local pointer is deliberately not used afterwards
        RNewAllocator* pH = NULL;
        r = RNewAllocator::CreateThreadHeap(aInfo, pH);
        }
    else if (aInfo.iAllocator)
        {
        // sharing a heap: take a reference before switching this thread to it
        RAllocator* pA = aInfo.iAllocator;
        r = pA->Open();
        if (r == KErrNone)
            {
            User::SwitchAllocator(pA);
            }
        }
    // neither case applies (no allocator, zero initial size): nothing to do, KErrNone
    return r;
    }
hgs
parents:
diff changeset
  3308
hgs
parents:
diff changeset
  3309
#ifndef __WINS__
hgs
parents:
diff changeset
  3310
#pragma pop
hgs
parents:
diff changeset
  3311
#endif