/*
 * Copyright (c) 2010 Nokia Corporation and/or its subsidiary(-ies).
 *
 * This file is part of Qt Web Runtime.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * version 2.1 as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */


/****************************************************************************
 *
 * This file is part of the Symbian application wrapper of the Qt Toolkit.
 *
 * The memory allocator is backported from Symbian OS, and can eventually
 * be removed from Qt once it is built in to all supported OS versions.
 * The allocator is a composite of three allocators:
 *  - A page allocator, for large allocations
 *  - A slab allocator, for small allocations
 *  - Doug Lea's allocator, for medium size allocations
 *
 ***************************************************************************/
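// Editor's note (a summary sketch, not part of the original header): requests are
// dispatched by size -- Alloc() below routes sizes under slab_threshold to the slab
// allocator, sizes under the page threshold to Doug Lea's allocator, and everything
// larger to the page allocator -- while Free()/ReAlloc() recover the owning zone
// from the cell address alone (DL cells lie above the heap object, paged cells are
// page aligned, anything else is a slab cell).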


#include <e32std.h>
#include <e32cmn.h>
#include <hal.h>
#include <e32panic.h>

#ifndef QT_SYMBIAN_HAVE_U32STD_H
struct SThreadCreateInfo
{
    TAny* iHandle;
    TInt iType;
    TThreadFunction iFunction;
    TAny* iPtr;
    TAny* iSupervisorStack;
    TInt iSupervisorStackSize;
    TAny* iUserStack;
    TInt iUserStackSize;
    TInt iInitialThreadPriority;
    TPtrC iName;
    TInt iTotalSize;    // Size including any extras (must be a multiple of 8 bytes)
};

struct SStdEpocThreadCreateInfo : public SThreadCreateInfo
{
    RAllocator* iAllocator;
    TInt iHeapInitialSize;
    TInt iHeapMaxSize;
    TInt iPadding;      // Make structure size a multiple of 8 bytes
};
#else
#include <u32std.h>
#endif
#include <e32svr.h>

// Named local chunks require support from the kernel, which requires Symbian^3.
#define NO_NAMED_LOCAL_CHUNKS
// Reserving a minimum heap size is not supported, because the implementation does not
// know what type of memory to use. DLA memory grows upwards, slab and page allocators
// grow downwards. This would need kernel support to do properly.
#define NO_RESERVE_MEMORY

// The BTRACE debug framework requires Symbian OS 9.4 or higher.
// Required header files are not included in S60 5.0 SDKs, but
// they are available for open source versions of Symbian OS.
// Note that although Symbian OS 9.3 supports BTRACE, the usage in this file
// depends on 9.4 header files.

// This debug flag uses BTRACE to emit debug traces to identify the heaps.
// Note that it uses the ETest1 trace category, which is not reserved.
//#define TRACING_HEAPS
// This debug flag uses BTRACE to emit debug traces to aid with debugging
// allocs, frees & reallocs. It should be used together with the KUSERHEAPTRACE
// kernel trace flag to enable heap tracing.
//#define TRACING_ALLOCS
// This debug flag turns on tracing of the call stack for each alloc trace.
// It is dependent on TRACING_ALLOCS.
//#define TRACING_CALLSTACKS

#if defined(TRACING_ALLOCS) || defined(TRACING_HEAPS)
#include <e32btrace.h>
#endif

// Memory logging routines inherited from webkit allocator 9.2TB.
// #define OOM_LOGGING
// This debug flag logs error conditions when memory is unmapped/mapped from the system.
// Also, exports routines to dump the internal state and memory usage of the DL allocator.
// #define DL_CHUNK_MEM_DEBUG
// Exports debug routines to assert/trace chunked memory access.
#if defined(OOM_LOGGING) || defined(DL_CHUNK_MEM_DEBUG)
#include "MemoryLogger.h"
#endif


#ifndef __WINS__
#pragma push
#pragma arm
#endif

#include "dla_p.h"
#include "newallocator_p.h"

// If non-zero, this causes the slabs to be configured only when the chunk size exceeds this level.
#define DELAYED_SLAB_THRESHOLD (64*1024)  // 64KB seems about right based on trace data
#define SLAB_CONFIG (0xabe)
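// Editor's note (an assumption, not a documented contract): SLAB_CONFIG appears to be
// a bitmap selecting which slab cell sizes are enabled; compare the Init() calls in the
// chunk-heap constructor below, where 0x3fff is described as "all slabs" and 0xabe as
// the subset {48, 40, 32, 24, 20, 16, 12, 8}.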

_LIT(KDLHeapPanicCategory, "DL Heap");
#define GET_PAGE_SIZE(x)    HAL::Get(HALData::EMemoryPageSize, x)
#define __CHECK_CELL(p)
#define __POWER_OF_2(x)     ((TUint32)((x)^((x)-1))>=(TUint32)(x))
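// Editor's note: __POWER_OF_2 uses the identity that for a power of two x,
// x ^ (x-1) sets every bit up to and including x's single set bit (e.g. 8^7 == 15),
// so the result is >= x; for any other x the XOR covers only the low bits and is < x.
// (x == 0 also passes the test, but callers guard with a >= sizeof(TAny*) check first.)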
#define HEAP_PANIC(r)       Panic(r)

LOCAL_C void Panic(TCdtPanic aPanic)
// Panic the process with USER as the category.
{
    User::Panic(_L("USER"), aPanic);
}

/* Purpose:     Map chunk memory pages from system RAM
 * Arguments:   tp - tchunkptr in which memory should be mapped
 *              psize - incoming tchunk size
 * Return:      KErrNone if successful, else KErrNoMemory
 * Note:
 */
TInt RNewAllocator::map_chunk_pages(tchunkptr tp, size_t psize)
{
    if (page_not_in_memory(tp, psize)) {
        char *a_addr = tchunk_page_align(tp);
        size_t npages = tp->npages;

#ifdef OOM_LOGGING
        // check that npages matches the psize
        size_t offset = address_offset(a_addr, tp);
        if (offset < psize && (psize - offset) >= mparams.page_size)
        {
            size_t tpages = (psize - offset) >> pageshift;
            if (tpages != tp->npages) // assert condition
                MEM_LOG("CHUNK_PAGE_ERROR:map_chunk_pages, error in npages");
        }
        else
            MEM_LOG("CHUNK_PAGE_ERROR::map_chunk_pages: - Incorrect page-in-memory flag");
#endif

        if (map(a_addr, npages*mparams.page_size)) {
            TRACE_DL_CHUNK_MAP(tp, psize, a_addr, npages*mparams.page_size);
            ASSERT_RCHUNK_SIZE();
            TRACE_UNMAPPED_CHUNK(-1*npages*mparams.page_size);
            return KErrNone;
        }
        else {
#ifdef OOM_LOGGING
            MEM_LOGF(_L8("CHUNK_PAGE_ERROR:: map_chunk_pages - Failed to Commit RAM, page_addr=%x, npages=%d, chunk_size=%d"), a_addr, npages, psize);
            MEM_DUMP_OOM_LOGS(psize, "RSymbianDLHeap::map_chunk_pages - Failed to Commit RAM");
#endif
            return KErrNoMemory;
        }
    }
    return KErrNone;
}

/* Purpose:     Map partial chunk memory pages from system RAM
 * Arguments:   tp - tchunkptr in which memory should be mapped
 *              psize - incoming tchunk size
 *              r - remainder chunk pointer
 *              rsize - remainder chunk size
 * Return:      Number of unmapped pages from the remainder chunk if successful (0 or more),
 *              else KErrNoMemory
 * Note:        The remainder chunk should be large enough to be mapped out (checked before
 *              invoking this function). Pageout headers will be set from
 *              insert_large_chunk(), not here.
 */
TInt RNewAllocator::map_chunk_pages_partial(tchunkptr tp, size_t psize, tchunkptr r, size_t rsize)
{
    if (page_not_in_memory(tp, psize)) {
        size_t npages = tp->npages;                  // total number of pages unmapped in this chunk
        char *page_addr_map = tchunk_page_align(tp); // address at which to begin the page map
        char *page_addr_rem = tchunk_page_align(r);  // address in remainder chunk to remain unmapped
        assert(address_offset(page_addr_rem, r) < rsize);
        size_t npages_map = address_offset(page_addr_rem, page_addr_map) >> pageshift; // number of pages to be mapped
        if (npages_map > 0) {
            if (map(page_addr_map, npages_map*mparams.page_size)) {
#ifdef DL_CHUNK_MEM_DEBUG
                TRACE_DL_CHUNK_MAP(tp, psize, page_addr_map, npages_map*mparams.page_size);
                ASSERT_RCHUNK_SIZE();
                TRACE_UNMAPPED_CHUNK(-1*npages_map*mparams.page_size);
#endif
                return (npages - npages_map);
            }
            else {
#ifdef OOM_LOGGING
                MEM_LOGF(_L8("CHUNK_PAGE_ERROR:: map_chunk_pages_partial - Failed to Commit RAM, page_addr=%x, npages=%d, chunk_size=%d"), page_addr_map, npages_map, psize);
                MEM_DUMP_OOM_LOGS(psize, "RSymbianDLHeap::map_chunk_pages_partial - Failed to Commit RAM");
#endif
                return KErrNoMemory;
            }
        }
        else {
            // map not needed, first page is already mapped
            return npages;
        }
    }

    return 0;
}


/* Purpose:     Release (unmap) chunk memory pages to system RAM
 * Arguments:   tp - tchunkptr from which memory may be released
 *              psize - incoming tchunk size
 *              prev_npages - number of pages that have already been unmapped from this chunk
 * Return:      Total number of pages that have been unmapped from this chunk
 *              (new unmapped pages + prev_npages)
 * Note:        Pageout headers will be set from insert_large_chunk(), not here.
 */
TInt RNewAllocator::unmap_chunk_pages(tchunkptr tp, size_t psize, size_t prev_npages)
{
    size_t npages = 0;
    char *a_addr = tchunk_page_align(tp);
    size_t offset = address_offset(a_addr, tp);
    if (offset < psize && (psize - offset) >= mparams.page_size)
    { /* check for new pages to decommit */
        npages = (psize - offset) >> pageshift;
        if (npages > prev_npages) {
            unmap(a_addr, npages*mparams.page_size); // assuming the kernel takes care of already unmapped pages
            TRACE_DL_CHUNK_UNMAP(tp, psize, a_addr, npages*mparams.page_size);
            iChunkSize += prev_npages*mparams.page_size; // adjust actual chunk size
            ASSERT_RCHUNK_SIZE();
            TRACE_UNMAPPED_CHUNK((npages-prev_npages)*mparams.page_size);
            assert((a_addr + npages*mparams.page_size - 1) < (char*)next_chunk(tp));
        }
    }

#ifdef OOM_LOGGING
    if (npages && (npages < prev_npages))
        MEM_LOG("CHUNK_PAGE_ERROR:unmap_chunk_pages, error in npages");
    if (npages > prev_npages) {
        /* check that the end of the decommitted range lies within this chunk */
        if ((a_addr + npages*mparams.page_size - 1) >= (char*)next_chunk(tp))
            MEM_LOG("CHUNK_PAGE_ERROR:unmap_chunk_pages, error chunk boundary");
    }
#endif
#ifdef DL_CHUNK_MEM_DEBUG
    mchunkptr next = next_chunk(tp);
    do_check_any_chunk_access(next, chunksize(next));
    if (!npages) do_check_any_chunk_access((mchunkptr)tp, psize);
#endif

    return (npages);
}

/* Purpose:     Unmap all pages between the previously unmapped pages and the end of the
 *              top chunk, and reset top to the beginning of the prev chunk
 * Arguments:   m - global malloc state
 *              prev - previous chunk which has unmapped pages
 *              psize - size of previous chunk
 *              prev_npages - number of unmapped pages from previous chunk
 * Return:      Non-zero if successful, else 0
 * Note:
 */
TInt RNewAllocator::sys_trim_partial(mstate m, mchunkptr prev, size_t psize, size_t prev_npages)
{
    size_t released = 0;
    size_t extra = 0;
    if (is_initialized(m)) {
        psize += m->topsize;
        char *a_addr = tchunk_page_align(prev); // includes space for TOP footer
        size_t addr_offset = address_offset(a_addr, prev);
        assert(addr_offset > TOP_FOOT_SIZE); // always asserts?
        assert((char*)iTop >= a_addr);       // always asserts?
        if ((char*)iTop > a_addr)
            extra = address_offset(iTop, a_addr);

#ifdef OOM_LOGGING
        if ((char*)iTop < a_addr)
            MEM_LOGF(_L8("RSymbianDLHeap::sys_trim_partial - incorrect iTop value, top=%x, iTop=%x"), m->top, iTop);
#endif
        msegmentptr sp = segment_holding(m, (TUint8*)prev);
        if (!is_extern_segment(sp)) {
            if (is_mmapped_segment(sp)) {
                if (HAVE_MMAP && sp->size >= extra && !has_segment_link(m, sp)) { /* can't shrink if pinned */
                    // size_t newsize = sp->size - extra;
                    /* Prefer mremap, fall back to munmap */
                    if ((CALL_MREMAP(sp->base, sp->size, sp->size - extra, 0) != MFAIL) ||
                        (CALL_MUNMAP(sp->base + sp->size - extra, extra) == 0)) {
                        released = extra;
                    }
                }
            }
            else if (HAVE_MORECORE) {
                if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
                    extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - mparams.granularity;
                ACQUIRE_MORECORE_LOCK(m);
                {
                    /* Make sure end of memory is where we last set it. */
                    TUint8* old_br = (TUint8*)(CALL_MORECORE(0));
                    if (old_br == sp->base + sp->size) {
                        TUint8* rel_br = (TUint8*)(CALL_MORECORE(-extra));
                        TUint8* new_br = (TUint8*)(CALL_MORECORE(0));
                        if (rel_br != CMFAIL && new_br < old_br)
                            released = old_br - new_br;
                    }
                }
                RELEASE_MORECORE_LOCK(m);
            }
        }

        if (released != 0) {
            TRACE_DL_CHUNK_UNMAP(prev, psize, a_addr, released);
            iChunkSize += prev_npages*mparams.page_size; // prev_npages were already unmapped
            TRACE_UNMAPPED_CHUNK(-1*prev_npages*mparams.page_size);
            ASSERT_RCHUNK_SIZE();
            sp->size -= released;
            m->footprint -= released;
        }

        /* reset top to prev chunk */
        init_top(m, prev, addr_offset - TOP_FOOT_SIZE);
        check_top_chunk(m, m->top);
    }

    // DL region not initialized, do not reset top here
    return (released != 0) ? 1 : 0;
}


#define STACKSIZE 32
inline void RNewAllocator::TraceCallStack()
{
#ifdef TRACING_CALLSTACKS
    TUint32 filteredStack[STACKSIZE];
    TThreadStackInfo info;
    TUint32 *sp = (TUint32*)&sp;
    RThread().StackInfo(info);
    Lock();
    TInt i;
    for (i = 0; i < STACKSIZE; i++) {
        if ((TLinAddr)sp >= info.iBase) break;
        while ((TLinAddr)sp < info.iBase) {
            TUint32 cur = *sp++;
            TUint32 range = cur & 0xF0000000;
            if (range == 0x80000000 || range == 0x70000000) {
                filteredStack[i] = cur;
                break;
            }
        }
    }
    Unlock();
    BTraceContextBig(BTrace::EHeap, BTrace::EHeapCallStack, (TUint32)this, filteredStack, i * 4);
#endif
}
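// Editor's note (an observation, not from the original comments): the loop above walks
// the raw stack words upward and keeps only values whose top nibble is 0x7 or 0x8 --
// a heuristic for ARM code addresses on Symbian (RAM-loaded and ROM code ranges) --
// so filteredStack holds probable return addresses rather than an exact unwound stack.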

size_t getpagesize()
{
    TInt size;
    TInt err = GET_PAGE_SIZE(size);
    if (err != KErrNone)
        return (size_t)0x1000;
    return (size_t)size;
}

#define gm  (&iGlobalMallocState)

RNewAllocator::RNewAllocator(TInt aMaxLength, TInt aAlign, TBool aSingleThread)
// constructor for a fixed heap. Just use DL allocator
    :iMinLength(aMaxLength), iMaxLength(aMaxLength), iOffset(0), iGrowBy(0), iChunkHandle(0),
    iNestingLevel(0), iAllocCount(0), iFailType(ENone), iTestData(NULL), iChunkSize(aMaxLength)
{
    // Validate the requested alignment. (The original code tested __POWER_OF_2(iAlign)
    // here, but iAlign is still uninitialized at this point; aAlign is what the check
    // is meant to inspect.)
    if ((TUint32)aAlign >= sizeof(TAny*) && __POWER_OF_2(aAlign))
    {
        iAlign = aAlign;
    }
    else
    {
        iAlign = 4;
    }
    iPageSize = 0;
    iFlags = aSingleThread ? (ESingleThreaded|EFixedSize) : EFixedSize;

    Init(0, 0, 0);
}

RNewAllocator::RNewAllocator(TInt aChunkHandle, TInt aOffset, TInt aMinLength, TInt aMaxLength, TInt aGrowBy,
                             TInt aAlign, TBool aSingleThread)
    : iMinLength(aMinLength), iMaxLength(aMaxLength), iOffset(aOffset), iChunkHandle(aChunkHandle), iAlign(aAlign), iNestingLevel(0), iAllocCount(0),
      iFailType(ENone), iTestData(NULL), iChunkSize(aMinLength), iHighWaterMark(aMinLength)
{
    iPageSize = malloc_getpagesize;
    __ASSERT_ALWAYS(aOffset >= 0, User::Panic(KDLHeapPanicCategory, ETHeapNewBadOffset));
    iGrowBy = _ALIGN_UP(aGrowBy, iPageSize);
    iFlags = aSingleThread ? ESingleThreaded : 0;

    // Initialise.
    // If the heap is created with aMinLength==aMaxLength then it cannot allocate slab
    // or page memory, so these sub-allocators should be disabled. Otherwise initialise
    // with default values.
    if (aMinLength == aMaxLength)
        Init(0, 0, 0);
    else
        Init(0x3fff, 15, 0x10000); // all slabs, page {32KB}, trim {64KB} // Andrew: Adopting Webkit config?
        //Init(0xabe, 16, iPageSize*4); // slabs {48, 40, 32, 24, 20, 16, 12, 8}, page {64KB}, trim {16KB}
#ifdef TRACING_HEAPS
    RChunk chunk;
    chunk.SetHandle(iChunkHandle);
    TKName chunk_name;
    chunk.FullName(chunk_name);
    BTraceContextBig(BTrace::ETest1, 2, 22, chunk_name.Ptr(), chunk_name.Size());

    TUint32 traceData[4];
    traceData[0] = iChunkHandle;
    traceData[1] = iMinLength;
    traceData[2] = iMaxLength;
    traceData[3] = iAlign;
    BTraceContextN(BTrace::ETest1, 1, (TUint32)this, 11, traceData, sizeof(traceData));
#endif

}

TAny* RNewAllocator::operator new(TUint aSize, TAny* aBase) __NO_THROW
{
    __ASSERT_ALWAYS(aSize >= sizeof(RNewAllocator), HEAP_PANIC(ETHeapNewBadSize));
    RNewAllocator* h = (RNewAllocator*)aBase;
    h->iAlign = 0x80000000; // garbage value
    h->iBase = ((TUint8*)aBase) + aSize;
    return aBase;
}
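// Editor's note (inferred from the call sites above; the parameter meanings are not
// documented here): aBitmapSlab is the slab-size configuration bitmap passed on to
// slab_init(), aPagePower is the log2 of the page-allocator threshold (15 => 32KB,
// 16 => 64KB, 0 disables it; see the "10-1K ... 16-64K" comment below), and
// aTrimThreshold is forwarded to dlmalloc as its trim threshold.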

void RNewAllocator::Init(TInt aBitmapSlab, TInt aPagePower, size_t aTrimThreshold)
{
    __ASSERT_ALWAYS((TUint32)iAlign >= sizeof(TAny*) && __POWER_OF_2(iAlign), HEAP_PANIC(ETHeapNewBadAlignment));

    /* Moved code which does initialization */
    iTop = (TUint8*)this + iMinLength;
    spare_page = 0;
    iAllocCount = 0; // FIXME -- not used anywhere - already initialized to 0 in constructor anyway
    memset(&mparams, 0, sizeof(mparams));

    Init_Dlmalloc(iTop - iBase, 0, aTrimThreshold);

    slab_init(aBitmapSlab);

    /*10-1K,11-2K,12-4k,13-8K,14-16K,15-32K,16-64K*/
    paged_init(aPagePower);

#ifdef TRACING_ALLOCS
    TUint32 traceData[3];
    traceData[0] = aBitmapSlab;
    traceData[1] = aPagePower;
    traceData[2] = aTrimThreshold;
    BTraceContextN(BTrace::ETest1, BTrace::EHeapAlloc, (TUint32)this, 0, traceData, sizeof(traceData));
#endif

}

RNewAllocator::SCell* RNewAllocator::GetAddress(const TAny* aCell) const
//
// As much as possible, check a cell address and backspace it
// to point at the cell header.
//
{
    TLinAddr m = TLinAddr(iAlign - 1);
    __ASSERT_ALWAYS(!(TLinAddr(aCell)&m), HEAP_PANIC(ETHeapBadCellAddress));

    SCell* pC = (SCell*)(((TUint8*)aCell) - EAllocCellSize);
    __CHECK_CELL(pC);

    return pC;
}

TInt RNewAllocator::AllocLen(const TAny* aCell) const
{
    if (ptrdiff(aCell, this) >= 0)
    {
        mchunkptr m = mem2chunk(aCell);
        return chunksize(m) - CHUNK_OVERHEAD; // Andrew: Picking up webkit change.
    }
    if (lowbits(aCell, pagesize) > cellalign)
        return header_size(slab::slabfor(aCell)->header);
    if (lowbits(aCell, pagesize) == cellalign)
        return *(unsigned*)(offset(aCell, -int(cellalign))) - cellalign;
    return paged_descriptor(aCell)->size;
}
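
// Editor's note (a summary of the dispatch below, not part of the original
// documentation): Alloc() picks a sub-allocator purely by request size --
// requests under slab_threshold go to the slab allocator via the sizemap lookup,
// requests under 2^page_threshold go to dlmalloc, and anything larger goes to the
// page allocator, falling back to dlmalloc if page allocation fails.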
TAny* RNewAllocator::Alloc(TInt aSize)
{
    __ASSERT_ALWAYS((TUint)aSize < (KMaxTInt/2), HEAP_PANIC(ETHeapBadAllocatedCellSize));

    TAny* addr;

#ifdef TRACING_ALLOCS
    TInt aCnt = 0;
#endif
    Lock();
    if (aSize < slab_threshold)
    {
        TInt ix = sizemap[(aSize+3)>>2];
        ASSERT(ix != 0xff);
        addr = slab_allocate(slaballoc[ix]);
        if (addr) iTotalAllocSize += slaballoc[ix].size;
    }
    else if ((aSize >> page_threshold) == 0)
    {
#ifdef TRACING_ALLOCS
        aCnt = 1;
#endif
        addr = dlmalloc(aSize);
    }
    else
    {
#ifdef TRACING_ALLOCS
        aCnt = 2;
#endif
        addr = paged_allocate(aSize);
        // Attempt dlmalloc() if paged_allocate() fails. This can improve allocation
        // chances if fragmentation is high in the heap.
        if (!addr) { // paged_allocator failed, try in dlmalloc
            addr = dlmalloc(aSize);
        }
    }

    if (addr) {
        iCellCount++;
        // Increment iTotalAllocSize in memory segment specific code for more accuracy
        //iTotalAllocSize += aSize;
    }
    Unlock();

#ifdef TRACING_ALLOCS
    if (iFlags & ETraceAllocs)
    {
        TUint32 traceData[3];
        traceData[0] = AllocLen(addr);
        traceData[1] = aSize;
        traceData[2] = aCnt;
        BTraceContextN(BTrace::EHeap, BTrace::EHeapAlloc, (TUint32)this, (TUint32)addr, traceData, sizeof(traceData));
        TraceCallStack();
    }
#endif

    return addr;
}

TInt RNewAllocator::Compress()
{
    if (iFlags & EFixedSize)
        return 0;

    Lock();
    dlmalloc_trim(0);
    if (spare_page)
    {
        unmap(spare_page, pagesize);
        spare_page = 0;
    }
    Unlock();
    return 0;
}

void RNewAllocator::Free(TAny* aPtr)
{

#ifdef TRACING_ALLOCS
    TInt aCnt = 0;
#endif
#ifdef ENABLE_DEBUG_TRACE
    RThread me;
    TBuf<100> thName;
    me.FullName(thName);
#endif
    //if (!aPtr) return; //return in case of NULL pointer

    Lock();

    if (!aPtr)
        ;
    else if (ptrdiff(aPtr, this) >= 0)
    {
#ifdef TRACING_ALLOCS
        aCnt = 1;
#endif
        dlfree(aPtr);
    }
    else if (lowbits(aPtr, pagesize) <= cellalign)
    {
#ifdef TRACING_ALLOCS
        aCnt = 2;
#endif
        paged_free(aPtr);
    }
    else
    {
#ifdef TRACING_ALLOCS
        aCnt = 0;
#endif
        slab_free(aPtr);
    }
    iCellCount--;
    Unlock();

#ifdef TRACING_ALLOCS
    if (iFlags & ETraceAllocs)
    {
        TUint32 traceData;
        traceData = aCnt;
        BTraceContextN(BTrace::EHeap, BTrace::EHeapFree, (TUint32)this, (TUint32)aPtr, &traceData, sizeof(traceData));
        TraceCallStack();
    }
#endif
}


void RNewAllocator::Reset()
{
    // TODO free everything
    User::Panic(_L("RNewAllocator"), 1); // this should never be called
}

#ifdef TRACING_ALLOCS
inline void RNewAllocator::TraceReAlloc(TAny* aPtr, TInt aSize, TAny* aNewPtr, TInt aZone)
{
    if (aNewPtr && (iFlags & ETraceAllocs)) {
        TUint32 traceData[3];
        traceData[0] = AllocLen(aNewPtr);
        traceData[1] = aSize;
        traceData[2] = (TUint32) aPtr;
        BTraceContextN(BTrace::EHeap, BTrace::EHeapReAlloc, (TUint32) this, (TUint32) aNewPtr,
                       traceData, sizeof(traceData));
        TraceCallStack();
        // workaround for SAW not handling reallocs properly
        if (aZone >= 0 && aPtr != aNewPtr) {
            BTraceContextN(BTrace::EHeap, BTrace::EHeapFree, (TUint32) this, (TUint32) aPtr,
                           &aZone, sizeof(aZone));
            TraceCallStack();
        }
    }
}
#else
// Q_UNUSED generates code that prevents the compiler optimising out the empty inline function
inline void RNewAllocator::TraceReAlloc(TAny*, TInt, TAny*, TInt)
{}
#endif
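
// Editor's note (an observation about the address tests used below and in Free(),
// not from the original comments): the owning zone of a cell is recovered from its
// address alone. DL memory grows upward from the RNewAllocator object itself, so
// ptrdiff(aPtr, this) >= 0 identifies DL cells; paged cells start at (or within
// cellalign of) a page boundary, so lowbits(aPtr, pagesize) <= cellalign identifies
// them; every remaining address belongs to a slab.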
TAny* RNewAllocator::ReAlloc(TAny* aPtr, TInt aSize, TInt /*aMode = 0*/)
{
    if (ptrdiff(aPtr, this) >= 0)
    {
        // original cell is in DL zone
        if ((aSize >> page_threshold) == 0 || aSize <= chunksize(mem2chunk(aPtr)) - CHUNK_OVERHEAD)
        {
            // new one is below page limit or smaller than old one (so can't be moved)
            Lock();
            TAny* addr = dlrealloc(aPtr, aSize);
            Unlock();
            TraceReAlloc(aPtr, aSize, addr, 2);
            return addr;
        }
    }
    else if (lowbits(aPtr, pagesize) <= cellalign)
    {
        // original cell is either NULL or in paged zone
        if (!aPtr)
            return Alloc(aSize);

        // either the new size is larger (in which case it will still be in paged zone)
        // or it is smaller, but we will never move a shrinking cell so in paged zone
        // must handle [rare] case that aSize == 0, as paged_[re]allocate() will panic
        if (aSize == 0)
            aSize = 1;
        Lock();
        TAny* addr = paged_reallocate(aPtr, aSize);
        Unlock();
        TraceReAlloc(aPtr, aSize, addr, 2);
        return addr;
    }
    else
    {
        // original cell is in slab zone
        // return original if new one smaller
        if (aSize <= header_size(slab::slabfor(aPtr)->header))
            return aPtr;
    }
    // can't do better than allocate/copy/free
    TAny* newp = Alloc(aSize);
    if (newp)
    {
        TInt oldsize = AllocLen(aPtr);
        memcpy(newp, aPtr, oldsize < aSize ? oldsize : aSize);
        Free(aPtr);
    }
    return newp;
}

TInt RNewAllocator::Available(TInt& aBiggestBlock) const
{
    //TODO: consider page and slab allocators

    // this gets free space in the DL region - the C ported code doesn't respect const yet
    RNewAllocator* self = const_cast<RNewAllocator*>(this);
    mallinfo info = self->dlmallinfo();
    aBiggestBlock = info.largestBlock;
    return info.fordblks;
}
TInt RNewAllocator::AllocSize(TInt& aTotalAllocSize) const
{
    aTotalAllocSize = iTotalAllocSize;
    return iCellCount;
}

TInt RNewAllocator::DebugFunction(TInt aFunc, TAny* a1, TAny* /*a2*/)
{
    TInt r = KErrNotSupported;
    TInt* a1int = reinterpret_cast<TInt*>(a1);
    switch (aFunc) {
    case RAllocator::ECount:
        {
            struct mallinfo mi = dlmallinfo();
            *a1int = mi.fordblks;
            r = mi.uordblks;
        }
        break;
    case RAllocator::EMarkStart:
    case RAllocator::EMarkEnd:
    case RAllocator::ESetFail:
    case RAllocator::ECheck:
        r = KErrNone;
        break;
    }
    return r;
}

TInt RNewAllocator::Extension_(TUint /* aExtensionId */, TAny*& /* a0 */, TAny* /* a1 */)
{
    return KErrNotSupported;
}

///////////////////////////////////////////////////////////////////////////////
// imported from dla.cpp
///////////////////////////////////////////////////////////////////////////////

//#include <unistd.h>
//#define DEBUG_REALLOC
#ifdef DEBUG_REALLOC
#include <e32debug.h>
#endif
int RNewAllocator::init_mparams(size_t aTrimThreshold /*= DEFAULT_TRIM_THRESHOLD*/)
{
    if (mparams.page_size == 0)
    {
        size_t s;
        mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
        mparams.trim_threshold = aTrimThreshold;
#if MORECORE_CONTIGUOUS
        mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
#else /* MORECORE_CONTIGUOUS */
        mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
#endif /* MORECORE_CONTIGUOUS */

        s = (size_t)0x58585858U;
        ACQUIRE_MAGIC_INIT_LOCK(&mparams);
        if (mparams.magic == 0) {
            mparams.magic = s;
            /* Set up lock for main malloc area */
            INITIAL_LOCK(&gm->mutex);
            gm->mflags = mparams.default_mflags;
        }
        RELEASE_MAGIC_INIT_LOCK(&mparams);

        mparams.page_size = malloc_getpagesize;

        mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
                               DEFAULT_GRANULARITY : mparams.page_size);

        /* Sanity-check configuration:
           size_t must be unsigned and as wide as pointer type.
           ints must be at least 4 bytes.
           alignment must be at least 8.
           Alignment, min chunk size, and page size must all be powers of 2.
         */
        if ((sizeof(size_t) != sizeof(TUint8*)) ||
            (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
            (sizeof(int) < 4) ||
            (MALLOC_ALIGNMENT < (size_t)8U) ||
            ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
            ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
            ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
            ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0))
            ABORT;
    }
    return 0;
}

void RNewAllocator::init_bins(mstate m) {
    /* Establish circular links for smallbins */
    bindex_t i;
    for (i = 0; i < NSMALLBINS; ++i) {
        sbinptr bin = smallbin_at(m, i);
        bin->fd = bin->bk = bin;
    }
}
/* ---------------------------- malloc support --------------------------- */
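// Editor's note (context, inferred from the page_not_in_memory()/map_chunk_pages()
// calls woven into the routines below): this port extends stock dlmalloc so that the
// pages backing a large free chunk can be decommitted while the chunk stays in its
// treebin; before such a chunk is handed out again, tmalloc_large()/tmalloc_small()
// must commit its pages back, and may fail with NULL if the commit fails.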

/* allocate a large request from the best fitting chunk in a treebin */
void* RNewAllocator::tmalloc_large(mstate m, size_t nb) {
    tchunkptr v = 0;
    size_t rsize = -nb; /* Unsigned negation */
    tchunkptr t;
    bindex_t idx;
    compute_tree_index(nb, idx);

    if ((t = *treebin_at(m, idx)) != 0) {
        /* Traverse tree for this bin looking for node with size == nb */
        size_t sizebits = nb << leftshift_for_tree_index(idx);
        tchunkptr rst = 0;  /* The deepest untaken right subtree */
        for (;;) {
            tchunkptr rt;
            size_t trem = chunksize(t) - nb;
            if (trem < rsize) {
                v = t;
                if ((rsize = trem) == 0)
                    break;
            }
            rt = t->child[1];
            t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
            if (rt != 0 && rt != t)
                rst = rt;
            if (t == 0) {
                t = rst; /* set t to least subtree holding sizes > nb */
                break;
            }
            sizebits <<= 1;
        }
    }
    if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
        binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
        if (leftbits != 0) {
            bindex_t i;
            binmap_t leastbit = least_bit(leftbits);
            compute_bit2idx(leastbit, i);
            t = *treebin_at(m, i);
        }
    }
    while (t != 0) { /* find smallest of tree or subtree */
        size_t trem = chunksize(t) - nb;
        if (trem < rsize) {
            rsize = trem;
            v = t;
        }
        t = leftmost_child(t);
    }
    /* If dv is a better fit, return 0 so malloc will use it */
    if (v != 0) {
        if (RTCHECK(ok_address(m, v))) { /* split */
            mchunkptr r = chunk_plus_offset(v, nb);
            assert(chunksize(v) == rsize + nb);

            /* check for chunk memory page-in */
            size_t npages_out = 0;
            if (page_not_in_memory(v, chunksize(v))) {
                if (!is_small(rsize) && rsize >= CHUNK_PAGEOUT_THESHOLD) {
                    // partial chunk page mapping
                    TInt result = map_chunk_pages_partial(v, chunksize(v), (tchunkptr)r, rsize);
                    if (result < 0) return 0; // Failed to commit RAM
                    else npages_out = (size_t)result;
                }
                else {
                    // full chunk page map needed
                    TInt err = map_chunk_pages(v, chunksize(v));
                    if (err != KErrNone) return 0; // Failed to commit RAM
                }
            }

            if (RTCHECK(ok_next(v, r))) {
                unlink_large_chunk(m, v);
                if (rsize < free_chunk_threshold) // exhaust if less than slab threshold
                    set_inuse_and_pinuse(m, v, (rsize + nb));
                else {
                    set_size_and_pinuse_of_inuse_chunk(m, v, nb);
                    set_size_and_pinuse_of_free_chunk(r, rsize);
                    insert_chunk(m, r, rsize, npages_out);
                }
                return chunk2mem(v);
            }
        }
#if !INSECURE // conditional statement to keep the compiler happy. code is reachable if RTCHECK evaluates to false
        CORRUPTION_ERROR_ACTION(m);
#endif
    }
    return 0;
}

/* allocate a small request from the best fitting chunk in a treebin */
void* RNewAllocator::tmalloc_small(mstate m, size_t nb) {
    tchunkptr t, v;
    size_t rsize;
    bindex_t i;
    binmap_t leastbit = least_bit(m->treemap);
    compute_bit2idx(leastbit, i);

    v = t = *treebin_at(m, i);
    rsize = chunksize(t) - nb;

    while ((t = leftmost_child(t)) != 0) {
        size_t trem = chunksize(t) - nb;
        if (trem < rsize) {
            rsize = trem;
            v = t;
        }
    }

    if (RTCHECK(ok_address(m, v))) {
        mchunkptr r = chunk_plus_offset(v, nb);
        assert(chunksize(v) == rsize + nb);

        /* check for chunk memory page-in */
        if (page_not_in_memory(v, chunksize(v))) {
            TInt err = map_chunk_pages(v, chunksize(v));
            if (err != KErrNone) return 0; // Failed to commit RAM
        }

        if (RTCHECK(ok_next(v, r))) {
            unlink_large_chunk(m, v);
            if (rsize < free_chunk_threshold) // exhaust if less than slab threshold
                set_inuse_and_pinuse(m, v, (rsize + nb));
            else {
                set_size_and_pinuse_of_inuse_chunk(m, v, nb);
                set_size_and_pinuse_of_free_chunk(r, rsize);
                insert_chunk(m, r, rsize, 0);
            }
            return chunk2mem(v);
        }
    }
#if !INSECURE // conditional statement to keep the compiler happy. code is reachable if RTCHECK evaluates to false
    CORRUPTION_ERROR_ACTION(m);
    return 0;
#endif
}

void RNewAllocator::init_top(mstate m, mchunkptr p, size_t psize)
{
    /* Ensure alignment */
    size_t offset = align_offset(chunk2mem(p));
    p = (mchunkptr)((TUint8*)p + offset);
    psize -= offset;
    m->top = p;
    m->topsize = psize;
    p->head = psize | PINUSE_BIT;
    /* set size of fake trailing chunk holding overhead space only once */
    mchunkptr chunkPlusOff = chunk_plus_offset(p, psize);
    chunkPlusOff->head = TOP_FOOT_SIZE;
    m->trim_check = mparams.trim_threshold; /* reset on each update */
}

void* RNewAllocator::internal_realloc(mstate m, void* oldmem, size_t bytes)
{
    if (bytes >= MAX_REQUEST) {
        MALLOC_FAILURE_ACTION;
        return 0;
    }
    if (!PREACTION(m)) {
        mchunkptr oldp = mem2chunk(oldmem);
        size_t oldsize = chunksize(oldp);
        mchunkptr next = chunk_plus_offset(oldp, oldsize);
        mchunkptr newp = 0;
        void* extra = 0;

        /* Try to either shrink or extend into top. Else malloc-copy-free */

        if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
                    ok_next(oldp, next) && ok_pinuse(next))) {
            size_t nb = request2size(bytes);
            if (is_mmapped(oldp))
                newp = mmap_resize(m, oldp, nb);
            else if (oldsize >= nb) { /* already big enough */
                size_t rsize = oldsize - nb;
                newp = oldp;
                if (rsize >= free_chunk_threshold) {
                    mchunkptr remainder = chunk_plus_offset(newp, nb);
                    set_inuse(m, newp, nb);
                    set_inuse(m, remainder, rsize);
                    extra = chunk2mem(remainder);
                    iTotalAllocSize -= rsize;
                }
            }
            /* AMOD: modified to optimize. (The original nested a second, identical
               size check inside this branch; it was redundant and has been folded
               into the condition here.) */
            else if (next == m->top && oldsize + m->topsize > nb)
            {
                /* Expand into top */
                size_t newsize = oldsize + m->topsize;
                size_t newtopsize = newsize - nb;
                mchunkptr newtop = chunk_plus_offset(oldp, nb);
                set_inuse(m, oldp, nb);
                newtop->head = newtopsize | PINUSE_BIT;
                m->top = newtop;
                m->topsize = newtopsize;
                iTotalAllocSize += nb - oldsize;
                newp = oldp;
            }
        }
        else {
            USAGE_ERROR_ACTION(m, oldmem);
            POSTACTION(m);
            return 0;
        }

        POSTACTION(m);

        if (newp != 0) {
            if (extra != 0) {
                internal_free(m, extra);
            }
            check_inuse_chunk(m, newp);
            return chunk2mem(newp);
        }
        else {
            void* newmem = internal_malloc(m, bytes);
            if (newmem != 0) {
                size_t oc = oldsize - overhead_for(oldp);
                memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
                internal_free(m, oldmem);
            }
            return newmem;
        }
    }
#if USE_LOCKS // keep the compiler happy
    return 0;
#endif
}
/* ----------------------------- statistics ------------------------------ */
mallinfo RNewAllocator::internal_mallinfo(mstate m) {
    struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    TInt chunkCnt = 0;
    if (!PREACTION(m)) {
        check_malloc_state(m);
        if (is_initialized(m)) {
            size_t nfree = SIZE_T_ONE; /* top always free */
            size_t mfree = m->topsize + TOP_FOOT_SIZE;
            size_t sum = mfree;
            msegmentptr s = &m->seg;
            while (s != 0) {
                mchunkptr q = align_as_chunk(s->base);
                chunkCnt++;
                while (segment_holds(s, q) &&
                       q != m->top && q->head != FENCEPOST_HEAD) {
                    size_t sz = chunksize(q);
                    sum += sz;
                    if (!cinuse(q)) {
                        if (sz > nm.largestBlock)
                            nm.largestBlock = sz;
                        mfree += sz;
                        ++nfree;
                    }
                    q = next_chunk(q);
                }
                s = s->next;
            }
            nm.arena = sum;
            nm.ordblks = nfree;
            nm.hblkhd = m->footprint - sum;
            nm.usmblks = m->max_footprint;
            nm.uordblks = m->footprint - mfree;
            nm.fordblks = mfree;
            nm.keepcost = m->topsize;
            nm.cellCount = chunkCnt; /* number of chunks allocated */
        }
        POSTACTION(m);
    }
    return nm;
}

void RNewAllocator::internal_malloc_stats(mstate m) {
    if (!PREACTION(m)) {
        size_t fp = 0;
        size_t used = 0;
        check_malloc_state(m);
        if (is_initialized(m)) {
            msegmentptr s = &m->seg;
            //size_t maxfp = m->max_footprint;
            fp = m->footprint;
            used = fp - (m->topsize + TOP_FOOT_SIZE);

            while (s != 0) {
                mchunkptr q = align_as_chunk(s->base);
                while (segment_holds(s, q) &&
                       q != m->top && q->head != FENCEPOST_HEAD) {
                    if (!cinuse(q))
                        used -= chunksize(q);
                    q = next_chunk(q);
                }
                s = s->next;
            }
        }
        POSTACTION(m);
    }
}
/* support for mallopt */
int RNewAllocator::change_mparam(int param_number, int value) {
    size_t val = (size_t)value;
    init_mparams(DEFAULT_TRIM_THRESHOLD);
    switch (param_number) {
    case M_TRIM_THRESHOLD:
        mparams.trim_threshold = val;
        return 1;
    case M_GRANULARITY:
        if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
            mparams.granularity = val;
            return 1;
        }
        else
            return 0;
    case M_MMAP_THRESHOLD:
        mparams.mmap_threshold = val;
        return 1;
    default:
        return 0;
    }
}
/* Get memory from system using MORECORE or MMAP */
void* RNewAllocator::sys_alloc(mstate m, size_t nb)
{
    TUint8* tbase = CMFAIL;
    size_t tsize = 0;
    flag_t mmap_flag = 0;
    //init_mparams(); /* No need to do init_mparams here */
    /* Directly map large chunks */
    if (use_mmap(m) && nb >= mparams.mmap_threshold)
    {
        void* mem = mmap_alloc(m, nb);
        if (mem != 0)
            return mem;
    }
    /*
      Try getting memory in any of three ways (in most-preferred to
      least-preferred order):
      1. A call to MORECORE that can normally contiguously extend memory.
         (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
         main space is mmapped or a previous contiguous call failed)
      2. A call to MMAP new space (disabled if not HAVE_MMAP).
         Note that under the default settings, if MORECORE is unable to
         fulfill a request, and HAVE_MMAP is true, then mmap is
         used as a noncontiguous system allocator. This is a useful backup
         strategy for systems with holes in address spaces -- in this case
         sbrk cannot contiguously expand the heap, but mmap may be able to
         find space.
      3. A call to MORECORE that cannot usually contiguously extend memory.
         (disabled if not HAVE_MORECORE)
    */
    /* Trying to allocate the memory */
    if (MORECORE_CONTIGUOUS && !use_noncontiguous(m))
    {
        TUint8* br = CMFAIL;
        msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (TUint8*)m->top);
        size_t asize = 0;
        ACQUIRE_MORECORE_LOCK(m);
        if (ss == 0)
        { /* First time through or recovery */
            TUint8* base = (TUint8*)CALL_MORECORE(0);
            if (base != CMFAIL)
            {
                asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
                /* Adjust to end on a page boundary */
                if (!is_page_aligned(base))
                    asize += (page_align((size_t)base) - (size_t)base);
                /* Can't call MORECORE if size is negative when treated as signed */
                if (asize < HALF_MAX_SIZE_T && (br = (TUint8*)(CALL_MORECORE(asize))) == base)
                {
                    tbase = base;
                    tsize = asize;
                }
            }
        }
        else
        {
            /* Subtract out existing available top space from MORECORE request. */
            asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
            /* Use mem here only if it did continuously extend old space */
            if (asize < HALF_MAX_SIZE_T &&
                (br = (TUint8*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
                tbase = br;
                tsize = asize;
            }
        }
        if (tbase == CMFAIL) { /* Cope with partial failure */
            if (br != CMFAIL) { /* Try to use/extend the space we did get */
                if (asize < HALF_MAX_SIZE_T &&
                    asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
                    size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
                    if (esize < HALF_MAX_SIZE_T) {
                        TUint8* end = (TUint8*)CALL_MORECORE(esize);
                        if (end != CMFAIL)
                            asize += esize;
                        else { /* Can't use; try to release */
                            CALL_MORECORE(-asize);
                            br = CMFAIL;
                        }
                    }
                }
            }
            if (br != CMFAIL) { /* Use the space we did get */
                tbase = br;
                tsize = asize;
            }
            else
                disable_contiguous(m); /* Don't try contiguous path in the future */
        }
        RELEASE_MORECORE_LOCK(m);
    }
    if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
        size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
        size_t rsize = granularity_align(req);
        if (rsize > nb) { /* Fail if wraps around zero */
            TUint8* mp = (TUint8*)(CALL_MMAP(rsize));
            if (mp != CMFAIL) {
                tbase = mp;
                tsize = rsize;
                mmap_flag = IS_MMAPPED_BIT;
            }
        }
    }
    if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
        size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
        if (asize < HALF_MAX_SIZE_T) {
            TUint8* br = CMFAIL;
            TUint8* end = CMFAIL;
            ACQUIRE_MORECORE_LOCK(m);
            br = (TUint8*)(CALL_MORECORE(asize));
            end = (TUint8*)(CALL_MORECORE(0));
            RELEASE_MORECORE_LOCK(m);
            if (br != CMFAIL && end != CMFAIL && br < end) {
                size_t ssize = end - br;
                if (ssize > nb + TOP_FOOT_SIZE) {
                    tbase = br;
                    tsize = ssize;
                }
            }
        }
    }
    if (tbase != CMFAIL) {
        if ((m->footprint += tsize) > m->max_footprint)
            m->max_footprint = m->footprint;
        if (!is_initialized(m)) { /* first-time initialization */
            m->seg.base = m->least_addr = tbase;
            m->seg.size = tsize;
            m->seg.sflags = mmap_flag;
            m->magic = mparams.magic;
            init_bins(m);
            if (is_global(m))
                init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
            else {
                /* Offset top by embedded malloc_state */
                mchunkptr mn = next_chunk(mem2chunk(m));
                init_top(m, mn, (size_t)((tbase + tsize) - (TUint8*)mn) - TOP_FOOT_SIZE);
            }
        }
        else {
            /* Try to merge with an existing segment */
            msegmentptr sp = &m->seg;
            while (sp != 0 && tbase != sp->base + sp->size)
                sp = sp->next;
            if (sp != 0 && !is_extern_segment(sp) &&
                (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
                segment_holds(sp, m->top))
            { /* append */
                sp->size += tsize;
                init_top(m, m->top, m->topsize + tsize);
            }
            else {
                if (tbase < m->least_addr)
                    m->least_addr = tbase;
                sp = &m->seg;
                while (sp != 0 && sp->base != tbase + tsize)
                    sp = sp->next;
                if (sp != 0 &&
                    !is_extern_segment(sp) &&
                    (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
                    TUint8* oldbase = sp->base;
                    sp->base = tbase;
                    sp->size += tsize;
                    return prepend_alloc(m, tbase, oldbase, nb);
                }
                else
                    add_segment(m, tbase, tsize, mmap_flag);
            }
        }
        if (nb < m->topsize) { /* Allocate from new or extended top space */
            size_t rsize = m->topsize -= nb;
            mchunkptr p = m->top;
            mchunkptr r = m->top = chunk_plus_offset(p, nb);
            r->head = rsize | PINUSE_BIT;
            set_size_and_pinuse_of_inuse_chunk(m, p, nb);
            check_top_chunk(m, m->top);
            check_malloced_chunk(m, chunk2mem(p), nb);
            return chunk2mem(p);
        }
    }
    /* need to check this */
    MEM_DUMP_OOM_LOGS(nb, "sys_alloc:: FAILED to get more memory");

    //errno = -1;
    return 0;
}
msegmentptr RNewAllocator::segment_holding(mstate m, TUint8* addr) {
    msegmentptr sp = &m->seg;
    for (;;) {
        if (addr >= sp->base && addr < sp->base + sp->size)
            return sp;
        if ((sp = sp->next) == 0)
            return 0;
    }
}
/* Unlink the first chunk from a smallbin */
inline void RNewAllocator::unlink_first_small_chunk(mstate M, mchunkptr B, mchunkptr P, bindex_t& I)
{
    mchunkptr F = P->fd;
    assert(P != B);
    assert(P != F);
    assert(chunksize(P) == small_index2size(I));
    if (B == F)
        clear_smallmap(M, I);
    else if (RTCHECK(ok_address(M, F))) {
        B->fd = F;
        F->bk = B;
    }
    else {
        CORRUPTION_ERROR_ACTION(M);
    }
}
/* Link a free chunk into a smallbin */
inline void RNewAllocator::insert_small_chunk(mstate M, mchunkptr P, size_t S)
{
    bindex_t I = small_index(S);
    mchunkptr B = smallbin_at(M, I);
    mchunkptr F = B;
    assert(S >= MIN_CHUNK_SIZE);
    if (!smallmap_is_marked(M, I))
        mark_smallmap(M, I);
    else if (RTCHECK(ok_address(M, B->fd)))
        F = B->fd;
    else {
        CORRUPTION_ERROR_ACTION(M);
    }
    B->fd = P;
    F->bk = P;
    P->fd = F;
    P->bk = B;
}


inline void RNewAllocator::insert_chunk(mstate M, mchunkptr P, size_t S, size_t NPAGES)
{
    if (is_small(S))
        insert_small_chunk(M, P, S);
    else {
        tchunkptr TP = (tchunkptr)(P);
        insert_large_chunk(M, TP, S, NPAGES);
    }
}

inline void RNewAllocator::unlink_large_chunk(mstate M, tchunkptr X)
{
    tchunkptr XP = X->parent;
    tchunkptr R;
    reset_tchunk_mem_pageout(X); // clear chunk pageout flag
    if (X->bk != X) {
        tchunkptr F = X->fd;
        R = X->bk;
        if (RTCHECK(ok_address(M, F))) {
            F->bk = R;
            R->fd = F;
        }
        else {
            CORRUPTION_ERROR_ACTION(M);
        }
    }
    else {
        tchunkptr* RP;
        if (((R = *(RP = &(X->child[1]))) != 0) ||
            ((R = *(RP = &(X->child[0]))) != 0)) {
            tchunkptr* CP;
            while ((*(CP = &(R->child[1])) != 0) ||
                   (*(CP = &(R->child[0])) != 0)) {
                R = *(RP = CP);
            }
            if (RTCHECK(ok_address(M, RP)))
                *RP = 0;
            else {
                CORRUPTION_ERROR_ACTION(M);
            }
        }
    }
    if (XP != 0) {
        tbinptr* H = treebin_at(M, X->index);
        if (X == *H) {
            if ((*H = R) == 0)
                clear_treemap(M, X->index);
        }
        else if (RTCHECK(ok_address(M, XP))) {
            if (XP->child[0] == X)
                XP->child[0] = R;
            else
                XP->child[1] = R;
        }
        else
            CORRUPTION_ERROR_ACTION(M);
        if (R != 0) {
            if (RTCHECK(ok_address(M, R))) {
                tchunkptr C0, C1;
                R->parent = XP;
                if ((C0 = X->child[0]) != 0) {
                    if (RTCHECK(ok_address(M, C0))) {
                        R->child[0] = C0;
                        C0->parent = R;
                    }
                    else
                        CORRUPTION_ERROR_ACTION(M);
                }
                if ((C1 = X->child[1]) != 0) {
                    if (RTCHECK(ok_address(M, C1))) {
                        R->child[1] = C1;
                        C1->parent = R;
                    }
                    else
                        CORRUPTION_ERROR_ACTION(M);
                }
            }
            else
                CORRUPTION_ERROR_ACTION(M);
        }
    }
}

/* Unlink a chunk from a smallbin */
inline void RNewAllocator::unlink_small_chunk(mstate M, mchunkptr P, size_t S)
{
    mchunkptr F = P->fd;
    mchunkptr B = P->bk;
    bindex_t I = small_index(S);
    assert(P != B);
    assert(P != F);
    assert(chunksize(P) == small_index2size(I));
    if (F == B)
        clear_smallmap(M, I);
    else if (RTCHECK((F == smallbin_at(M, I) || ok_address(M, F)) &&
                     (B == smallbin_at(M, I) || ok_address(M, B)))) {
        F->bk = B;
        B->fd = F;
    }
    else {
        CORRUPTION_ERROR_ACTION(M);
    }
}

inline void RNewAllocator::unlink_chunk(mstate M, mchunkptr P, size_t S)
{
    if (is_small(S))
        unlink_small_chunk(M, P, S);
    else {
        tchunkptr TP = (tchunkptr)(P);
        unlink_large_chunk(M, TP);
    }
}

inline void RNewAllocator::compute_tree_index(size_t S, bindex_t& I)
{
    size_t X = S >> TREEBIN_SHIFT;
    if (X == 0)
        I = 0;
    else if (X > 0xFFFF)
        I = NTREEBINS-1;
    else {
        unsigned int Y = (unsigned int)X;
        unsigned int N = ((Y - 0x100) >> 16) & 8;
        unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;
        N += K;
        N += K = (((Y <<= K) - 0x4000) >> 16) & 2;
        K = 14 - N + ((Y <<= K) >> 15);
        I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));
    }
}
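// Editor's note (an explanation of the bit tricks above, not original commentary):
// the N/K cascade is a branchless binary search for the highest set bit of Y -- each
// step subtracts a power of two and lets the borrow bit, shifted back down, reveal
// whether Y reaches that magnitude -- so K ends up as floor(log2(X)). The final line
// then maps a chunk size to its treebin: two bins per power-of-two range, with the
// bit just below the top bit of S selecting the upper or lower half.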

/* ------------------------- Operations on trees ------------------------- */

/* Insert chunk into tree */
inline void RNewAllocator::insert_large_chunk(mstate M, tchunkptr X, size_t S, size_t NPAGES)
{
  tbinptr* H;
  bindex_t I;
  compute_tree_index(S, I);
  H = treebin_at(M, I);
  X->index = I;
  X->child[0] = X->child[1] = 0;

  if (NPAGES) { set_tchunk_mem_pageout(X, NPAGES) }
  else { reset_tchunk_mem_pageout(X) }

  if (!treemap_is_marked(M, I)) {
    mark_treemap(M, I);
    *H = X;
    X->parent = (tchunkptr)H;
    X->fd = X->bk = X;
  }
  else {
    tchunkptr T = *H;
    size_t K = S << leftshift_for_tree_index(I);
    for (;;) {
      if (chunksize(T) != S) {
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);
        K <<= 1;
        if (*C != 0)
          T = *C;
        else if (RTCHECK(ok_address(M, C))) {
          *C = X;
          X->parent = T;
          X->fd = X->bk = X;
          break;
        }
        else {
          CORRUPTION_ERROR_ACTION(M);
          break;
        }
      }
      else {
        tchunkptr F = T->fd;
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {
          T->fd = F->bk = X;
          X->fd = F;
          X->bk = T;
          X->parent = 0;
          break;
        }
        else {
          CORRUPTION_ERROR_ACTION(M);
          break;
        }
      }
    }
  }
}

/*
  Unlink steps:

  1. If x is a chained node, unlink it from its same-sized fd/bk links
     and choose its bk node as its replacement.
  2. If x was the last node of its size, but not a leaf node, it must
     be replaced with a leaf node (not merely one with an open left or
     right), to make sure that lefts and rights of descendants
     correspond properly to bit masks. We use the rightmost descendant
     of x. We could use any other leaf, but this is easy to locate and
     tends to counteract removal of leftmosts elsewhere, and so keeps
     paths shorter than minimally guaranteed. This doesn't loop much
     because on average a node in a tree is near the bottom.
  3. If x is the base of a chain (i.e., has parent links) relink
     x's parent and children to x's replacement (or null if none).
*/

/* Replace dv node, binning the old one */
/* Used only when dvsize known to be small */
inline void RNewAllocator::replace_dv(mstate M, mchunkptr P, size_t S)
{
  size_t DVS = M->dvsize;
  if (DVS != 0) {
    mchunkptr DV = M->dv;
    assert(is_small(DVS));
    insert_small_chunk(M, DV, DVS);
  }
  M->dvsize = S;
  M->dv = P;
}

inline void RNewAllocator::compute_bit2idx(binmap_t X, bindex_t& I)
{
  unsigned int Y = X - 1;
  unsigned int K = Y >> (16-4) & 16;
  unsigned int N = K;        Y >>= K;
  N += K = Y >> (8-3) & 8;   Y >>= K;
  N += K = Y >> (4-2) & 4;   Y >>= K;
  N += K = Y >> (2-1) & 2;   Y >>= K;
  N += K = Y >> (1-0) & 1;   Y >>= K;
  I = (bindex_t)(N + Y);
}
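
/*
  Worked example (illustrative note, not from the original source):
  compute_bit2idx converts a one-bit binmap mask into the index of its set
  bit. For X = 0x20: Y = 0x1F; the 16- and 8-bit stages contribute 0, the
  4-bit stage contributes K = 4 (Y shrinks to 1), and the 2- and 1-bit
  stages contribute 0, so I = N + Y = 4 + 1 = 5, the position of the bit.
*/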

void RNewAllocator::add_segment(mstate m, TUint8* tbase, size_t tsize, flag_t mmapped) {
  /* Determine locations and sizes of segment, fenceposts, old top */
  TUint8* old_top = (TUint8*)m->top;
  msegmentptr oldsp = segment_holding(m, old_top);
  TUint8* old_end = oldsp->base + oldsp->size;
  size_t ssize = pad_request(sizeof(struct malloc_segment));
  TUint8* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  size_t offset = align_offset(chunk2mem(rawsp));
  TUint8* asp = rawsp + offset;
  TUint8* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  mchunkptr sp = (mchunkptr)csp;
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
  mchunkptr p = tnext;
  int nfences = 0;

  /* reset top to new space */
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);

  /* Set up segment record */
  assert(is_aligned(ss));
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  *ss = m->seg; /* Push current record */
  m->seg.base = tbase;
  m->seg.size = tsize;
  m->seg.sflags = mmapped;
  m->seg.next = ss;

  /* Insert trailing fenceposts */
  for (;;) {
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
    p->head = FENCEPOST_HEAD;
    ++nfences;
    if ((TUint8*)(&(nextp->head)) < old_end)
      p = nextp;
    else
      break;
  }
  assert(nfences >= 2);

  /* Insert the rest of old top into a bin as an ordinary free chunk */
  if (csp != old_top) {
    mchunkptr q = (mchunkptr)old_top;
    size_t psize = csp - old_top;
    mchunkptr tn = chunk_plus_offset(q, psize);
    set_free_with_pinuse(q, psize, tn);
    insert_chunk(m, q, psize, 0);
  }

  check_top_chunk(m, m->top);
}


void* RNewAllocator::prepend_alloc(mstate m, TUint8* newbase, TUint8* oldbase,
                                   size_t nb) {
  mchunkptr p = align_as_chunk(newbase);
  mchunkptr oldfirst = align_as_chunk(oldbase);
  size_t psize = (TUint8*)oldfirst - (TUint8*)p;
  mchunkptr q = chunk_plus_offset(p, nb);
  size_t qsize = psize - nb;
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);

  assert((TUint8*)oldfirst > (TUint8*)q);
  assert(pinuse(oldfirst));
  assert(qsize >= MIN_CHUNK_SIZE);

  /* consolidate remainder with first chunk of old base */
  if (oldfirst == m->top) {
    size_t tsize = m->topsize += qsize;
    m->top = q;
    q->head = tsize | PINUSE_BIT;
    check_top_chunk(m, q);
  }
  else {
    if (!cinuse(oldfirst)) {
      size_t nsize = chunksize(oldfirst);

      /* check for chunk memory page-in */
      if (page_not_in_memory(oldfirst, nsize))
        map_chunk_pages((tchunkptr)oldfirst, nsize); // error ignored; branch not reachable

      unlink_chunk(m, oldfirst, nsize);
      oldfirst = chunk_plus_offset(oldfirst, nsize);
      qsize += nsize;
    }
    set_free_with_pinuse(q, qsize, oldfirst);
    insert_chunk(m, q, qsize, 0);
    check_free_chunk(m, q);
  }

  check_malloced_chunk(m, chunk2mem(p), nb);
  return chunk2mem(p);
}

void* RNewAllocator::mmap_alloc(mstate m, size_t nb) {
  size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  if (mmsize > nb) { /* Check for wrap around 0 */
    TUint8* mm = (TUint8*)(DIRECT_MMAP(mmsize));
    if (mm != CMFAIL) {
      size_t offset = align_offset(chunk2mem(mm));
      size_t psize = mmsize - offset - MMAP_FOOT_PAD;
      mchunkptr p = (mchunkptr)(mm + offset);
      p->prev_foot = offset | IS_MMAPPED_BIT;
      (p)->head = (psize|CINUSE_BIT);
      mark_inuse_foot(m, p, psize);
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;

      if (mm < m->least_addr)
        m->least_addr = mm;
      if ((m->footprint += mmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      assert(is_aligned(chunk2mem(p)));
      check_mmapped_chunk(m, p);
      return chunk2mem(p);
    }
  }
  return 0;
}
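
/*
  Layout note (illustrative summary derived from the code above, not a
  comment from the original source): a directly mmapped chunk stores its
  alignment slop in prev_foot together with IS_MMAPPED_BIT, so dlfree can
  recover the true mapping base as (char*)p - (p->prev_foot & ~IS_MMAPPED_BIT).
  The two trailing heads (FENCEPOST_HEAD and 0) act as fenceposts so the
  coalescing code never walks off the end of the mapping.
*/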

int RNewAllocator::sys_trim(mstate m, size_t pad)
{
  size_t released = 0;
  if (pad < MAX_REQUEST && is_initialized(m)) {
    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */

    if (m->topsize > pad) {
      /* Shrink top space in granularity-size units, keeping at least one */
      size_t unit = mparams.granularity;
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit;
      msegmentptr sp = segment_holding(m, (TUint8*)m->top);

      if (!is_extern_segment(sp)) {
        if (is_mmapped_segment(sp)) {
          if (HAVE_MMAP &&
              sp->size >= extra &&
              !has_segment_link(m, sp)) { /* can't shrink if pinned */
            /*size_t newsize = sp->size - extra; */
            /* Prefer mremap, fall back to munmap */
            if ((CALL_MREMAP(sp->base, sp->size, sp->size - extra, 0) != MFAIL) ||
                (CALL_MUNMAP(sp->base + sp->size - extra, extra) == 0)) {
              released = extra;
            }
          }
        }
        else if (HAVE_MORECORE) {
          if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
            extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
          ACQUIRE_MORECORE_LOCK(m);
          {
            /* Make sure end of memory is where we last set it. */
            TUint8* old_br = (TUint8*)(CALL_MORECORE(0));
            if (old_br == sp->base + sp->size) {
              TUint8* rel_br = (TUint8*)(CALL_MORECORE(-extra));
              TUint8* new_br = (TUint8*)(CALL_MORECORE(0));
              if (rel_br != CMFAIL && new_br < old_br)
                released = old_br - new_br;
            }
          }
          RELEASE_MORECORE_LOCK(m);
        }
      }

      if (released != 0) {
        sp->size -= released;
        m->footprint -= released;
        init_top(m, m->top, m->topsize - released);
        check_top_chunk(m, m->top);
      }
    }

    /* Unmap any unused mmapped segments */
    if (HAVE_MMAP)
      released += release_unused_segments(m);

    /* On failure, disable autotrim to avoid repeated failed future calls */
    if (released == 0)
      m->trim_check = MAX_SIZE_T;
  }

  return (released != 0)? 1 : 0;
}
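
/*
  Illustrative arithmetic (not from the original source): the 'extra'
  computation trims top down in whole granularity units while always
  keeping at least one unit resident. E.g. with a hypothetical 64KB
  granularity and topsize - pad = 200000 bytes, extra becomes
  ((200000 + 65535)/65536 - 1) * 65536 = 3 * 65536 = 196608, leaving
  the remaining ~3.3KB plus pad in the top chunk.
*/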

inline int RNewAllocator::has_segment_link(mstate m, msegmentptr ss)
{
  msegmentptr sp = &m->seg;
  for (;;) {
    if ((TUint8*)sp >= ss->base && (TUint8*)sp < ss->base + ss->size)
      return 1;
    if ((sp = sp->next) == 0)
      return 0;
  }
}

/* Unmap and unlink any mmapped segments that don't contain used chunks */
size_t RNewAllocator::release_unused_segments(mstate m)
{
  size_t released = 0;
  msegmentptr pred = &m->seg;
  msegmentptr sp = pred->next;
  while (sp != 0) {
    TUint8* base = sp->base;
    size_t size = sp->size;
    msegmentptr next = sp->next;
    if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
      mchunkptr p = align_as_chunk(base);
      size_t psize = chunksize(p);
      /* Can unmap if first chunk holds entire segment and not pinned */
      if (!cinuse(p) && (TUint8*)p + psize >= base + size - TOP_FOOT_SIZE) {
        tchunkptr tp = (tchunkptr)p;
        size_t npages_out = tp->npages;
        assert(segment_holds(sp, (TUint8*)sp));
        unlink_large_chunk(m, tp);
        if (CALL_MUNMAP(base, size) == 0) {
          released += size;
          m->footprint -= size;
          /* unlink obsoleted record */
          sp = pred;
          sp->next = next;
        }
        else { /* back out if cannot unmap */
          insert_large_chunk(m, tp, psize, npages_out);
        }
      }
    }
    pred = sp;
    sp = next;
  } /* end of while */
  return released;
}

/* Realloc using mmap */
inline mchunkptr RNewAllocator::mmap_resize(mstate m, mchunkptr oldp, size_t nb)
{
  size_t oldsize = chunksize(oldp);
  if (is_small(nb)) /* Can't shrink mmap regions below small size */
    return 0;
  /* Keep old chunk if big enough but not too big */
  if (oldsize >= nb + SIZE_T_SIZE &&
      (oldsize - nb) <= (mparams.granularity << 1))
    return oldp;
  else {
    size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
    size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
    size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
                                         CHUNK_ALIGN_MASK);
    TUint8* cp = (TUint8*)CALL_MREMAP((char*)oldp - offset,
                                      oldmmsize, newmmsize, 1);
    if (cp != CMFAIL) {
      mchunkptr newp = (mchunkptr)(cp + offset);
      size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
      newp->head = (psize|CINUSE_BIT);
      mark_inuse_foot(m, newp, psize);
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;

      if (cp < m->least_addr)
        m->least_addr = cp;
      if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      check_mmapped_chunk(m, newp);
      return newp;
    }
  }
  return 0;
}


void RNewAllocator::Init_Dlmalloc(size_t capacity, int locked, size_t aTrimThreshold)
{
  memset(gm, 0, sizeof(malloc_state));
  init_mparams(aTrimThreshold); /* Ensure pagesize etc initialized */
  // The maximum amount that can be allocated is:
  // 2^(8*sizeof(size_t)) - sizeof(malloc_state) - TOP_FOOT_SIZE - page size (all suitably padded).
  // If the capacity exceeds this, no allocation will be done.
  gm->seg.base = gm->least_addr = iBase;
  gm->seg.size = capacity;
  gm->seg.sflags = !IS_MMAPPED_BIT;
  set_lock(gm, locked);
  gm->magic = mparams.magic;
  init_bins(gm);
  init_top(gm, (mchunkptr)iBase, capacity - TOP_FOOT_SIZE);
}

void* RNewAllocator::dlmalloc(size_t bytes) {
  /*
    Basic algorithm:
    If a small request (< 256 bytes minus per-chunk overhead):
      1. If one exists, use a remainderless chunk in associated smallbin.
         (Remainderless means that there are too few excess bytes to represent as a chunk.)
      2. If one exists, split the smallest available chunk in a bin, saving remainder in bin.
      3. If it is big enough, use the top chunk.
      4. If available, get memory from system and use it.
    Otherwise, for a large request:
      1. Find the smallest available binned chunk that fits, splitting if necessary.
      2. If it is big enough, use the top chunk.
      3. If request size >= mmap threshold, try to directly mmap this chunk.
      4. If available, get memory from system and use it.

    The ugly gotos here ensure that postaction occurs along all paths.
  */
  if (!PREACTION(gm)) {
    void* mem;
    size_t nb;
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = gm->smallmap >> idx;

      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(gm, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(gm, b, p, idx);
        set_inuse_and_pinuse(gm, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      } else {
        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(gm, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(gm, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (rsize < free_chunk_threshold)
            set_inuse_and_pinuse(gm, p, small_index2size(i));
          else {
            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            insert_chunk(gm, r, rsize, 0);
          }
          mem = chunk2mem(p);
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }

        else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }
      }
    } /* else - large alloc request */
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else {
      nb = pad_request(bytes);
      if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }
    }

    if (nb < gm->topsize) { /* Split top */
      size_t rsize = gm->topsize -= nb;
      mchunkptr p = gm->top;
      mchunkptr r = gm->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(gm, gm->top);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }

    mem = sys_alloc(gm, nb);

  postaction:
    POSTACTION(gm);
#ifdef DL_CHUNK_MEM_DEBUG
    if (mem) {
      mchunkptr pp = mem2chunk(mem);
      do_check_any_chunk_access(pp, chunksize(pp));
    }
#endif

    if (mem) {
      mchunkptr pp = mem2chunk(mem);
      iTotalAllocSize += chunksize(pp);
    }

    return mem;
  }
#if USE_LOCKS // keep the compiler happy
  return 0;
#endif
}
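
/*
  Worked example of the small-request path (illustrative note, not from the
  original source; figures assume a 32-bit build with 8-byte alignment and
  4-byte chunk overhead, as in stock dlmalloc). For bytes = 20:
  nb = pad_request(20) = (20 + 4 + 7) & ~7 = 24, so idx = small_index(24) =
  24 >> 3 = 3. The smallmap is then consulted: if bin 3 or bin 4 holds a
  chunk it is taken as a remainderless fit; otherwise the next nonempty bin
  is split, and the remainder is rebinned only when it is at least
  free_chunk_threshold bytes.
*/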

void RNewAllocator::dlfree(void* mem) {
  /*
    Consolidate freed chunks with preceding or succeeding bordering
    free chunks, if they exist, and then place in a bin. Intermixed
    with special cases for top, dv, mmapped chunks, and usage errors.
  */

  if (mem != 0)
  {
    size_t unmapped_pages = 0;
    int prev_chunk_unmapped = 0;
    mchunkptr p = mem2chunk(mem);
#if FOOTERS
    mstate fm = get_mstate_for(p);
    if (!ok_magic(fm))
    {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
#else /* FOOTERS */
#define fm gm
#endif /* FOOTERS */

    if (!PREACTION(fm))
    {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_cinuse(p)))
      {
        size_t psize = chunksize(p);
        iTotalAllocSize -= psize;
        mchunkptr next = chunk_plus_offset(p, psize);
        if (!pinuse(p))
        {
          size_t prevsize = p->prev_foot;
          if ((prevsize & IS_MMAPPED_BIT) != 0)
          {
            prevsize &= ~IS_MMAPPED_BIT;
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            goto postaction;
          }
          else
          {
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            if (page_not_in_memory(prev, prevsize)) {
              prev_chunk_unmapped = 1;
              unmapped_pages = ((tchunkptr)prev)->npages;
            }

            psize += prevsize;
            p = prev;
            if (RTCHECK(ok_address(fm, prev)))
            { /* consolidate backward */
              unlink_chunk(fm, p, prevsize);
            }
            else
              goto erroraction;
          }
        }

        if (RTCHECK(ok_next(p, next) && ok_pinuse(next)))
        {
          if (!cinuse(next))
          { /* consolidate forward */
            if (next == fm->top)
            {
              if (prev_chunk_unmapped) { // previous chunk is unmapped
                /* unmap all pages between previously unmapped and end of top chunk
                   and reset top to beginning of prev chunk - done in sys_trim_partial() */
                sys_trim_partial(fm, p, psize, unmapped_pages);
                do_check_any_chunk_access(fm->top, fm->topsize);
                goto postaction;
              }
              else { // forward merge to top
                size_t tsize = fm->topsize += psize;
                fm->top = p;
                p->head = tsize | PINUSE_BIT;
                if (should_trim(fm, tsize))
                  sys_trim(fm, 0);
                do_check_any_chunk_access(fm->top, fm->topsize);
                goto postaction;
              }
            }
            else
            {
              size_t nsize = chunksize(next);
              //int next_chunk_unmapped = 0;
              if ( page_not_in_memory(next, nsize) ) {
                //next_chunk_unmapped = 1;
                unmapped_pages += ((tchunkptr)next)->npages;
              }

              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
            }
          }
          else
            set_free_with_pinuse(p, psize, next);

          /* check if chunk memory can be released */
          size_t npages_out = 0;
          if (!is_small(psize) && psize>=CHUNK_PAGEOUT_THESHOLD)
            npages_out = unmap_chunk_pages((tchunkptr)p, psize, unmapped_pages);

          insert_chunk(fm, p, psize, npages_out);
          check_free_chunk(fm, p);
          do_chunk_page_release_check(p, psize, fm, npages_out);
          goto postaction;
        }
      }
erroraction:
      USAGE_ERROR_ACTION(fm, p);
postaction:
      POSTACTION(fm);
    }
  }
#if !FOOTERS
#undef fm
#endif /* FOOTERS */
}

void* RNewAllocator::dlrealloc(void* oldmem, size_t bytes) {
  if (oldmem == 0)
    return dlmalloc(bytes);
#ifdef REALLOC_ZERO_BYTES_FREES
  if (bytes == 0) {
    dlfree(oldmem);
    return 0;
  }
#endif /* REALLOC_ZERO_BYTES_FREES */
  else {
#if ! FOOTERS
    mstate m = gm;
#else /* FOOTERS */
    mstate m = get_mstate_for(mem2chunk(oldmem));
    if (!ok_magic(m)) {
      USAGE_ERROR_ACTION(m, oldmem);
      return 0;
    }
#endif /* FOOTERS */
    return internal_realloc(m, oldmem, bytes);
  }
}


int RNewAllocator::dlmalloc_trim(size_t pad) {
  int result = 0;
  if (!PREACTION(gm)) {
    result = sys_trim(gm, pad);
    POSTACTION(gm);
  }
  return result;
}

size_t RNewAllocator::dlmalloc_footprint(void) {
  return gm->footprint;
}

size_t RNewAllocator::dlmalloc_max_footprint(void) {
  return gm->max_footprint;
}

#if !NO_MALLINFO
struct mallinfo RNewAllocator::dlmallinfo(void) {
  return internal_mallinfo(gm);
}
#endif /* NO_MALLINFO */

void RNewAllocator::dlmalloc_stats() {
  internal_malloc_stats(gm);
}

int RNewAllocator::dlmallopt(int param_number, int value) {
  return change_mparam(param_number, value);
}

//inline slab* slab::slabfor(void* p)
slab* slab::slabfor(const void* p)
{
    return (slab*)(floor(p, slabsize));
}
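
// Illustrative note (not from the original source): slabs are slabsize-aligned,
// so slabfor() simply masks off the low bits of a cell address. With the 1KB
// slabs described in slab_free() below, a cell at e.g. 0x00483A60 maps to the
// slab starting at 0x00483800.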


void RNewAllocator::tree_remove(slab* s)
{
    slab** r = s->parent;
    slab* c1 = s->child1;
    slab* c2 = s->child2;
    for (;;)
        {
        if (!c2)
            {
            *r = c1;
            if (c1)
                c1->parent = r;
            return;
            }
        if (!c1)
            {
            *r = c2;
            c2->parent = r;
            return;
            }
        if (c1 > c2)
            {
            slab* c3 = c1;
            c1 = c2;
            c2 = c3;
            }
        slab* newc2 = c1->child2;
        *r = c1;
        c1->parent = r;
        c1->child2 = c2;
        c2->parent = &c1->child2;
        s = c1;
        c1 = s->child1;
        c2 = newc2;
        r = &s->child1;
        }
}

void RNewAllocator::tree_insert(slab* s, slab** r)
{
    slab* n = *r;
    for (;;)
        {
        if (!n)
            { // tree empty
            *r = s;
            s->parent = r;
            s->child1 = s->child2 = 0;
            break;
            }
        if (s < n)
            { // insert between parent and n
            *r = s;
            s->parent = r;
            s->child1 = n;
            s->child2 = 0;
            n->parent = &s->child1;
            break;
            }
        slab* c1 = n->child1;
        slab* c2 = n->child2;
        if ((c1 - 1) > (c2 - 1))
            {
            r = &n->child1;
            n = c1;
            }
        else
            {
            r = &n->child2;
            n = c2;
            }
        }
}
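
/*
  Illustrative note (not a comment from the original source): tree_insert and
  tree_remove maintain the slab trees as address-ordered structures with the
  lowest-addressed slab at the root, which is what lets allocnewslab() and
  slab_allocate() always take the lowest available address. The comparison
  (c1 - 1) > (c2 - 1) exploits the unsigned wrap of a NULL child pointer
  minus one to the highest value, so descent is steered into empty links
  first and otherwise toward the higher-addressed child.
*/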

void* RNewAllocator::allocnewslab(slabset& allocator)
//
// Acquire and initialise a new slab, returning a cell from the slab
// The strategy is:
// 1. Use the lowest address free slab, if available. This is done by using the lowest slab
//    in the page at the root of the partial_page heap (which is address ordered). If the
//    page is now fully used, remove it from the partial_page heap.
// 2. Allocate a new page for slabs if no empty slabs are available
//
{
    page* p = page::pagefor(partial_page);
    if (!p)
        return allocnewpage(allocator);

    unsigned h = p->slabs[0].header;
    unsigned pagemap = header_pagemap(h);
    ASSERT(&p->slabs[hibit(pagemap)] == partial_page);

    unsigned slabix = lowbit(pagemap);
    p->slabs[0].header = h &~ (0x100<<slabix);
    if (!(pagemap &~ (1<<slabix)))
        {
        tree_remove(partial_page); // last free slab in page
        }
    return allocator.initslab(&p->slabs[slabix]);
}

/* The definition of this function is not present in the prototype code. */
#if 0
void RNewAllocator::partial_insert(slab* s)
{
    // slab has had first cell freed and needs to be linked back into partial tree
    slabset& ss = slaballoc[sizemap[s->clz]];

    ASSERT(s->used == slabfull);
    s->used = ss.fulluse - s->clz; // full-1 loading
    tree_insert(s, &ss.partial);
    checktree(ss.partial);
}
/* The definition of this function is not present in the prototype code. */
#endif

void* RNewAllocator::allocnewpage(slabset& allocator)
//
// Acquire and initialise a new page, returning a cell from a new slab
// The partial_page tree is empty (otherwise we'd have used a slab from there)
// The partial_page link is put in the highest addressed slab in the page, and the
// lowest addressed slab is used to fulfill the allocation request
//
{
    page* p = spare_page;
    if (p)
        spare_page = 0;
    else
        {
        p = static_cast<page*>(map(0, pagesize));
        if (!p)
            return 0;
        }
    ASSERT(p == floor(p, pagesize));
    p->slabs[0].header = ((1<<3) + (1<<2) + (1<<1))<<8; // set pagemap
    p->slabs[3].parent = &partial_page;
    p->slabs[3].child1 = p->slabs[3].child2 = 0;
    partial_page = &p->slabs[3];
    return allocator.initslab(&p->slabs[0]);
}

void RNewAllocator::freepage(page* p)
//
// Release an unused page to the OS
// A single page is cached for reuse to reduce thrashing of
// the OS allocator.
//
{
    ASSERT(ceiling(p, pagesize) == p);
    if (!spare_page)
        {
        spare_page = p;
        return;
        }
    unmap(p, pagesize);
}

void RNewAllocator::freeslab(slab* s)
//
// Release an empty slab to the slab manager
// The strategy is:
// 1. The page containing the slab is checked to see the state of the other slabs in the page by
//    inspecting the pagemap field in the header of the first slab in the page.
// 2. The pagemap is updated to indicate the new unused slab
// 3. If this is the only unused slab in the page then the slab header is used to add the page to
//    the partial_page tree/heap
// 4. If all the slabs in the page are now unused the page is released back to the OS
// 5. If this slab has a higher address than the one currently used to track this page in
//    the partial_page heap, the linkage is moved to the new unused slab
//
{
    tree_remove(s);
    checktree(*s->parent);
    ASSERT(header_usedm4(s->header) == header_size(s->header)-4);
    CHECK(s->header |= 0xFF00000); // illegal value for debug purposes
    page* p = page::pagefor(s);
    unsigned h = p->slabs[0].header;
    int slabix = s - &p->slabs[0];
    unsigned pagemap = header_pagemap(h);
    p->slabs[0].header = h | (0x100<<slabix);
    if (pagemap == 0)
        { // page was full before, use this slab as link in empty heap
        tree_insert(s, &partial_page);
        }
    else
        { // find the current empty-link slab
        slab* sl = &p->slabs[hibit(pagemap)];
        pagemap ^= (1<<slabix);
        if (pagemap == 0xf)
            { // page is now empty so recycle page to os
            tree_remove(sl);
            freepage(p);
            return;
            }
        // ensure the free list link is in highest address slab in page
        if (s > sl)
            { // replace current link with new one. Address-order tree so position stays the same
            slab** r = sl->parent;
            slab* c1 = sl->child1;
            slab* c2 = sl->child2;
            s->parent = r;
            s->child1 = c1;
            s->child2 = c2;
            *r = s;
            if (c1)
                c1->parent = &s->child1;
            if (c2)
                c2->parent = &s->child2;
            }
        CHECK(if (s < sl) s=sl);
        }
    ASSERT(header_pagemap(p->slabs[0].header) != 0);
    ASSERT(hibit(header_pagemap(p->slabs[0].header)) == unsigned(s - &p->slabs[0]));
}

void RNewAllocator::slab_init(unsigned slabbitmap)
{
    ASSERT((slabbitmap & ~okbits) == 0);
    ASSERT(maxslabsize <= 60);

    slab_threshold = 0;
    partial_page = 0;
    unsigned char ix = 0xff;
    unsigned bit = 1<<((maxslabsize>>2)-1);
    for (int sz = maxslabsize; sz >= 0; sz -= 4, bit >>= 1)
        {
        if (slabbitmap & bit)
            {
            if (++ix == 0)
                slab_threshold = sz+1;
            slabset& c = slaballoc[ix];
            c.size = sz;
            c.partial = 0;
            }
        sizemap[sz>>2] = ix;
        }

    free_chunk_threshold = pad_request(slab_threshold);
}

void* RNewAllocator::slab_allocate(slabset& ss)
//
// Allocate a cell from the given slabset
// Strategy:
// 1. Take the partially full slab at the top of the heap (lowest address).
// 2. If there is no such slab, allocate from a new slab
// 3. If the slab has a non-empty freelist, pop the cell from the front of the list and update the slab
// 4. Otherwise, if the slab is not full, return the cell at the end of the currently used region of
//    the slab, updating the slab
// 5. Otherwise, release the slab from the partial tree/heap, marking it as 'floating' and go back to
//    step 1
//
{
    for (;;)
        {
        slab* s = ss.partial;
        if (!s)
            break;
        unsigned h = s->header;
        unsigned free = h & 0xff; // extract free cell position
        if (free)
            {
            ASSERT(((free<<2)-sizeof(slabhdr))%header_size(h) == 0);
            void* p = offset(s, free<<2);
            free = *(unsigned char*)p; // get next pos in free list
            h += (h&0x3C000)<<6; // update usedm4
            h &= ~0xff;
            h |= free; // update freelist
            s->header = h;
            ASSERT(header_free(h) == 0 || ((header_free(h)<<2)-sizeof(slabhdr))%header_size(h) == 0);
            ASSERT(header_usedm4(h) <= 0x3F8u);
            ASSERT((header_usedm4(h)+4)%header_size(h) == 0);
            return p;
            }
        unsigned h2 = h + ((h&0x3C000)<<6);
        if (h2 < 0xfc00000)
            {
            ASSERT((header_usedm4(h2)+4)%header_size(h2) == 0);
            s->header = h2;
            return offset(s, (h>>18) + sizeof(unsigned) + sizeof(slabhdr));
            }
        h |= 0x80000000; // mark the slab as full-floating
        s->header = h;
        tree_remove(s);
        checktree(ss.partial);
        // go back and try the next slab...
        }
    // no partial slabs found, so allocate from a new slab
    return allocnewslab(ss);
}
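
/*
  Header layout note (illustrative summary inferred from the bit arithmetic
  in slab_allocate/slab_free/initslab, not a comment from the original
  source). Within the 32-bit slab header:
    bits 0..7   hold the freelist head as a cell offset in 4-byte words,
    bits 8..11  hold the pagemap of free slabs (kept in the first slab of a page),
    bits 12..17 hold the cell size in bytes (a multiple of 4),
    bits 18 up to the flag bit hold usedm4, the used byte count minus 4, and
    bit 31      marks a full slab "floating" outside the partial tree.
  The expression h += (h & 0x3C000) << 6 adds one cell size to usedm4:
  bits 14..17 carry size/4, and shifting them up by 6 lands exactly size
  at bit 18.
*/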

void RNewAllocator::slab_free(void* p)
//
// Free a cell from the slab allocator
// Strategy:
// 1. Find the containing slab (round down to nearest 1KB boundary)
// 2. Push the cell into the slab's freelist, and update the slab usage count
// 3. If this is the last allocated cell, free the slab to the main slab manager
// 4. If the slab was full-floating then insert the slab in its respective partial tree
//
{
    ASSERT(lowbits(p, 3) == 0);
    slab* s = slab::slabfor(p);

    unsigned pos = lowbits(p, slabsize);
    unsigned h = s->header;
    ASSERT(header_usedm4(h) != 0x3fC); // slab is empty already
    ASSERT((pos-sizeof(slabhdr))%header_size(h) == 0);
    *(unsigned char*)p = (unsigned char)h;
    h &= ~0xFF;
    h |= (pos>>2);
    unsigned size = h & 0x3C000;
    unsigned allocSize = (h & 0x3F000) >> 12; // size is stored in bits 12...17 in slabhdr
    iTotalAllocSize -= allocSize;
    if (int(h) >= 0)
        {
        h -= size<<6;
        if (int(h) >= 0)
            {
            s->header = h;
            return;
            }
        freeslab(s);
        return;
        }
    h -= size<<6;
    h &= ~0x80000000;
    s->header = h;
    slabset& ss = slaballoc[sizemap[size>>14]];
    tree_insert(s, &ss.partial);
    checktree(ss.partial);
}

void* slabset::initslab(slab* s)
//
// initialise an empty slab for this allocator and return the first cell
// pre-condition: the slabset has no partial slabs for allocation
//
{
    ASSERT(partial == 0);
    unsigned h = s->header & 0xF00; // preserve pagemap only
    h |= (size<<12); // set size
    h |= (size-4)<<18; // set usedminus4 to one object minus 4
    s->header = h;
    partial = s;
    s->parent = &partial;
    s->child1 = s->child2 = 0;
    return offset(s, sizeof(slabhdr));
}

TAny* RNewAllocator::SetBrk(TInt32 aDelta)
{
    if (iFlags & EFixedSize)
        return MFAIL;

    if (aDelta < 0)
        {
        unmap(offset(iTop, aDelta), -aDelta);
        }
    else if (aDelta > 0)
        {
        if (!map(iTop, aDelta))
            return MFAIL;
        }
    void* p = iTop;
    iTop = offset(iTop, aDelta);
    return p;
}

void* RNewAllocator::map(void* p, unsigned sz)
//
// allocate pages in the chunk
// if p is NULL, find and allocate the required number of pages (which must lie in the lower half)
// otherwise commit the pages specified
//
{
    ASSERT(p == floor(p, pagesize));
    ASSERT(sz == ceiling(sz, pagesize));
    ASSERT(sz > 0);

    if (iChunkSize + sz > iMaxLength)
        return 0;

    RChunk chunk;
    chunk.SetHandle(iChunkHandle);
    if (p)
        {
        TInt r = chunk.Commit(iOffset + ptrdiff(p, this), sz);
        if (r < 0)
            return 0;
        iChunkSize += sz;
        return p;
        }

    TInt r = chunk.Allocate(sz);
    if (r < 0)
        return 0;
    if (r > iOffset)
        {
        // can't allow page allocations in DL zone
        chunk.Decommit(r, sz);
        return 0;
        }
    iChunkSize += sz;
#ifdef TRACING_HEAPS
    if (iChunkSize > iHighWaterMark)
        {
        iHighWaterMark = ceiling(iChunkSize, 16*pagesize);

        RChunk chunk;
        chunk.SetHandle(iChunkHandle);
        TKName chunk_name;
        chunk.FullName(chunk_name);
        BTraceContextBig(BTrace::ETest1, 4, 44, chunk_name.Ptr(), chunk_name.Size());

        TUint32 traceData[6];
        traceData[0] = iChunkHandle;
        traceData[1] = iMinLength;
        traceData[2] = iMaxLength;
        traceData[3] = sz;
        traceData[4] = iChunkSize;
        traceData[5] = iHighWaterMark;
        BTraceContextN(BTrace::ETest1, 3, (TUint32)this, 33, traceData, sizeof(traceData));
        }
#endif

    return offset(this, r - iOffset);
    // code below does delayed initialisation of the slabs.
    /*
    if (iChunkSize >= slab_init_threshold)
        { // set up slab system now that heap is large enough
        slab_config(slab_config_bits);
        slab_init_threshold = KMaxTUint;
        }
    return p;
    */
}

void* RNewAllocator::remap(void* p, unsigned oldsz, unsigned sz)
{
    if (oldsz > sz)
        { // shrink
        unmap(offset(p, sz), oldsz-sz);
        }
    else if (oldsz < sz)
        { // grow, try and do this in place first
        if (!map(offset(p, oldsz), sz-oldsz))
            {
            // need to allocate-copy-free
            void* newp = map(0, sz);
            if (newp)
                {
                memcpy(newp, p, oldsz);
                unmap(p, oldsz);
                }
            return newp;
            }
        }
    return p;
}

void RNewAllocator::unmap(void* p, unsigned sz)
{
    ASSERT(p == floor(p, pagesize));
    ASSERT(sz == ceiling(sz, pagesize));
    ASSERT(sz > 0);

    RChunk chunk;
    chunk.SetHandle(iChunkHandle);
    TInt r = chunk.Decommit(ptrdiff(p, offset(this, -iOffset)), sz);
    //TInt offset = (TUint8*)p-(TUint8*)chunk.Base();
    //TInt r = chunk.Decommit(offset,sz);

    ASSERT(r >= 0);
    iChunkSize -= sz;
#ifdef TRACING_HEAPS
    if (iChunkSize > iHighWaterMark)
        {
        iHighWaterMark = ceiling(iChunkSize, 16*pagesize);

        RChunk chunk;
        chunk.SetHandle(iChunkHandle);
        TKName chunk_name;
        chunk.FullName(chunk_name);
        BTraceContextBig(BTrace::ETest1, 4, 44, chunk_name.Ptr(), chunk_name.Size());

        TUint32 traceData[6];
        traceData[0] = iChunkHandle;
        traceData[1] = iMinLength;
        traceData[2] = iMaxLength;
        traceData[3] = sz;
        traceData[4] = iChunkSize;
        traceData[5] = iHighWaterMark;
        BTraceContextN(BTrace::ETest1, 3, (TUint32)this, 33, traceData, sizeof(traceData));
        }
#endif
}

void RNewAllocator::paged_init(unsigned pagepower)
{
    if (pagepower == 0)
        pagepower = 31;
    else if (pagepower < minpagepower)
        pagepower = minpagepower;
    page_threshold = pagepower;
    for (int i = 0; i < npagecells; ++i)
        {
        pagelist[i].page = 0;
        pagelist[i].size = 0;
        }
}

void* RNewAllocator::paged_allocate(unsigned size)
{
    unsigned nbytes = ceiling(size, pagesize);
    if (nbytes < size + cellalign)
        { // not enough extra space for header and alignment, try and use cell list
        for (pagecell* c = pagelist, *e = c + npagecells; c < e; ++c)
            if (c->page == 0)
                {
                void* p = map(0, nbytes);
                if (!p)
                    return 0;
                c->page = p;
                c->size = nbytes;
                iTotalAllocSize += nbytes;
                return p;
                }
        }
    // use a cell header
    nbytes = ceiling(size + cellalign, pagesize);
    void* p = map(0, nbytes);
    if (!p)
        return 0;
    *static_cast<unsigned*>(p) = nbytes;
    iTotalAllocSize += nbytes;
    return offset(p, cellalign);
}
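
/*
  Worked example (illustrative note, not from the original source; assumes
  4KB pages): a request of 4000 bytes rounds to nbytes = 4096, which leaves
  room for the header word, so the size is written at the page start and the
  caller receives page + cellalign. A request of exactly 4096 bytes has no
  spare room, so it is recorded in the pagelist descriptor array instead;
  only when all npagecells descriptors are busy does it fall back to the
  header form and pay an extra page for the bookkeeping word.
*/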

void* RNewAllocator::paged_reallocate(void* p, unsigned size)
{
    if (lowbits(p, pagesize) == 0)
        { // continue using descriptor
        pagecell* c = paged_descriptor(p);
        unsigned nbytes = ceiling(size, pagesize);
        void* newp = remap(p, c->size, nbytes);
        if (!newp)
            return 0;
        // account for the size change before overwriting the old size
        iTotalAllocSize += nbytes - c->size;
        c->page = newp;
        c->size = nbytes;
        return newp;
        }
    else
        { // use a cell header
        ASSERT(lowbits(p, pagesize) == cellalign);
        p = offset(p, -int(cellalign));
        unsigned nbytes = ceiling(size + cellalign, pagesize);
        unsigned obytes = *static_cast<unsigned*>(p);
        void* newp = remap(p, obytes, nbytes);
        if (!newp)
            return 0;
        *static_cast<unsigned*>(newp) = nbytes;
        iTotalAllocSize += nbytes - obytes;
        return offset(newp, cellalign);
        }
}

void RNewAllocator::paged_free(void* p)
{
    if (lowbits(p, pagesize) == 0)
        { // check pagelist
        pagecell* c = paged_descriptor(p);

        iTotalAllocSize -= c->size;

        unmap(p, c->size);
        c->page = 0;
        c->size = 0;
        }
    else
        { // check page header
        unsigned* page = static_cast<unsigned*>(offset(p, -int(cellalign)));
        unsigned size = *page;

        iTotalAllocSize -= size;

        unmap(page, size);
        }
}

pagecell* RNewAllocator::paged_descriptor(const void* p) const
{
    ASSERT(lowbits(p, pagesize) == 0);
    // Double cast to keep the compiler happy: it otherwise thinks we are
    // trying to change a non-const member (pagelist) in a const function.
    pagecell* c = (pagecell*)((void*)pagelist);
#ifdef _DEBUG
    pagecell* e = c + npagecells;
#endif
    for (;;)
        {
        ASSERT(c != e);
        if (c->page == p)
            return c;
        ++c;
        }
}

RNewAllocator* RNewAllocator::FixedHeap(TAny* aBase, TInt aMaxLength, TInt aAlign, TBool aSingleThread)
/**
Creates a fixed length heap at a specified location.

On successful return from this function, aMaxLength bytes are committed by the chunk.
The heap cannot be extended.

@param aBase         A pointer to the location where the heap is to be constructed.
@param aMaxLength    The length of the heap. If the supplied value is less
                     than KMinHeapSize, it is discarded and the value KMinHeapSize
                     is used instead.
@param aAlign        The alignment of heap cells.
@param aSingleThread Indicates whether single threaded or not.

@return A pointer to the new heap, or NULL if the heap could not be created.

@panic USER 56 if aMaxLength is negative.
*/
//
// Force construction of the fixed memory.
//
{
    __ASSERT_ALWAYS(aMaxLength>=0, ::Panic(ETHeapMaxLengthNegative));
    if (aMaxLength < KMinHeapSize)
        aMaxLength = KMinHeapSize;

    RNewAllocator* h = new(aBase) RNewAllocator(aMaxLength, aAlign, aSingleThread);

    if (!aSingleThread)
        {
        TInt r = h->iLock.CreateLocal();
        if (r != KErrNone)
            return NULL;
        h->iHandles = (TInt*)&h->iLock;
        h->iHandleCount = 1;
        }
    return h;
}

RNewAllocator* RNewAllocator::ChunkHeap(const TDesC* aName, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread)
/**
Creates a heap in a local or global chunk.

The chunk hosting the heap can be local or global.

A local chunk is one which is private to the process creating it and is not
intended for access by other user processes.
A global chunk is one which is visible to all processes.

The hosting chunk is local if the pointer aName is NULL; otherwise
the hosting chunk is global and the descriptor *aName is assumed to contain
the name to be assigned to it.

Ownership of the host chunk is vested in the current process.

A minimum and a maximum size for the heap can be specified. On successful
return from this function, the size of the heap is at least aMinLength.
If subsequent requests for allocation of memory from the heap cannot be
satisfied by compressing the heap, the size of the heap is extended in
increments of aGrowBy until the request can be satisfied. Attempts to extend
the heap cause the size of the host chunk to be adjusted.

Note that the size of the heap cannot be adjusted by more than aMaxLength.

@param aName         If NULL, the function constructs a local chunk to host
                     the heap.
                     If not NULL, a pointer to a descriptor containing the name
                     to be assigned to the global chunk hosting the heap.
@param aMinLength    The minimum length of the heap.
@param aMaxLength    The maximum length to which the heap can grow.
                     If the supplied value is less than KMinHeapSize, then it
                     is discarded and the value KMinHeapSize used instead.
@param aGrowBy       The increments to the size of the host chunk. If a value is
                     not explicitly specified, the value KMinHeapGrowBy is taken
                     by default.
@param aAlign        The alignment of heap cells.
@param aSingleThread Indicates whether single threaded or not.

@return A pointer to the new heap or NULL if the heap could not be created.

@panic USER 41 if aMinLength is greater than the supplied value of aMaxLength.
@panic USER 55 if aMinLength is negative.
@panic USER 56 if aMaxLength is negative.
*/
//
// Allocate a Chunk of the requested size and force construction.
//
{
    __ASSERT_ALWAYS(aMinLength>=0, ::Panic(ETHeapMinLengthNegative));
    __ASSERT_ALWAYS(aMaxLength>=aMinLength, ::Panic(ETHeapCreateMaxLessThanMin));
    if (aMaxLength < KMinHeapSize)
        aMaxLength = KMinHeapSize;
    RChunk c;
    TInt r;
    if (aName)
        r = c.CreateDisconnectedGlobal(*aName, 0, 0, aMaxLength*2, aSingleThread ? EOwnerThread : EOwnerProcess);
    else
        r = c.CreateDisconnectedLocal(0, 0, aMaxLength*2, aSingleThread ? EOwnerThread : EOwnerProcess);
    if (r != KErrNone)
        return NULL;

    RNewAllocator* h = ChunkHeap(c, aMinLength, aGrowBy, aMaxLength, aAlign, aSingleThread, UserHeap::EChunkHeapDuplicate);
    c.Close();
    return h;
}

RNewAllocator* RNewAllocator::ChunkHeap(RChunk aChunk, TInt aMinLength, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
/**
Creates a heap in an existing chunk.

This function is intended to be used to create a heap in a user writable code
chunk as created by a call to RChunk::CreateLocalCode().
This type of heap can be used to hold code fragments from a JIT compiler.

The maximum length to which the heap can grow is the same as
the maximum size of the chunk.

@param aChunk        The chunk that will host the heap.
@param aMinLength    The minimum length of the heap.
@param aGrowBy       The increments to the size of the host chunk.
@param aMaxLength    The maximum length to which the heap can grow.
@param aAlign        The alignment of heap cells.
@param aSingleThread Indicates whether single threaded or not.
@param aMode         Flags controlling the reallocation. The only bit which has any
                     effect on reallocation is that defined by the enumeration
                     ENeverMove of the enum RAllocator::TReAllocMode.
                     If this is set, then any successful reallocation guarantees not
                     to have changed the start address of the cell.
                     By default, this parameter is zero.

@return A pointer to the new heap or NULL if the heap could not be created.
*/
//
// Construct a heap in an already existing chunk
//
{
    return OffsetChunkHeap(aChunk, aMinLength, 0, aGrowBy, aMaxLength, aAlign, aSingleThread, aMode);
}

RNewAllocator* RNewAllocator::OffsetChunkHeap(RChunk aChunk, TInt aMinLength, TInt aOffset, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
/**
Creates a heap in an existing chunk, offset from the beginning of the chunk.

This function is intended to be used to create a heap where a fixed amount of
additional data must be stored at a known location. The additional data can be
placed at the base address of the chunk, allowing it to be located without
depending on the internals of the heap structure.

The maximum length to which the heap can grow is the maximum size of the chunk,
minus the offset.

@param aChunk        The chunk that will host the heap.
@param aMinLength    The minimum length of the heap.
@param aOffset       The offset from the start of the chunk, to the start of the heap.
@param aGrowBy       The increments to the size of the host chunk.
@param aMaxLength    The maximum length to which the heap can grow.
@param aAlign        The alignment of heap cells.
@param aSingleThread Indicates whether single threaded or not.
@param aMode         Flags controlling the reallocation. The only bit which has any
                     effect on reallocation is that defined by the enumeration
                     ENeverMove of the enum RAllocator::TReAllocMode.
                     If this is set, then any successful reallocation guarantees not
                     to have changed the start address of the cell.
                     By default, this parameter is zero.

@return A pointer to the new heap or NULL if the heap could not be created.
*/
//
// Construct a heap in an already existing chunk
//
{
    TInt page_size = malloc_getpagesize;
    if (!aAlign)
        aAlign = RNewAllocator::ECellAlignment;
    TInt maxLength = aChunk.MaxSize();
    TInt round_up = Max(aAlign, page_size);
    TInt min_cell = _ALIGN_UP(Max((TInt)RNewAllocator::EAllocCellSize, (TInt)RNewAllocator::EFreeCellSize), aAlign);
    aOffset = _ALIGN_UP(aOffset, 8);

#ifdef NO_RESERVE_MEMORY
#ifdef TRACING_HEAPS
    TKName chunk_name;
    aChunk.FullName(chunk_name);
    BTraceContextBig(BTrace::ETest1, 0xF, 0xFF, chunk_name.Ptr(), chunk_name.Size());

    TUint32 traceData[4];
    traceData[0] = aChunk.Handle();
    traceData[1] = aMinLength;
    traceData[2] = aMaxLength;
    traceData[3] = aAlign;
    BTraceContextN(BTrace::ETest1, 0xE, 0xEE, 0xEE, traceData, sizeof(traceData));
#endif
    // Reduce aMinLength: not all memory in the new allocator behaves the same
    // way, so a minimum reservation cannot be honoured properly.
    if (aMinLength < aMaxLength)
        aMinLength = 0;
#endif

    if (aMaxLength && aMaxLength+aOffset < maxLength)
        maxLength = _ALIGN_UP(aMaxLength+aOffset, round_up);
    __ASSERT_ALWAYS(aMinLength>=0, ::Panic(ETHeapMinLengthNegative));
    __ASSERT_ALWAYS(maxLength>=aMinLength, ::Panic(ETHeapCreateMaxLessThanMin));
    aMinLength = _ALIGN_UP(Max(aMinLength, (TInt)sizeof(RNewAllocator) + min_cell) + aOffset, round_up);

    // the new allocator uses a disconnected chunk so must commit the initial allocation
    // with Commit() instead of Adjust()
    // TInt r=aChunk.Adjust(aMinLength);
    //TInt r = aChunk.Commit(aOffset, aMinLength);

    aOffset = maxLength;
    //TInt MORE_CORE_OFFSET = maxLength/2;
    //TInt r = aChunk.Commit(MORE_CORE_OFFSET, aMinLength);
    TInt r = aChunk.Commit(aOffset, aMinLength);

    if (r != KErrNone)
        return NULL;

    RNewAllocator* h = new (aChunk.Base() + aOffset) RNewAllocator(aChunk.Handle(), aOffset, aMinLength, maxLength, aGrowBy, aAlign, aSingleThread);
    //RNewAllocator* h = new (aChunk.Base() + MORE_CORE_OFFSET) RNewAllocator(aChunk.Handle(), aOffset, aMinLength, maxLength, aGrowBy, aAlign, aSingleThread);

    TBool duplicateLock = EFalse;
    if (!aSingleThread)
        {
        duplicateLock = aMode & UserHeap::EChunkHeapSwitchTo;
        if (h->iLock.CreateLocal(duplicateLock ? EOwnerThread : EOwnerProcess) != KErrNone)
            {
            h->iChunkHandle = 0;
            return NULL;
            }
        }

    if (aMode & UserHeap::EChunkHeapSwitchTo)
        User::SwitchHeap(h);

    h->iHandles = &h->iChunkHandle;
    if (!aSingleThread)
        {
        // now change the thread-relative chunk/semaphore handles into process-relative handles
        h->iHandleCount = 2;
        if (duplicateLock)
            {
            RHandleBase s = h->iLock;
            r = h->iLock.Duplicate(RThread());
            s.Close();
            }
        if (r == KErrNone && (aMode & UserHeap::EChunkHeapDuplicate))
            {
            r = ((RChunk*)&h->iChunkHandle)->Duplicate(RThread());
            if (r != KErrNone)
                h->iLock.Close(), h->iChunkHandle = 0;
            }
        }
    else
        {
        h->iHandleCount = 1;
        if (aMode & UserHeap::EChunkHeapDuplicate)
            r = ((RChunk*)&h->iChunkHandle)->Duplicate(RThread(), EOwnerThread);
        }

    // return the heap address
    return (r == KErrNone) ? h : NULL;
}
|
|
3053 |
|
|
3054 |
/* Only for debugging purpose - start*/
|
|
3055 |
#ifdef DL_CHUNK_MEM_DEBUG
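// The following checks deliberately read from the chunk so that an access to a
// decommitted page faults here, close to the cause, rather than later in
// unrelated code; the values read are otherwise unused.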
void RNewAllocator::debug_check_small_chunk_access(mchunkptr p, size_t psize)
{
    size_t sz = chunksize(p);
    char ch = *((char*)chunk_plus_offset(p, psize - 1));
}

void RNewAllocator::debug_check_any_chunk_access(mchunkptr p, size_t psize)
{
    if (p == 0 || psize == 0)
        return;

    // touch one byte per page from the start of the chunk to its end
    mchunkptr next = chunk_plus_offset(p, psize);
    char* t = (char*)chunk_plus_offset(p, mparams.page_size);
    char ch = *((char*)p);
    while ((size_t)t < (size_t)next)
    {
        ch = *t;
        t = (char*)chunk_plus_offset(t, mparams.page_size);
    }
}

void RNewAllocator::debug_check_large_chunk_access(tchunkptr p, size_t psize)
{
    mchunkptr next = chunk_plus_offset(p, psize);
    char* t = (char*)chunk_plus_offset(p, mparams.page_size);
    char ch = *((char*)p);
    while ((size_t)t < (size_t)next)
    {
        ch = *t;
        t = (char*)chunk_plus_offset(t, mparams.page_size);
    }
}

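// Sanity checks after dlfree() has released the pages backing a free chunk:
// the chunk's page_not_in_memory flag must be set, the released range must not
// extend past the top chunk, and the designated victim (dv) must not lie
// inside it.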
void RNewAllocator::debug_chunk_page_release_check(mchunkptr p, size_t psize, mstate fm, int mem_released)
{
    if (mem_released)
    {
        if (!page_not_in_memory(p, psize))
            MEM_LOG("CHUNK_PAGE_ERROR::dlfree, error - page_in_mem flag is corrupt");
        if (chunk_plus_offset(p, psize) > fm->top)
            MEM_LOG("CHUNK_PAGE_ERROR: error Top chunk address invalid");
        if (fm->dv >= p && fm->dv < chunk_plus_offset(p, psize))
            MEM_LOG("CHUNK_PAGE_ERROR: error DV chunk address invalid");
    }
}
#endif

#ifdef OOM_LOGGING
#include <hal.h>
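// Walks one tree bin recursively: each node's chain of same-sized chunks (fd
// links) is summed and logged as its own CSV row, and both children are
// visited in turn.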
void RNewAllocator::dump_large_chunk(mstate m, tchunkptr t)
{
    tchunkptr u = t;
    bindex_t tindex = t->index;
    size_t tsize = chunksize(t);
    bindex_t idx;
    compute_tree_index(tsize, idx);

    size_t free = 0;
    int nfree = 0;
    do
    {
        /* traverse through chain of same-sized nodes */
        if (u->child[0] != 0)
        {
            dump_large_chunk(m, u->child[0]);
        }

        if (u->child[1] != 0)
        {
            dump_large_chunk(m, u->child[1]);
        }

        free += chunksize(u);
        nfree++;
        u = u->fd;
    }
    while (u != t);
    C_LOGF(_L8("LARGE_BIN,%d,%d,%d"), tsize, free, nfree);
}

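// Logs every non-empty free bin as CSV. The smallmap and treemap bitmaps track
// which bins are occupied, so empty bins are skipped without walking their
// lists.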
void RNewAllocator::dump_dl_free_chunks()
{
    C_LOG("");
    C_LOG("------------ dump_dl_free_chunks start -------------");
    C_LOG("BinType,BinSize,FreeSize,FreeCount");

    // dump small bins
    for (int i = 0; i < NSMALLBINS; ++i)
    {
        sbinptr b = smallbin_at(gm, i);
        unsigned int empty = (gm->smallmap & (1 << i)) == 0;
        if (!empty)
        {
            int nfree = 0;
            size_t free = 0;
            mchunkptr p = b->bk;
            size_t size = chunksize(p);
            for (; p != b; p = p->bk)
            {
                free += chunksize(p);
                nfree++;
            }

            C_LOGF(_L8("SMALL_BIN,%d,%d,%d"), size, free, nfree);
        }
    }

    // dump large bins
    for (int i = 0; i < NTREEBINS; ++i)
    {
        tbinptr* tb = treebin_at(gm, i);
        tchunkptr t = *tb;
        int empty = (gm->treemap & (1 << i)) == 0;
        if (!empty)
            dump_large_chunk(gm, t);
    }

    C_LOG("------------ dump_dl_free_chunks end -------------");
    C_LOG("");
}

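// One-shot heap summary for OOM analysis: system free RAM (via HAL), the
// allocator's committed and unmapped chunk sizes, and the dlmalloc arena
// statistics from dlmallinfo(). fail_size is the allocation request that
// failed, or 0 for an on-demand dump.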
void RNewAllocator::dump_heap_logs(size_t fail_size)
{
    MEM_LOG("");
    if (fail_size) {
        MEM_LOG("MEMDEBUG::RSymbianDLHeap OOM Log dump *************** start");
        MEM_LOGF(_L8("Failing to alloc size: %d"), fail_size);
    }
    else
        MEM_LOG("MEMDEBUG::RSymbianDLHeap Log dump *************** start");

    TInt dl_chunk_size = ptrdiff(iTop, iBase);
    TInt slabp_chunk_size = iChunkSize + iUnmappedChunkSize - dl_chunk_size;
    TInt freeMem = 0;
    HAL::Get(HALData::EMemoryRAMFree, freeMem);
    MEM_LOGF(_L8("System Free RAM Size: %d"), freeMem);
    MEM_LOGF(_L8("Allocator Committed Chunk Size: %d"), iChunkSize);
    MEM_LOGF(_L8("DLHeap Arena Size: %d"), dl_chunk_size);
    MEM_LOGF(_L8("DLHeap unmapped chunk size: %d"), iUnmappedChunkSize);
    MEM_LOGF(_L8("Slab-Page Allocator Chunk Size: %d"), slabp_chunk_size);

    mallinfo info = dlmallinfo();
    TUint heapAlloc = info.uordblks;
    TUint heapFree = info.fordblks;
    MEM_LOGF(_L8("DLHeap allocated size: %d"), heapAlloc);
    MEM_LOGF(_L8("DLHeap free size: %d"), heapFree);

    if (fail_size) {
        MEM_LOG("MEMDEBUG::RSymbianDLHeap OOM Log dump *************** end");
    } else {
        MEM_LOG("MEMDEBUG::RSymbianDLHeap Log dump *************** end");
    }
    MEM_LOG("");
}

#endif
/* Only for debugging purposes - end */


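// Tests one bit of the kernel debug mask: DebugMask(bit>>5) fetches the 32-bit
// mask word containing the bit, and (1 << (bit & 31)) selects the bit within
// that word.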
#define UserTestDebugMaskBit(bit) (TBool)(UserSvr::DebugMask(bit>>5) & (1<<(bit&31)))

#ifndef NO_NAMED_LOCAL_CHUNKS
// This class requires Symbian^3 for ELocalNamed.

// Hack to get access to TChunkCreateInfo internals outside of the kernel
class TFakeChunkCreateInfo: public TChunkCreateInfo
{
public:
    void SetThreadNewAllocator(TInt aInitialSize, TInt aMaxSize, const TDesC& aName)
    {
        iType = TChunkCreate::ENormal | TChunkCreate::EDisconnected | TChunkCreate::EData;
        // Reserve twice the heap's maximum size; ChunkHeap() above commits the
        // allocator's initial cell at offset maxLength within this range.
        iMaxSize = aMaxSize * 2;

        iInitialBottom = 0;
        iInitialTop = aInitialSize;
        iAttributes = TChunkCreate::ELocalNamed;
        iName = &aName;
        iOwnerType = EOwnerThread;
    }
};
#endif

#ifndef NO_NAMED_LOCAL_CHUNKS
_LIT(KLitDollarHeap,"$HEAP");
#endif
TInt RNewAllocator::CreateThreadHeap(SStdEpocThreadCreateInfo& aInfo, RNewAllocator*& aHeap, TInt aAlign, TBool aSingleThread)
/**
@internalComponent
*/
//
// Create a user-side heap
//
{
    TInt page_size = malloc_getpagesize;
    TInt minLength = _ALIGN_UP(aInfo.iHeapInitialSize, page_size);
    TInt maxLength = Max(aInfo.iHeapMaxSize, minLength);
#ifdef TRACING_ALLOCS
    if (UserTestDebugMaskBit(96)) // 96 == KUSERHEAPTRACE in nk_trace.h
        aInfo.iFlags |= ETraceHeapAllocs;
#endif
    // Create the thread's heap chunk.
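    // On Symbian^3 the chunk can be given the conventional "$HEAP" name;
    // kernels without named local chunks get an anonymous disconnected chunk
    // instead. Both paths reserve twice the maximum size (the named path via
    // TFakeChunkCreateInfo above).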
    RChunk c;
#ifndef NO_NAMED_LOCAL_CHUNKS
    TFakeChunkCreateInfo createInfo;
    createInfo.SetThreadNewAllocator(0, maxLength, KLitDollarHeap()); // Initialise with no memory committed.
    TInt r = c.Create(createInfo);
#else
    TInt r = c.CreateDisconnectedLocal(0, 0, maxLength * 2);
#endif
    if (r != KErrNone)
        return r;
    aHeap = ChunkHeap(c, minLength, page_size, maxLength, aAlign, aSingleThread, UserHeap::EChunkHeapSwitchTo | UserHeap::EChunkHeapDuplicate);
    c.Close();
    if (!aHeap)
        return KErrNoMemory;
#ifdef TRACING_ALLOCS
    if (aInfo.iFlags & ETraceHeapAllocs)
    {
        aHeap->iFlags |= RAllocator::ETraceAllocs;
        BTraceContext8(BTrace::EHeap, BTrace::EHeapCreate, (TUint32)aHeap, RNewAllocator::EAllocCellSize);
        TInt handle = aHeap->ChunkHandle();
        TInt chunkId = ((RHandleBase&)handle).BTraceId();
        BTraceContext8(BTrace::EHeap, BTrace::EHeapChunkCreate, (TUint32)aHeap, chunkId);
    }
#endif
    return KErrNone;
}

/*
 * \internal
 * Called from the qtmain.lib application wrapper.
 * Create a new heap as requested, but use the new allocator.
 */
TInt _symbian_SetupThreadHeap(TBool /*aNotFirst*/, SStdEpocThreadCreateInfo& aInfo)
{
    TInt r = KErrNone;
    if (!aInfo.iAllocator && aInfo.iHeapInitialSize > 0)
    {
        // new heap required
        RNewAllocator* pH = NULL;
        r = RNewAllocator::CreateThreadHeap(aInfo, pH);
    }
    else if (aInfo.iAllocator)
    {
        // sharing a heap
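        // Open() takes a reference on the shared allocator so it stays alive
        // while this thread uses it; SwitchAllocator() then installs it as the
        // thread's current allocator.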
        RAllocator* pA = aInfo.iAllocator;
        r = pA->Open();
        if (r == KErrNone)
        {
            User::SwitchAllocator(pA);
        }
    }
    return r;
}

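// Restore the compiler settings saved earlier in the file, presumably by a
// matching #pragma push.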
#ifndef __WINS__
#pragma pop
#endif