/*
* Copyright (c) 2006 Nokia Corporation and/or its subsidiary(-ies).
* All rights reserved.
* This component and the accompanying materials are made available
* under the terms of the License "Eclipse Public License v1.0"
* which accompanies this distribution, and is available
* at the URL "http://www.eclipse.org/legal/epl-v10.html".
*
* Initial Contributors:
* Nokia Corporation - initial contribution.
*
* Contributors:
*
* Description:
*
*
*/

#include "common.h"

#ifdef TRACE_CHUNK_USAGE
void TraceChunkUsage(TInt aChunkHandle, TUint8* aBase, TInt aChunkSize)
    {
    RDebug::Print(_L("MEM: c,%d,%d,%d"), aChunkHandle, aBase, aChunkSize);
    }
#else
#define TraceChunkUsage(a,b,c)
#endif

#ifdef __NEW_ALLOCATOR__

#include "SymbianDLHeap.h"

_LIT(KDLHeapPanicCategory, "DL Heap");
#define GET_PAGE_SIZE(x) UserHal::PageSizeInBytes(x)
#define __CHECK_CELL(p)
#define __POWER_OF_2(x) ((TUint32)((x)^((x)-1))>=(TUint32)(x))
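
/*
 * __POWER_OF_2 illustrated: x^(x-1) sets every bit up to and including the
 * highest set bit of x, and that value is >= x exactly when x is a power of
 * two. For example, 8^7 == 15 >= 8 (true), but 6^5 == 3 < 6 (false).
 */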

#define gm (&iGlobalMallocState)

void Panic(TCdtPanic aPanic)
    {
    User::Panic(KDLHeapPanicCategory, aPanic);
    }

#undef UEXPORT_C
#define UEXPORT_C

UEXPORT_C RSymbianDLHeap::RSymbianDLHeap(TInt aMaxLength, TInt aAlign, TBool aSingleThread)
// constructor for a fixed heap. Just use the DL allocator.
    :iMinLength(aMaxLength), iMaxLength(aMaxLength), iOffset(0), iGrowBy(0), iChunkHandle(0),
    iNestingLevel(0), iAllocCount(0), iFailType(ENone), iTestData(NULL), iChunkSize(aMaxLength)
    {
    // Workaround so GKIServ (hudson generic low level layer) starts up OK: it passes an
    // aAlign of 0, which would panic, so fall back to 4 when the alignment is invalid.
    if ((TUint32)aAlign >= sizeof(TAny*) && __POWER_OF_2(aAlign))
        {
        iAlign = aAlign;
        }
    else
        {
        iAlign = 4;
        }
    iPageSize = 0;
    iFlags = aSingleThread ? (ESingleThreaded|EFixedSize) : EFixedSize;

    Init(0, 0, 0);
    }

UEXPORT_C RSymbianDLHeap::RSymbianDLHeap(TInt aChunkHandle, TInt aOffset, TInt aMinLength, TInt aMaxLength, TInt aGrowBy,
        TInt aAlign, TBool aSingleThread)
    : iMinLength(aMinLength), iMaxLength(aMaxLength), iOffset(aOffset), iChunkHandle(aChunkHandle), iNestingLevel(0), iAllocCount(0),
    iAlign(aAlign), iFailType(ENone), iTestData(NULL), iChunkSize(aMinLength)
    {
    // The page size is picked up from the OS (it was previously locked to 4KB).
    GET_PAGE_SIZE(iPageSize);
    __ASSERT_ALWAYS(aOffset >= 0, User::Panic(KDLHeapPanicCategory, ETHeapNewBadOffset));
    iGrowBy = _ALIGN_UP(aGrowBy, iPageSize);
    iFlags = aSingleThread ? ESingleThreaded : 0;

    // Initialise.
    // If the heap is created with aMinLength==aMaxLength then it cannot allocate slab or page
    // memory, so those sub-allocators should be disabled. Otherwise initialise with default values.
    if (aMinLength == aMaxLength)
        Init(0, 0, 0);
    else
        Init(0x3fff, 16, 0x10000);      // all slabs, page {64KB}, trim {64KB}
//      Init(0xabe, 16, iPageSize*4);   // slabs {48, 40, 32, 24, 20, 16, 12}, page {64KB}, trim {16KB}
    }

UEXPORT_C TAny* RSymbianDLHeap::operator new(TUint aSize, TAny* aBase) __NO_THROW
    {
    __ASSERT_ALWAYS(aSize >= sizeof(RSymbianDLHeap), HEAP_PANIC(ETHeapNewBadSize));
    RSymbianDLHeap* h = (RSymbianDLHeap*)aBase;
    h->iAlign = 0x80000000; // garbage value
    h->iBase = ((TUint8*)aBase) + aSize;
    return aBase;
    }

void RSymbianDLHeap::Init(TInt aBitmapSlab, TInt aPagePower, size_t aTrimThreshold)
    {
    __ASSERT_ALWAYS((TUint32)iAlign >= sizeof(TAny*) && __POWER_OF_2(iAlign), HEAP_PANIC(ETHeapNewBadAlignment));

    /* Initialisation code moved here from the constructors */
    iTop = (TUint8*)this + iMinLength;
    spare_page = 0;
    memset(&mparams, 0, sizeof(mparams));

    Init_Dlmalloc(iTop - iBase, 0, aTrimThreshold);

    slab_init(aBitmapSlab);

    /* 10-1K, 11-2K, 12-4K, 13-8K, 14-16K, 15-32K, 16-64K */
    paged_init(aPagePower);
    }

UEXPORT_C RSymbianDLHeap::SCell* RSymbianDLHeap::GetAddress(const TAny* aCell) const
//
// As much as possible, check a cell address and backspace it
// to point at the cell header.
//
    {
    TLinAddr m = TLinAddr(iAlign - 1);
    __ASSERT_ALWAYS(!(TLinAddr(aCell)&m), HEAP_PANIC(ETHeapBadCellAddress));

    SCell* pC = (SCell*)(((TUint8*)aCell) - EAllocCellSize);
    __CHECK_CELL(pC);

    return pC;
    }

UEXPORT_C TInt RSymbianDLHeap::AllocLen(const TAny* aCell) const
    {
    if (ptrdiff(aCell, this) >= 0)
        {
        mchunkptr m = mem2chunk(aCell);
        return chunksize(m) - CHUNK_OVERHEAD;
        }
    if (lowbits(aCell, pagesize) > cellalign)
        return header_size(slab::slabfor(aCell)->header);
    if (lowbits(aCell, pagesize) == cellalign)
        return *(unsigned*)(offset(aCell, -int(cellalign))) - cellalign;
    return paged_descriptor(aCell)->size;
    }

UEXPORT_C TAny* RSymbianDLHeap::Alloc(TInt aSize)
    {
    __ASSERT_ALWAYS((TUint)aSize < (KMaxTInt/2), HEAP_PANIC(ETHeapBadAllocatedCellSize));

    TAny* addr;

    Lock();
    if (aSize < slab_threshold)
        {
        TInt ix = sizemap[(aSize+3)>>2];
        ASSERT(ix != 0xff);
        addr = slab_allocate(slaballoc[ix]);
        }
    else if ((aSize >> page_threshold) == 0)
        {
        addr = dlmalloc(aSize);
        }
    else
        {
        addr = paged_allocate(aSize);
        }

    Unlock();

    return addr;
    }
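
/*
 * Alloc() dispatch, illustrated. The exact thresholds depend on the Init()
 * configuration, so the sizes below are indicative only:
 *   Alloc(24)     -> slab_allocate()  - small cells come from slabs
 *   Alloc(2000)   -> dlmalloc()       - medium cells come from the DL allocator
 *   Alloc(100000) -> paged_allocate() - large cells come from whole pages
 */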

UEXPORT_C TInt RSymbianDLHeap::Compress()
    {
    if (iFlags & EFixedSize)
        return 0;

    Lock();
    dlmalloc_trim(0);
    if (spare_page)
        {
        unmap(spare_page, pagesize);
        spare_page = 0;
        }
    Unlock();
    return 0;
    }

UEXPORT_C void RSymbianDLHeap::Free(TAny* aPtr)
    {
    Lock();

    if (!aPtr)
        ;
    else if (ptrdiff(aPtr, this) >= 0)
        {
        dlfree(aPtr);
        }
    else if (lowbits(aPtr, pagesize) <= cellalign)
        {
        paged_free(aPtr);
        }
    else
        {
        slab_free(aPtr);
        }
    Unlock();
    }


UEXPORT_C void RSymbianDLHeap::Reset()
    {
    // TODO: free everything
    }

UEXPORT_C TAny* RSymbianDLHeap::ReAlloc(TAny* aPtr, TInt aSize, TInt /*aMode = 0*/)
//
// Support aMode==0 properly: do not move the cell if the request is smaller than the old size
//
    {
    if (ptrdiff(aPtr, this) >= 0)
        {
        // original cell is in the DL zone
        if ((aSize >> page_threshold) == 0 || aSize <= chunksize(mem2chunk(aPtr)) - CHUNK_OVERHEAD)
            {
            // the new size is below the page limit, or smaller than the old one (so the cell won't move)
            Lock();
            TAny* addr = dlrealloc(aPtr, aSize);
            Unlock();
            return addr;
            }
        }
    else if (lowbits(aPtr, pagesize) <= cellalign)
        {
        // original cell is either NULL or in the paged zone
        if (!aPtr)
            return Alloc(aSize);

        // Either the new size is larger (in which case it will still be in the paged zone),
        // or it is smaller, but a shrinking cell is never moved, so it stays in the paged zone.
        // Must handle the [rare] case that aSize == 0, as paged_[re]allocate() would panic.
        if (aSize == 0)
            aSize = 1;
        Lock();
        TAny* addr = paged_reallocate(aPtr, aSize);
        Unlock();
        return addr;
        }
    else
        {
        // original cell is in the slab zone;
        // return the original if the new size is no larger
        if (aSize <= header_size(slab::slabfor(aPtr)->header))
            return aPtr;
        }
    // can't do better than allocate/copy/free
    TAny* newp = Alloc(aSize);
    if (newp)
        {
        TInt oldsize = AllocLen(aPtr);
        memcpy(newp, aPtr, oldsize < aSize ? oldsize : aSize);
        Free(aPtr);
        }
    return newp;
    }
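
/*
 * ReAlloc() policy, illustrated (sizes indicative only). A cell moves only
 * when it must grow beyond what its current zone can satisfy in place:
 *   p = Alloc(32);   ReAlloc(p, 16)     -> p returned unchanged (slab zone)
 *   p = Alloc(2000); ReAlloc(p, 3000)   -> dlrealloc() within the DL zone
 *   p = Alloc(2000); ReAlloc(p, 100000) -> falls back to allocate/copy/free
 */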

UEXPORT_C TInt RSymbianDLHeap::Available(TInt& aBiggestBlock) const
    {
    aBiggestBlock = 0;
    return 1000;
    /* Needs a real implementation */
    // TODO: return iHeap.Available(aBiggestBlock);
    }

UEXPORT_C TInt RSymbianDLHeap::AllocSize(TInt& aTotalAllocSize) const
    {
    aTotalAllocSize = 0;
    return 0;
    }

UEXPORT_C TInt RSymbianDLHeap::DebugFunction(TInt /*aFunc*/, TAny* /*a1*/, TAny* /*a2*/)
    {
    return 0;
    }

UEXPORT_C TInt RSymbianDLHeap::Extension_(TUint /* aExtensionId */, TAny*& /* a0 */, TAny* /* a1 */)
    {
    return KErrNotSupported;
    }

long sysconf(int size)
    {
    // Emulate POSIX sysconf() for the page size; fall back to 4KB if the OS call fails.
    if (UserHal::PageSizeInBytes(size) != KErrNone)
        size = 0x1000;
    return size;
    }


///////////////////////////////////////////////////////////////////////////////
// imported from dla.cpp
///////////////////////////////////////////////////////////////////////////////

//#include <unistd.h>
//#define DEBUG_REALLOC
#ifdef DEBUG_REALLOC
#include <e32debug.h>
#endif

inline int RSymbianDLHeap::init_mparams(size_t aTrimThreshold /*= DEFAULT_TRIM_THRESHOLD*/)
{
    if (mparams.page_size == 0)
    {
        size_t s;
        mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
        mparams.trim_threshold = aTrimThreshold;
#if MORECORE_CONTIGUOUS
        mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
#else /* MORECORE_CONTIGUOUS */
        mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
#endif /* MORECORE_CONTIGUOUS */

        s = (size_t)0x58585858U;
        ACQUIRE_MAGIC_INIT_LOCK(&mparams);
        if (mparams.magic == 0) {
            mparams.magic = s;
            /* Set up lock for main malloc area */
            INITIAL_LOCK(&gm->mutex);
            gm->mflags = mparams.default_mflags;
        }
        RELEASE_MAGIC_INIT_LOCK(&mparams);

        // DAN: replaced malloc_getpagesize with the OS page size
        // mparams.page_size = malloc_getpagesize;
        int temp = 0;
        GET_PAGE_SIZE(temp);
        mparams.page_size = temp;

        mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
            DEFAULT_GRANULARITY : mparams.page_size);

        /* Sanity-check configuration:
           size_t must be unsigned and as wide as pointer type.
           ints must be at least 4 bytes.
           alignment must be at least 8.
           Alignment, min chunk size, and page size must all be powers of 2.
        */
        if ((sizeof(size_t) != sizeof(TUint8*)) ||
            (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
            (sizeof(int) < 4) ||
            (MALLOC_ALIGNMENT < (size_t)8U) ||
            ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
            ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
            ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
            ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0))
            ABORT;
    }
    return 0;
}

inline void RSymbianDLHeap::init_bins(mstate m) {
    /* Establish circular links for smallbins */
    bindex_t i;
    for (i = 0; i < NSMALLBINS; ++i) {
        sbinptr bin = smallbin_at(m,i);
        bin->fd = bin->bk = bin;
    }
}
/* ---------------------------- malloc support --------------------------- */

/* allocate a large request from the best fitting chunk in a treebin */
void* RSymbianDLHeap::tmalloc_large(mstate m, size_t nb) {
    tchunkptr v = 0;
    size_t rsize = -nb; /* Unsigned negation */
    tchunkptr t;
    bindex_t idx;
    compute_tree_index(nb, idx);

    if ((t = *treebin_at(m, idx)) != 0) {
        /* Traverse tree for this bin looking for node with size == nb */
        size_t sizebits = nb << leftshift_for_tree_index(idx);
        tchunkptr rst = 0;  /* The deepest untaken right subtree */
        for (;;) {
            tchunkptr rt;
            size_t trem = chunksize(t) - nb;
            if (trem < rsize) {
                v = t;
                if ((rsize = trem) == 0)
                    break;
            }
            rt = t->child[1];
            t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
            if (rt != 0 && rt != t)
                rst = rt;
            if (t == 0) {
                t = rst; /* set t to least subtree holding sizes > nb */
                break;
            }
            sizebits <<= 1;
        }
    }
    if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
        binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
        if (leftbits != 0) {
            bindex_t i;
            binmap_t leastbit = least_bit(leftbits);
            compute_bit2idx(leastbit, i);
            t = *treebin_at(m, i);
        }
    }
    while (t != 0) { /* find smallest of tree or subtree */
        size_t trem = chunksize(t) - nb;
        if (trem < rsize) {
            rsize = trem;
            v = t;
        }
        t = leftmost_child(t);
    }
    /* If dv is a better fit, return 0 so malloc will use it */
    if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
        if (RTCHECK(ok_address(m, v))) { /* split */
            mchunkptr r = chunk_plus_offset(v, nb);
            assert(chunksize(v) == rsize + nb);
            if (RTCHECK(ok_next(v, r))) {
                unlink_large_chunk(m, v);
                if (rsize < MIN_CHUNK_SIZE)
                    set_inuse_and_pinuse(m, v, (rsize + nb));
                else {
                    set_size_and_pinuse_of_inuse_chunk(m, v, nb);
                    set_size_and_pinuse_of_free_chunk(r, rsize);
                    insert_chunk(m, r, rsize);
                }
                return chunk2mem(v);
            }
        }
        CORRUPTION_ERROR_ACTION(m);
    }
    return 0;
}

/* allocate a small request from the best fitting chunk in a treebin */
void* RSymbianDLHeap::tmalloc_small(mstate m, size_t nb) {
    tchunkptr t, v;
    size_t rsize;
    bindex_t i;
    binmap_t leastbit = least_bit(m->treemap);
    compute_bit2idx(leastbit, i);

    v = t = *treebin_at(m, i);
    rsize = chunksize(t) - nb;

    while ((t = leftmost_child(t)) != 0) {
        size_t trem = chunksize(t) - nb;
        if (trem < rsize) {
            rsize = trem;
            v = t;
        }
    }

    if (RTCHECK(ok_address(m, v))) {
        mchunkptr r = chunk_plus_offset(v, nb);
        assert(chunksize(v) == rsize + nb);
        if (RTCHECK(ok_next(v, r))) {
            unlink_large_chunk(m, v);
            if (rsize < MIN_CHUNK_SIZE)
                set_inuse_and_pinuse(m, v, (rsize + nb));
            else {
                set_size_and_pinuse_of_inuse_chunk(m, v, nb);
                set_size_and_pinuse_of_free_chunk(r, rsize);
                replace_dv(m, r, rsize);
            }
            return chunk2mem(v);
        }
    }
    CORRUPTION_ERROR_ACTION(m);
    return 0;
}

inline void RSymbianDLHeap::init_top(mstate m, mchunkptr p, size_t psize)
{
    /* Ensure alignment */
    size_t offset = align_offset(chunk2mem(p));
    p = (mchunkptr)((TUint8*)p + offset);
    psize -= offset;
    m->top = p;
    m->topsize = psize;
    p->head = psize | PINUSE_BIT;
    /* set size of fake trailing chunk holding overhead space only once */
    mchunkptr chunkPlusOff = chunk_plus_offset(p, psize);
    chunkPlusOff->head = TOP_FOOT_SIZE;
    m->trim_check = mparams.trim_threshold; /* reset on each update */
}

void* RSymbianDLHeap::internal_realloc(mstate m, void* oldmem, size_t bytes)
{
    if (bytes >= MAX_REQUEST) {
        MALLOC_FAILURE_ACTION;
        return 0;
    }
    if (!PREACTION(m)) {
        mchunkptr oldp = mem2chunk(oldmem);
        size_t oldsize = chunksize(oldp);
        mchunkptr next = chunk_plus_offset(oldp, oldsize);
        mchunkptr newp = 0;
        void* extra = 0;

        /* Try to either shrink or extend into top. Else malloc-copy-free */

        if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
                ok_next(oldp, next) && ok_pinuse(next))) {
            size_t nb = request2size(bytes);
            if (is_mmapped(oldp))
                newp = mmap_resize(m, oldp, nb);
            else if (oldsize >= nb) { /* already big enough */
                size_t rsize = oldsize - nb;
                newp = oldp;
                if (rsize >= MIN_CHUNK_SIZE) {
                    mchunkptr remainder = chunk_plus_offset(newp, nb);
                    set_inuse(m, newp, nb);
                    set_inuse(m, remainder, rsize);
                    extra = chunk2mem(remainder);
                }
            }
            /* AMOD: modified for optimisation */
            else if (next == m->top && oldsize + m->topsize > nb) {
                /* Expand into top */
                size_t newsize = oldsize + m->topsize;
                size_t newtopsize = newsize - nb;
                mchunkptr newtop = chunk_plus_offset(oldp, nb);
                set_inuse(m, oldp, nb);
                newtop->head = newtopsize | PINUSE_BIT;
                m->top = newtop;
                m->topsize = newtopsize;
                newp = oldp;
            }
        }
        else {
            USAGE_ERROR_ACTION(m, oldmem);
            POSTACTION(m);
            return 0;
        }

        POSTACTION(m);

        if (newp != 0) {
            if (extra != 0) {
                internal_free(m, extra);
            }
            check_inuse_chunk(m, newp);
            return chunk2mem(newp);
        }
        else {
            void* newmem = internal_malloc(m, bytes);
            if (newmem != 0) {
                size_t oc = oldsize - overhead_for(oldp);
                memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
                internal_free(m, oldmem);
            }
            return newmem;
        }
    }
    return 0;
}
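
/*
 * internal_realloc outcomes, illustrated: a chunk stays in place when it is
 * mmapped and mmap_resize() succeeds, when it can shrink (a remainder of at
 * least MIN_CHUNK_SIZE is split off and freed), or when it borders top and
 * can expand into it; every other case falls back to malloc-copy-free.
 */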
/* ----------------------------- statistics ------------------------------ */
mallinfo RSymbianDLHeap::internal_mallinfo(mstate m) {
    struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    TInt chunkCnt = 0;
    if (!PREACTION(m)) {
        check_malloc_state(m);
        if (is_initialized(m)) {
            size_t nfree = SIZE_T_ONE; /* top always free */
            size_t mfree = m->topsize + TOP_FOOT_SIZE;
            size_t sum = mfree;
            msegmentptr s = &m->seg;
            while (s != 0) {
                mchunkptr q = align_as_chunk(s->base);
                chunkCnt++;
                while (segment_holds(s, q) &&
                        q != m->top && q->head != FENCEPOST_HEAD) {
                    size_t sz = chunksize(q);
                    sum += sz;
                    if (!cinuse(q)) {
                        mfree += sz;
                        ++nfree;
                    }
                    q = next_chunk(q);
                }
                s = s->next;
            }
            nm.arena = sum;
            nm.ordblks = nfree;
            nm.hblkhd = m->footprint - sum;
            nm.usmblks = m->max_footprint;
            nm.uordblks = m->footprint - mfree;
            nm.fordblks = mfree;
            nm.keepcost = m->topsize;
            nm.cellCount = chunkCnt; /* number of segments walked */
        }
        POSTACTION(m);
    }
    return nm;
}

void RSymbianDLHeap::internal_malloc_stats(mstate m) {
    if (!PREACTION(m)) {
        size_t maxfp = 0;
        size_t fp = 0;
        size_t used = 0;
        check_malloc_state(m);
        if (is_initialized(m)) {
            msegmentptr s = &m->seg;
            maxfp = m->max_footprint;
            fp = m->footprint;
            used = fp - (m->topsize + TOP_FOOT_SIZE);

            while (s != 0) {
                mchunkptr q = align_as_chunk(s->base);
                while (segment_holds(s, q) &&
                        q != m->top && q->head != FENCEPOST_HEAD) {
                    if (!cinuse(q))
                        used -= chunksize(q);
                    q = next_chunk(q);
                }
                s = s->next;
            }
        }
        POSTACTION(m);
    }
}

/* support for mallopt */
int RSymbianDLHeap::change_mparam(int param_number, int value) {
    size_t val = (size_t)value;
    init_mparams(DEFAULT_TRIM_THRESHOLD);
    switch (param_number) {
    case M_TRIM_THRESHOLD:
        mparams.trim_threshold = val;
        return 1;
    case M_GRANULARITY:
        if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
            mparams.granularity = val;
            return 1;
        }
        else
            return 0;
    case M_MMAP_THRESHOLD:
        mparams.mmap_threshold = val;
        return 1;
    default:
        return 0;
    }
}

/* Get memory from system using MORECORE or MMAP */
void* RSymbianDLHeap::sys_alloc(mstate m, size_t nb)
{
    TUint8* tbase = CMFAIL;
    size_t tsize = 0;
    flag_t mmap_flag = 0;
    //init_mparams(); /* no need to call init_mparams here */

    /* Directly map large chunks */
    if (use_mmap(m) && nb >= mparams.mmap_threshold) {
        void* mem = mmap_alloc(m, nb);
        if (mem != 0)
            return mem;
    }
    /*
    Try getting memory in any of three ways (in most-preferred to
    least-preferred order):
    1. A call to MORECORE that can normally contiguously extend memory.
       (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE, or if
       main space is mmapped or a previous contiguous call failed)
    2. A call to MMAP new space (disabled if not HAVE_MMAP).
       Note that under the default settings, if MORECORE is unable to
       fulfill a request, and HAVE_MMAP is true, then mmap is
       used as a noncontiguous system allocator. This is a useful backup
       strategy for systems with holes in address spaces -- in this case
       sbrk cannot contiguously expand the heap, but mmap may be able to
       find space.
    3. A call to MORECORE that cannot usually contiguously extend memory.
       (disabled if not HAVE_MORECORE)
    */
    if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
        TUint8* br = CMFAIL;
        msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (TUint8*)m->top);
        size_t asize = 0;
        ACQUIRE_MORECORE_LOCK(m);
        if (ss == 0) { /* First time through or recovery */
            TUint8* base = (TUint8*)CALL_MORECORE(0);
            if (base != CMFAIL) {
                asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
                /* Adjust to end on a page boundary */
                if (!is_page_aligned(base))
                    asize += (page_align((size_t)base) - (size_t)base);
                /* Can't call MORECORE if size is negative when treated as signed */
                if (asize < HALF_MAX_SIZE_T && (br = (TUint8*)(CALL_MORECORE(asize))) == base) {
                    tbase = base;
                    tsize = asize;
                }
            }
        }
        else {
            /* Subtract out existing available top space from MORECORE request. */
            asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
            /* Use mem here only if it did continuously extend old space */
            if (asize < HALF_MAX_SIZE_T &&
                    (br = (TUint8*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
                tbase = br;
                tsize = asize;
            }
        }
        if (tbase == CMFAIL) { /* Cope with partial failure */
            if (br != CMFAIL) { /* Try to use/extend the space we did get */
                if (asize < HALF_MAX_SIZE_T &&
                        asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
                    size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
                    if (esize < HALF_MAX_SIZE_T) {
                        TUint8* end = (TUint8*)CALL_MORECORE(esize);
                        if (end != CMFAIL)
                            asize += esize;
                        else { /* Can't use; try to release */
                            CALL_MORECORE(-asize);
                            br = CMFAIL;
                        }
                    }
                }
            }
            if (br != CMFAIL) { /* Use the space we did get */
                tbase = br;
                tsize = asize;
            }
            else
                disable_contiguous(m); /* Don't try contiguous path in the future */
        }
        RELEASE_MORECORE_LOCK(m);
    }
    if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
        size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
        size_t rsize = granularity_align(req);
        if (rsize > nb) { /* Fail if wraps around zero */
            TUint8* mp = (TUint8*)(CALL_MMAP(rsize));
            if (mp != CMFAIL) {
                tbase = mp;
                tsize = rsize;
                mmap_flag = IS_MMAPPED_BIT;
            }
        }
    }
    if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
        size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
        if (asize < HALF_MAX_SIZE_T) {
            TUint8* br = CMFAIL;
            TUint8* end = CMFAIL;
            ACQUIRE_MORECORE_LOCK(m);
            br = (TUint8*)(CALL_MORECORE(asize));
            end = (TUint8*)(CALL_MORECORE(0));
            RELEASE_MORECORE_LOCK(m);
            if (br != CMFAIL && end != CMFAIL && br < end) {
                size_t ssize = end - br;
                if (ssize > nb + TOP_FOOT_SIZE) {
                    tbase = br;
                    tsize = ssize;
                }
            }
        }
    }
    if (tbase != CMFAIL) {
        if ((m->footprint += tsize) > m->max_footprint)
            m->max_footprint = m->footprint;
        if (!is_initialized(m)) { /* first-time initialization */
            m->seg.base = m->least_addr = tbase;
            m->seg.size = tsize;
            m->seg.sflags = mmap_flag;
            m->magic = mparams.magic;
            init_bins(m);
            if (is_global(m))
                init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
            else {
                /* Offset top by embedded malloc_state */
                mchunkptr mn = next_chunk(mem2chunk(m));
                init_top(m, mn, (size_t)((tbase + tsize) - (TUint8*)mn) - TOP_FOOT_SIZE);
            }
        }
        else {
            /* Try to merge with an existing segment */
            msegmentptr sp = &m->seg;
            while (sp != 0 && tbase != sp->base + sp->size)
                sp = sp->next;
            if (sp != 0 && !is_extern_segment(sp) &&
                    (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
                    segment_holds(sp, m->top)) { /* append */
                sp->size += tsize;
                init_top(m, m->top, m->topsize + tsize);
            }
            else {
                if (tbase < m->least_addr)
                    m->least_addr = tbase;
                sp = &m->seg;
                while (sp != 0 && sp->base != tbase + tsize)
                    sp = sp->next;
                if (sp != 0 &&
                        !is_extern_segment(sp) &&
                        (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
                    TUint8* oldbase = sp->base;
                    sp->base = tbase;
                    sp->size += tsize;
                    return prepend_alloc(m, tbase, oldbase, nb);
                }
                else
                    add_segment(m, tbase, tsize, mmap_flag);
            }
        }
        if (nb < m->topsize) { /* Allocate from new or extended top space */
            size_t rsize = m->topsize -= nb;
            mchunkptr p = m->top;
            mchunkptr r = m->top = chunk_plus_offset(p, nb);
            r->head = rsize | PINUSE_BIT;
            set_size_and_pinuse_of_inuse_chunk(m, p, nb);
            check_top_chunk(m, m->top);
            check_malloced_chunk(m, chunk2mem(p), nb);
            return chunk2mem(p);
        }
    }
    /* need to check this */
    //errno = -1;
    return 0;
}

msegmentptr RSymbianDLHeap::segment_holding(mstate m, TUint8* addr) {
    msegmentptr sp = &m->seg;
    for (;;) {
        if (addr >= sp->base && addr < sp->base + sp->size)
            return sp;
        if ((sp = sp->next) == 0)
            return 0;
    }
}

/* Unlink the first chunk from a smallbin */
inline void RSymbianDLHeap::unlink_first_small_chunk(mstate M, mchunkptr B, mchunkptr P, bindex_t& I)
{
    mchunkptr F = P->fd;
    assert(P != B);
    assert(P != F);
    assert(chunksize(P) == small_index2size(I));
    if (B == F)
        clear_smallmap(M, I);
    else if (RTCHECK(ok_address(M, F))) {
        B->fd = F;
        F->bk = B;
    }
    else {
        CORRUPTION_ERROR_ACTION(M);
    }
}

/* Link a free chunk into a smallbin */
inline void RSymbianDLHeap::insert_small_chunk(mstate M, mchunkptr P, size_t S)
{
    bindex_t I = small_index(S);
    mchunkptr B = smallbin_at(M, I);
    mchunkptr F = B;
    assert(S >= MIN_CHUNK_SIZE);
    if (!smallmap_is_marked(M, I))
        mark_smallmap(M, I);
    else if (RTCHECK(ok_address(M, B->fd)))
        F = B->fd;
    else {
        CORRUPTION_ERROR_ACTION(M);
    }
    B->fd = P;
    F->bk = P;
    P->fd = F;
    P->bk = B;
}


inline void RSymbianDLHeap::insert_chunk(mstate M, mchunkptr P, size_t S)
{
    if (is_small(S))
        insert_small_chunk(M, P, S);
    else {
        tchunkptr TP = (tchunkptr)(P);
        insert_large_chunk(M, TP, S);
    }
}

inline void RSymbianDLHeap::unlink_large_chunk(mstate M, tchunkptr X)
{
    tchunkptr XP = X->parent;
    tchunkptr R;
    if (X->bk != X) {
        tchunkptr F = X->fd;
        R = X->bk;
        if (RTCHECK(ok_address(M, F))) {
            F->bk = R;
            R->fd = F;
        }
        else {
            CORRUPTION_ERROR_ACTION(M);
        }
    }
    else {
        tchunkptr* RP;
        if (((R = *(RP = &(X->child[1]))) != 0) ||
                ((R = *(RP = &(X->child[0]))) != 0)) {
            tchunkptr* CP;
            while ((*(CP = &(R->child[1])) != 0) ||
                    (*(CP = &(R->child[0])) != 0)) {
                R = *(RP = CP);
            }
            if (RTCHECK(ok_address(M, RP)))
                *RP = 0;
            else {
                CORRUPTION_ERROR_ACTION(M);
            }
        }
    }
    if (XP != 0) {
        tbinptr* H = treebin_at(M, X->index);
        if (X == *H) {
            if ((*H = R) == 0)
                clear_treemap(M, X->index);
        }
        else if (RTCHECK(ok_address(M, XP))) {
            if (XP->child[0] == X)
                XP->child[0] = R;
            else
                XP->child[1] = R;
        }
        else
            CORRUPTION_ERROR_ACTION(M);
        if (R != 0) {
            if (RTCHECK(ok_address(M, R))) {
                tchunkptr C0, C1;
                R->parent = XP;
                if ((C0 = X->child[0]) != 0) {
                    if (RTCHECK(ok_address(M, C0))) {
                        R->child[0] = C0;
                        C0->parent = R;
                    }
                    else
                        CORRUPTION_ERROR_ACTION(M);
                }
                if ((C1 = X->child[1]) != 0) {
                    if (RTCHECK(ok_address(M, C1))) {
                        R->child[1] = C1;
                        C1->parent = R;
                    }
                    else
                        CORRUPTION_ERROR_ACTION(M);
                }
            }
            else
                CORRUPTION_ERROR_ACTION(M);
        }
    }
}

/* Unlink a chunk from a smallbin */
inline void RSymbianDLHeap::unlink_small_chunk(mstate M, mchunkptr P, size_t S)
{
    mchunkptr F = P->fd;
    mchunkptr B = P->bk;
    bindex_t I = small_index(S);
    assert(P != B);
    assert(P != F);
    assert(chunksize(P) == small_index2size(I));
    if (F == B)
        clear_smallmap(M, I);
    else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&
            (B == smallbin_at(M,I) || ok_address(M, B)))) {
        F->bk = B;
        B->fd = F;
    }
    else {
        CORRUPTION_ERROR_ACTION(M);
    }
}

inline void RSymbianDLHeap::unlink_chunk(mstate M, mchunkptr P, size_t S)
{
    if (is_small(S))
        unlink_small_chunk(M, P, S);
    else {
        tchunkptr TP = (tchunkptr)(P);
        unlink_large_chunk(M, TP);
    }
}

inline void RSymbianDLHeap::compute_tree_index(size_t S, bindex_t& I)
{
    size_t X = S >> TREEBIN_SHIFT;
    if (X == 0)
        I = 0;
    else if (X > 0xFFFF)
        I = NTREEBINS-1;
    else {
        unsigned int Y = (unsigned int)X;
        unsigned int N = ((Y - 0x100) >> 16) & 8;
        unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;
        N += K;
        N += K = (((Y <<= K) - 0x4000) >> 16) & 2;
        K = 14 - N + ((Y <<= K) >> 15);
        I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));
    }
}
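
/*
 * compute_tree_index worked example (assuming TREEBIN_SHIFT == 8 as in
 * standard dlmalloc): for S == 384, X == S>>8 == 1, so K == 0 and
 * I == (0<<1) + ((384>>7)&1) == 1, i.e. the bin holding sizes [384, 512).
 */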

/* ------------------------- Operations on trees ------------------------- */

/* Insert chunk into tree */
inline void RSymbianDLHeap::insert_large_chunk(mstate M, tchunkptr X, size_t S)
{
    tbinptr* H;
    bindex_t I;
    compute_tree_index(S, I);
    H = treebin_at(M, I);
    X->index = I;
    X->child[0] = X->child[1] = 0;
    if (!treemap_is_marked(M, I)) {
        mark_treemap(M, I);
        *H = X;
        X->parent = (tchunkptr)H;
        X->fd = X->bk = X;
    }
    else {
        tchunkptr T = *H;
        size_t K = S << leftshift_for_tree_index(I);
        for (;;) {
            if (chunksize(T) != S) {
                tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);
                K <<= 1;
                if (*C != 0)
                    T = *C;
                else if (RTCHECK(ok_address(M, C))) {
                    *C = X;
                    X->parent = T;
                    X->fd = X->bk = X;
                    break;
                }
                else {
                    CORRUPTION_ERROR_ACTION(M);
                    break;
                }
            }
            else {
                tchunkptr F = T->fd;
                if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {
                    T->fd = F->bk = X;
                    X->fd = F;
                    X->bk = T;
                    X->parent = 0;
                    break;
                }
                else {
                    CORRUPTION_ERROR_ACTION(M);
                    break;
                }
            }
        }
    }
}

/*
Unlink steps:

1. If x is a chained node, unlink it from its same-sized fd/bk links
   and choose its bk node as its replacement.
2. If x was the last node of its size, but not a leaf node, it must
   be replaced with a leaf node (not merely one with an open left or
   right), to make sure that lefts and rights of descendants
   correspond properly to bit masks. We use the rightmost descendant
   of x. We could use any other leaf, but this is easy to locate and
   tends to counteract removal of leftmosts elsewhere, and so keeps
   paths shorter than minimally guaranteed. This doesn't loop much
   because on average a node in a tree is near the bottom.
3. If x is the base of a chain (i.e., has parent links) relink
   x's parent and children to x's replacement (or null if none).
*/

/* Replace dv node, binning the old one */
/* Used only when dvsize known to be small */
inline void RSymbianDLHeap::replace_dv(mstate M, mchunkptr P, size_t S)
{
    size_t DVS = M->dvsize;
    if (DVS != 0) {
        mchunkptr DV = M->dv;
        assert(is_small(DVS));
        insert_small_chunk(M, DV, DVS);
    }
    M->dvsize = S;
    M->dv = P;
}

inline void RSymbianDLHeap::compute_bit2idx(binmap_t X, bindex_t& I)
{
    unsigned int Y = X - 1;
    unsigned int K = Y >> (16-4) & 16;
    unsigned int N = K; Y >>= K;
    N += K = Y >> (8-3) & 8; Y >>= K;
    N += K = Y >> (4-2) & 4; Y >>= K;
    N += K = Y >> (2-1) & 2; Y >>= K;
    N += K = Y >> (1-0) & 1; Y >>= K;
    I = (bindex_t)(N + Y);
}
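
/*
 * compute_bit2idx worked example: for a binmap with a single set bit the
 * result is that bit's index. With X == 0x20, Y == 0x1F; the shift cascade
 * accumulates N == 4 and leaves Y == 1, so I == N + Y == 5.
 */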

void RSymbianDLHeap::add_segment(mstate m, TUint8* tbase, size_t tsize, flag_t mmapped) {
    /* Determine locations and sizes of segment, fenceposts, old top */
    TUint8* old_top = (TUint8*)m->top;
    msegmentptr oldsp = segment_holding(m, old_top);
    TUint8* old_end = oldsp->base + oldsp->size;
    size_t ssize = pad_request(sizeof(struct malloc_segment));
    TUint8* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    size_t offset = align_offset(chunk2mem(rawsp));
    TUint8* asp = rawsp + offset;
    TUint8* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
    mchunkptr sp = (mchunkptr)csp;
    msegmentptr ss = (msegmentptr)(chunk2mem(sp));
    mchunkptr tnext = chunk_plus_offset(sp, ssize);
    mchunkptr p = tnext;
    int nfences = 0;

    /* reset top to new space */
    init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);

    /* Set up segment record */
    assert(is_aligned(ss));
    set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
    *ss = m->seg; /* Push current record */
    m->seg.base = tbase;
    m->seg.size = tsize;
    m->seg.sflags = mmapped;
    m->seg.next = ss;

    /* Insert trailing fenceposts */
    for (;;) {
        mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
        p->head = FENCEPOST_HEAD;
        ++nfences;
        if ((TUint8*)(&(nextp->head)) < old_end)
            p = nextp;
        else
            break;
    }
    assert(nfences >= 2);

    /* Insert the rest of old top into a bin as an ordinary free chunk */
    if (csp != old_top) {
        mchunkptr q = (mchunkptr)old_top;
        size_t psize = csp - old_top;
        mchunkptr tn = chunk_plus_offset(q, psize);
        set_free_with_pinuse(q, psize, tn);
        insert_chunk(m, q, psize);
    }

    check_top_chunk(m, m->top);
}


void* RSymbianDLHeap::prepend_alloc(mstate m, TUint8* newbase, TUint8* oldbase, size_t nb) {
    mchunkptr p = align_as_chunk(newbase);
    mchunkptr oldfirst = align_as_chunk(oldbase);
    size_t psize = (TUint8*)oldfirst - (TUint8*)p;
    mchunkptr q = chunk_plus_offset(p, nb);
    size_t qsize = psize - nb;
    set_size_and_pinuse_of_inuse_chunk(m, p, nb);

    assert((TUint8*)oldfirst > (TUint8*)q);
    assert(pinuse(oldfirst));
    assert(qsize >= MIN_CHUNK_SIZE);

    /* consolidate remainder with first chunk of old base */
    if (oldfirst == m->top) {
        size_t tsize = m->topsize += qsize;
        m->top = q;
        q->head = tsize | PINUSE_BIT;
        check_top_chunk(m, q);
    }
    else if (oldfirst == m->dv) {
        size_t dsize = m->dvsize += qsize;
        m->dv = q;
        set_size_and_pinuse_of_free_chunk(q, dsize);
    }
    else {
        if (!cinuse(oldfirst)) {
            size_t nsize = chunksize(oldfirst);
            unlink_chunk(m, oldfirst, nsize);
            oldfirst = chunk_plus_offset(oldfirst, nsize);
            qsize += nsize;
        }
        set_free_with_pinuse(q, qsize, oldfirst);
        insert_chunk(m, q, qsize);
        check_free_chunk(m, q);
    }

    check_malloced_chunk(m, chunk2mem(p), nb);
    return chunk2mem(p);
}

void* RSymbianDLHeap::mmap_alloc(mstate m, size_t nb) {
    size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    if (mmsize > nb) { /* Check for wrap around 0 */
        TUint8* mm = (TUint8*)(DIRECT_MMAP(mmsize));
        if (mm != CMFAIL) {
            size_t offset = align_offset(chunk2mem(mm));
            size_t psize = mmsize - offset - MMAP_FOOT_PAD;
            mchunkptr p = (mchunkptr)(mm + offset);
            p->prev_foot = offset | IS_MMAPPED_BIT;
            (p)->head = (psize|CINUSE_BIT);
            mark_inuse_foot(m, p, psize);
            chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
            chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;

            if (mm < m->least_addr)
                m->least_addr = mm;
            if ((m->footprint += mmsize) > m->max_footprint)
                m->max_footprint = m->footprint;
            assert(is_aligned(chunk2mem(p)));
            check_mmapped_chunk(m, p);
            return chunk2mem(p);
        }
    }
    return 0;
}

int RSymbianDLHeap::sys_trim(mstate m, size_t pad)
{
    size_t released = 0;
    if (pad < MAX_REQUEST && is_initialized(m)) {
        pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */

        if (m->topsize > pad) {
            /* Shrink top space in granularity-size units, keeping at least one */
            size_t unit = mparams.granularity;
            size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit;
            msegmentptr sp = segment_holding(m, (TUint8*)m->top);

            if (!is_extern_segment(sp)) {
                if (is_mmapped_segment(sp)) {
                    if (HAVE_MMAP &&
                            sp->size >= extra &&
                            !has_segment_link(m, sp)) { /* can't shrink if pinned */
                        size_t newsize = sp->size - extra;
                        /* Prefer mremap, fall back to munmap */
                        if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
                                (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
                            released = extra;
                        }
                    }
                }
                else if (HAVE_MORECORE) {
                    if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
                        extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
                    ACQUIRE_MORECORE_LOCK(m);
                    {
                        /* Make sure end of memory is where we last set it. */
                        TUint8* old_br = (TUint8*)(CALL_MORECORE(0));
                        if (old_br == sp->base + sp->size) {
                            TUint8* rel_br = (TUint8*)(CALL_MORECORE(-extra));
                            TUint8* new_br = (TUint8*)(CALL_MORECORE(0));
                            if (rel_br != CMFAIL && new_br < old_br)
                                released = old_br - new_br;
                        }
                    }
                    RELEASE_MORECORE_LOCK(m);
                }
            }

            if (released != 0) {
                sp->size -= released;
                m->footprint -= released;
                init_top(m, m->top, m->topsize - released);
                check_top_chunk(m, m->top);
            }
        }

        /* Unmap any unused mmapped segments */
        if (HAVE_MMAP)
            released += release_unused_segments(m);

        /* On failure, disable autotrim to avoid repeated failed future calls */
        if (released == 0)
            m->trim_check = MAX_SIZE_T;
    }

    return (released != 0)? 1 : 0;
}

inline int RSymbianDLHeap::has_segment_link(mstate m, msegmentptr ss)
{
    msegmentptr sp = &m->seg;
    for (;;) {
        if ((TUint8*)sp >= ss->base && (TUint8*)sp < ss->base + ss->size)
            return 1;
        if ((sp = sp->next) == 0)
            return 0;
    }
}

/* Unmap and unlink any mmapped segments that don't contain used chunks */
size_t RSymbianDLHeap::release_unused_segments(mstate m)
{
    size_t released = 0;
    msegmentptr pred = &m->seg;
    msegmentptr sp = pred->next;
    while (sp != 0) {
        TUint8* base = sp->base;
        size_t size = sp->size;
        msegmentptr next = sp->next;
        if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
            mchunkptr p = align_as_chunk(base);
            size_t psize = chunksize(p);
            /* Can unmap if first chunk holds entire segment and not pinned */
            if (!cinuse(p) && (TUint8*)p + psize >= base + size - TOP_FOOT_SIZE) {
                tchunkptr tp = (tchunkptr)p;
                assert(segment_holds(sp, (TUint8*)sp));
                if (p == m->dv) {
                    m->dv = 0;
                    m->dvsize = 0;
                }
                else {
                    unlink_large_chunk(m, tp);
                }
                if (CALL_MUNMAP(base, size) == 0) {
                    released += size;
                    m->footprint -= size;
                    /* unlink obsoleted record */
                    sp = pred;
                    sp->next = next;
                }
                else { /* back out if cannot unmap */
                    insert_large_chunk(m, tp, psize);
                }
            }
        }
        pred = sp;
        sp = next;
    } /* end of while */
    return released;
}

/* Realloc using mmap */
inline mchunkptr RSymbianDLHeap::mmap_resize(mstate m, mchunkptr oldp, size_t nb)
{
    size_t oldsize = chunksize(oldp);
    if (is_small(nb)) /* Can't shrink mmap regions below small size */
        return 0;
    /* Keep old chunk if big enough but not too big */
    if (oldsize >= nb + SIZE_T_SIZE &&
            (oldsize - nb) <= (mparams.granularity << 1))
        return oldp;
    else {
        size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
        size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
        size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
        TUint8* cp = (TUint8*)CALL_MREMAP((char*)oldp - offset, oldmmsize, newmmsize, 1);
        if (cp != CMFAIL) {
            mchunkptr newp = (mchunkptr)(cp + offset);
            size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
            newp->head = (psize|CINUSE_BIT);
            mark_inuse_foot(m, newp, psize);
            chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
            chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;

            if (cp < m->least_addr)
                m->least_addr = cp;
            if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
                m->max_footprint = m->footprint;
            check_mmapped_chunk(m, newp);
            return newp;
        }
    }
    return 0;
}


void RSymbianDLHeap::Init_Dlmalloc(size_t capacity, int locked, size_t aTrimThreshold)
{
    memset(gm, 0, sizeof(malloc_state));
    init_mparams(aTrimThreshold); /* Ensure pagesize etc initialized */
    // The maximum amount that can be allocated is:
    // 2^(8*sizeof(size_t)) - sizeof(malloc_state) - TOP_FOOT_SIZE - page size (all suitably padded).
    // If the capacity exceeds this, no allocation will be done.
    gm->seg.base = gm->least_addr = iBase;
    gm->seg.size = capacity;
    gm->seg.sflags = !IS_MMAPPED_BIT;
    set_lock(gm, locked);
    gm->magic = mparams.magic;
    init_bins(gm);
    init_top(gm, (mchunkptr)iBase, capacity - TOP_FOOT_SIZE);
}

void* RSymbianDLHeap::dlmalloc(size_t bytes) {
    /*
    Basic algorithm:
    If a small request (< 256 bytes minus per-chunk overhead):
        1. If one exists, use a remainderless chunk in associated smallbin.
           (Remainderless means that there are too few excess bytes to
           represent as a chunk.)
        2. If it is big enough, use the dv chunk, which is normally the
           chunk adjacent to the one used for the most recent small request.
        3. If one exists, split the smallest available chunk in a bin,
           saving remainder in dv.
        4. If it is big enough, use the top chunk.
        5. If available, get memory from system and use it.
    Otherwise, for a large request:
        1. Find the smallest available binned chunk that fits, and use it
           if it is better fitting than dv chunk, splitting if necessary.
        2. If better fitting than any binned chunk, use the dv chunk.
        3. If it is big enough, use the top chunk.
        4. If request size >= mmap threshold, try to directly mmap this chunk.
        5. If available, get memory from system and use it.

    The ugly goto's here ensure that postaction occurs along all paths.
    */
    if (!PREACTION(gm)) {
        void* mem;
        size_t nb;
        if (bytes <= MAX_SMALL_REQUEST) {
            bindex_t idx;
            binmap_t smallbits;
            nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
            idx = small_index(nb);
            smallbits = gm->smallmap >> idx;

            if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
                mchunkptr b, p;
                idx += ~smallbits & 1; /* Uses next bin if idx empty */
                b = smallbin_at(gm, idx);
                p = b->fd;
                assert(chunksize(p) == small_index2size(idx));
                unlink_first_small_chunk(gm, b, p, idx);
                set_inuse_and_pinuse(gm, p, small_index2size(idx));
                mem = chunk2mem(p);
                check_malloced_chunk(gm, mem, nb);
                goto postaction;
            }

            else if (nb > gm->dvsize) {
                if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
                    mchunkptr b, p, r;
                    size_t rsize;
                    bindex_t i;
                    binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
                    binmap_t leastbit = least_bit(leftbits);
                    compute_bit2idx(leastbit, i);
                    b = smallbin_at(gm, i);
                    p = b->fd;
                    assert(chunksize(p) == small_index2size(i));
                    unlink_first_small_chunk(gm, b, p, i);
                    rsize = small_index2size(i) - nb;
                    /* Fit here cannot be remainderless if 4byte sizes */
                    if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
                        set_inuse_and_pinuse(gm, p, small_index2size(i));
                    else {
                        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
                        r = chunk_plus_offset(p, nb);
                        set_size_and_pinuse_of_free_chunk(r, rsize);
                        replace_dv(gm, r, rsize);
                    }
                    mem = chunk2mem(p);
                    check_malloced_chunk(gm, mem, nb);
                    goto postaction;
                }

                else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
                    check_malloced_chunk(gm, mem, nb);
                    goto postaction;
                }
            }
        }
        else if (bytes >= MAX_REQUEST)
            nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
        else {
            nb = pad_request(bytes);
            if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
                check_malloced_chunk(gm, mem, nb);
                goto postaction;
            }
        }

        if (nb <= gm->dvsize) {
            size_t rsize = gm->dvsize - nb;
            mchunkptr p = gm->dv;
            if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
                mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
                gm->dvsize = rsize;
                set_size_and_pinuse_of_free_chunk(r, rsize);
                set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            }
            else { /* exhaust dv */
                size_t dvs = gm->dvsize;
                gm->dvsize = 0;
                gm->dv = 0;
                set_inuse_and_pinuse(gm, p, dvs);
            }
            mem = chunk2mem(p);
            check_malloced_chunk(gm, mem, nb);
            goto postaction;
        }

        else if (nb < gm->topsize) { /* Split top */
            size_t rsize = gm->topsize -= nb;
            mchunkptr p = gm->top;
            mchunkptr r = gm->top = chunk_plus_offset(p, nb);
            r->head = rsize | PINUSE_BIT;
            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            mem = chunk2mem(p);
            check_top_chunk(gm, gm->top);
            check_malloced_chunk(gm, mem, nb);
            goto postaction;
        }

        mem = sys_alloc(gm, nb);

    postaction:
        POSTACTION(gm);
        return mem;
    }

    return 0;
}

void RSymbianDLHeap::dlfree(void* mem) {
    /*
    Consolidate freed chunks with preceding or succeeding bordering
    free chunks, if they exist, and then place in a bin. Intermixed
    with special cases for top, dv, mmapped chunks, and usage errors.
    */
    if (mem != 0)
    {
        mchunkptr p = mem2chunk(mem);
#if FOOTERS
        mstate fm = get_mstate_for(p);
        if (!ok_magic(fm))
        {
            USAGE_ERROR_ACTION(fm, p);
            return;
        }
#else /* FOOTERS */
#define fm gm
#endif /* FOOTERS */

        if (!PREACTION(fm))
        {
            check_inuse_chunk(fm, p);
            if (RTCHECK(ok_address(fm, p) && ok_cinuse(p)))
            {
                size_t psize = chunksize(p);
                mchunkptr next = chunk_plus_offset(p, psize);
                if (!pinuse(p))
                {
                    size_t prevsize = p->prev_foot;
                    if ((prevsize & IS_MMAPPED_BIT) != 0)
                    {
                        prevsize &= ~IS_MMAPPED_BIT;
                        psize += prevsize + MMAP_FOOT_PAD;
                        /*TInt tmp = TOP_FOOT_SIZE;
                        TUint8* top = (TUint8*)fm->top + fm->topsize + 40;
                        if ((top == (TUint8*)p) && fm->topsize > 4096)
                        {
                            fm->topsize += psize;
                            msegmentptr sp = segment_holding(fm, (TUint8*)fm->top);
                            sp->size += psize;
                            if (should_trim(fm, fm->topsize))
                                sys_trim(fm, 0);
                            goto postaction;
                        }
                        else*/
                        {
                            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
                                fm->footprint -= psize;
                            goto postaction;
                        }
                    }
                    else
                    {
                        mchunkptr prev = chunk_minus_offset(p, prevsize);
                        psize += prevsize;
                        p = prev;
                        if (RTCHECK(ok_address(fm, prev)))
                        { /* consolidate backward */
                            if (p != fm->dv)
                            {
                                unlink_chunk(fm, p, prevsize);
                            }
                            else if ((next->head & INUSE_BITS) == INUSE_BITS)
                            {
                                fm->dvsize = psize;
                                set_free_with_pinuse(p, psize, next);
                                goto postaction;
                            }
                        }
                        else
                            goto erroraction;
                    }
                }

                if (RTCHECK(ok_next(p, next) && ok_pinuse(next)))
                {
                    if (!cinuse(next))
                    { /* consolidate forward */
                        if (next == fm->top)
                        {
                            size_t tsize = fm->topsize += psize;
                            fm->top = p;
                            p->head = tsize | PINUSE_BIT;
                            if (p == fm->dv)
                            {
                                fm->dv = 0;
                                fm->dvsize = 0;
                            }
                            if (should_trim(fm, tsize))
                                sys_trim(fm, 0);
                            goto postaction;
                        }
                        else if (next == fm->dv)
                        {
                            size_t dsize = fm->dvsize += psize;
                            fm->dv = p;
                            set_size_and_pinuse_of_free_chunk(p, dsize);
                            goto postaction;
                        }
                        else
                        {
                            size_t nsize = chunksize(next);
                            psize += nsize;
                            unlink_chunk(fm, next, nsize);
                            set_size_and_pinuse_of_free_chunk(p, psize);
                            if (p == fm->dv)
                            {
                                fm->dvsize = psize;
                                goto postaction;
                            }
                        }
                    }
                    else
                        set_free_with_pinuse(p, psize, next);
                    insert_chunk(fm, p, psize);
                    check_free_chunk(fm, p);
                    goto postaction;
                }
            }
erroraction:
            USAGE_ERROR_ACTION(fm, p);
postaction:
            POSTACTION(fm);
        }
    }
#if !FOOTERS
#undef fm
#endif /* FOOTERS */
}

void* RSymbianDLHeap::dlrealloc(void* oldmem, size_t bytes) {
    if (oldmem == 0)
        return dlmalloc(bytes);
#ifdef REALLOC_ZERO_BYTES_FREES
    if (bytes == 0) {
        dlfree(oldmem);
        return 0;
    }
#endif /* REALLOC_ZERO_BYTES_FREES */
    else {
#if !FOOTERS
        mstate m = gm;
#else /* FOOTERS */
        mstate m = get_mstate_for(mem2chunk(oldmem));
        if (!ok_magic(m)) {
            USAGE_ERROR_ACTION(m, oldmem);
            return 0;
        }
#endif /* FOOTERS */
        return internal_realloc(m, oldmem, bytes);
    }
}


int RSymbianDLHeap::dlmalloc_trim(size_t pad) {
    int result = 0;
    if (!PREACTION(gm)) {
        result = sys_trim(gm, pad);
        POSTACTION(gm);
    }
    return result;
}

size_t RSymbianDLHeap::dlmalloc_footprint(void) {
    return gm->footprint;
}

size_t RSymbianDLHeap::dlmalloc_max_footprint(void) {
    return gm->max_footprint;
}

#if !NO_MALLINFO
struct mallinfo RSymbianDLHeap::dlmallinfo(void) {
    return internal_mallinfo(gm);
}
#endif /* NO_MALLINFO */

void RSymbianDLHeap::dlmalloc_stats() {
    internal_malloc_stats(gm);
}

int RSymbianDLHeap::dlmallopt(int param_number, int value) {
    return change_mparam(param_number, value);
}
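
/*
 * dlmallopt usage sketch (illustrative): dlmallopt(M_TRIM_THRESHOLD, 0x20000)
 * raises the trim threshold to 128KB and returns 1; an M_GRANULARITY value is
 * accepted only if it is a power of two no smaller than the page size.
 */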

//inline slab* slab::slabfor(void* p)
slab* slab::slabfor(const void* p)
{
    return (slab*)(floor(p, slabsize));
}
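
/*
 * slab::slabfor illustrated: slabs are slabsize-aligned, so any pointer into
 * a slab maps back to its header by rounding down. With a hypothetical
 * slabsize of 0x400, slabfor((void*)0x12345) == (slab*)0x12000.
 */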
// Remove slab s from its address-ordered tree by repeatedly promoting its
// lower-addressed child into the vacated slot until a leaf position is freed.
void RSymbianDLHeap::tree_remove(slab* s)
    {
    slab** r = s->parent;
    slab* c1 = s->child1;
    slab* c2 = s->child2;
    for (;;)
        {
        if (!c2)
            {
            *r = c1;
            if (c1)
                c1->parent = r;
            return;
            }
        if (!c1)
            {
            *r = c2;
            c2->parent = r;
            return;
            }
        if (c1 > c2)
            {
            slab* c3 = c1;
            c1 = c2;
            c2 = c3;
            }
        slab* newc2 = c1->child2;
        *r = c1;
        c1->parent = r;
        c1->child2 = c2;
        c2->parent = &c1->child2;
        s = c1;
        c1 = s->child1;
        c2 = newc2;
        r = &s->child1;
        }
    }
|
void RSymbianDLHeap::tree_insert(slab* s,slab** r)
    {
    slab* n = *r;
    for (;;)
        {
        if (!n)
            { // tree empty
            *r = s;
            s->parent = r;
            s->child1 = s->child2 = 0;
            break;
            }
        if (s < n)
            { // insert between parent and n
            *r = s;
            s->parent = r;
            s->child1 = n;
            s->child2 = 0;
            n->parent = &s->child1;
            break;
            }
        slab* c1 = n->child1;
        slab* c2 = n->child2;
        if ((c1 - 1) > (c2 - 1))
            { // descend into the null or higher-addressed child (0-1 wraps to the top of the address space)
            r = &n->child1;
            n = c1;
            }
        else
            {
            r = &n->child2;
            n = c2;
            }
        }
    }
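
// The partial trees built by tree_insert/tree_remove are binary min-heaps
// keyed on slab address: a parent always has a lower address than its
// children, so the root is the lowest-addressed entry, which is what the
// allocation paths prefer. A short worked trace with hypothetical slabs at
// addresses A < B < C:
//
//   tree_insert(B, &root);  // empty tree: root = B
//   tree_insert(C, &root);  // C > B: walk down, C becomes a child of B
//   tree_insert(A, &root);  // A < B: spliced in above B, so root = A
//
// The (c1 - 1) > (c2 - 1) comparison relies on a null child wrapping to the
// largest pointer value, steering the walk into empty child slots first.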
|
void* RSymbianDLHeap::allocnewslab(slabset& allocator)
//
// Acquire and initialise a new slab, returning a cell from the slab
// The strategy is:
// 1. Use the lowest address free slab, if available. This is done by using the lowest slab
//    in the page at the root of the partial_page heap (which is address ordered). If the
//    page is now fully used, remove it from the partial_page heap.
// 2. Allocate a new page for slabs if no empty slabs are available
//
    {
    page* p = page::pagefor(partial_page);
    if (!p)
        return allocnewpage(allocator);

    unsigned h = p->slabs[0].header;
    unsigned pagemap = header_pagemap(h);
    ASSERT(&p->slabs[hibit(pagemap)] == partial_page);

    unsigned slabix = lowbit(pagemap);
    p->slabs[0].header = h &~ (0x100<<slabix);
    if (!(pagemap &~ (1<<slabix)))
        {
        tree_remove(partial_page);  // last free slab in page
        }
    return allocator.initslab(&p->slabs[slabix]);
    }
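
// Worked example of the pagemap bookkeeping above (bits 8-11 of the first
// slab's header, one bit per slab in the page):
//
//   pagemap 0b1010: slabs 1 and 3 free; partial_page points at slab 3 (hibit)
//   lowbit gives slabix 1, so slab 1 (the lowest-addressed free slab) is used
//   header &= ~(0x100<<1) leaves pagemap 0b1000, still non-zero, so the
//   page stays in the partial_page heap; only taking the last free slab
//   triggers the tree_remove above.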
|
/** Definition of this function is not in the prototype code **/
#if 0
void RSymbianDLHeap::partial_insert(slab* s)
    {
    // slab has had first cell freed and needs to be linked back into partial tree
    slabset& ss = slaballoc[sizemap[s->clz]];

    ASSERT(s->used == slabfull);
    s->used = ss.fulluse - s->clz;  // full-1 loading
    tree_insert(s,&ss.partial);
    checktree(ss.partial);
    }
/** Definition of this function is not in the prototype code **/
#endif
|
void* RSymbianDLHeap::allocnewpage(slabset& allocator)
//
// Acquire and initialise a new page, returning a cell from a new slab
// The partial_page tree is empty (otherwise we'd have used a slab from there)
// The partial_page link is put in the highest addressed slab in the page, and the
// lowest addressed slab is used to fulfill the allocation request
//
    {
    page* p = spare_page;
    if (p)
        spare_page = 0;
    else
        {
        p = static_cast<page*>(map(0,pagesize));
        if (!p)
            return 0;
        }

    ASSERT(p == floor(p,pagesize));
    p->slabs[0].header = ((1<<3) + (1<<2) + (1<<1))<<8;  // set pagemap: slabs 1-3 free
    p->slabs[3].parent = &partial_page;
    p->slabs[3].child1 = p->slabs[3].child2 = 0;
    partial_page = &p->slabs[3];
    return allocator.initslab(&p->slabs[0]);
    }
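
// State of a fresh page after the code above: the pagemap is
// ((1<<3)+(1<<2)+(1<<1))<<8 == 0xE00, i.e. slabs 1-3 free and slab 0 (the
// one just handed to initslab) in use, with the highest-addressed slab,
// slabs[3], carrying the partial_page linkage - the invariant that
// freeslab() below maintains.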
|
void RSymbianDLHeap::freepage(page* p)
//
// Release an unused page to the OS
// A single page is cached for reuse to reduce thrashing of
// the OS allocator.
//
    {
    ASSERT(ceiling(p,pagesize) == p);
    if (!spare_page)
        {
        spare_page = p;
        return;
        }
    unmap(p,pagesize);
    }
|
void RSymbianDLHeap::freeslab(slab* s)
//
// Release an empty slab to the slab manager
// The strategy is:
// 1. The page containing the slab is checked to see the state of the other slabs in the page by
//    inspecting the pagemap field in the header of the first slab in the page.
// 2. The pagemap is updated to indicate the new unused slab
// 3. If this is the only unused slab in the page then the slab header is used to add the page to
//    the partial_page tree/heap
// 4. If all the slabs in the page are now unused the page is released back to the OS
// 5. If this slab has a higher address than the one currently used to track this page in
//    the partial_page heap, the linkage is moved to the new unused slab
//
    {
    tree_remove(s);
    checktree(*s->parent);
    ASSERT(header_usedm4(s->header) == header_size(s->header)-4);
    CHECK(s->header |= 0xFF00000);  // illegal value for debug purposes
    page* p = page::pagefor(s);
    unsigned h = p->slabs[0].header;
    int slabix = s - &p->slabs[0];
    unsigned pagemap = header_pagemap(h);
    p->slabs[0].header = h | (0x100<<slabix);
    if (pagemap == 0)
        { // page was full before, use this slab as link in empty heap
        tree_insert(s, &partial_page);
        }
    else
        { // find the current empty-link slab
        slab* sl = &p->slabs[hibit(pagemap)];
        pagemap ^= (1<<slabix);
        if (pagemap == 0xf)
            { // page is now empty so recycle page to os
            tree_remove(sl);
            freepage(p);
            return;
            }
        // ensure the free list link is in highest address slab in page
        if (s > sl)
            { // replace current link with new one. Address-order tree so position stays the same
            slab** r = sl->parent;
            slab* c1 = sl->child1;
            slab* c2 = sl->child2;
            s->parent = r;
            s->child1 = c1;
            s->child2 = c2;
            *r = s;
            if (c1)
                c1->parent = &s->child1;
            if (c2)
                c2->parent = &s->child2;
            }
        CHECK(if (s < sl) s=sl);
        }
    ASSERT(header_pagemap(p->slabs[0].header) != 0);
    ASSERT(hibit(header_pagemap(p->slabs[0].header)) == unsigned(s - &p->slabs[0]));
    }
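
// Worked trace of the strategy above for a page whose pagemap is 0b0101
// (slabs 0 and 2 free, tree linkage held by slab 2 = hibit):
//
//   freeslab(slab 3): pagemap becomes 0b1101 (not 0xf, so the page stays);
//   slab 3 > slab 2, so the linkage is moved into slab 3; the tree position
//   is unchanged because the heap is keyed on address and both slabs sit in
//   the same page. Had the pagemap reached 0xf, the page would instead have
//   been removed from the tree and recycled via freepage().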
|
void RSymbianDLHeap::slab_init(unsigned slabbitmap)
    {
    ASSERT((slabbitmap & ~okbits) == 0);
    ASSERT(maxslabsize <= 60);

    slab_threshold = 0;
    partial_page = 0;
    unsigned char ix = 0xff;
    unsigned bit = 1<<((maxslabsize>>2)-1);
    for (int sz = maxslabsize; sz >= 0; sz -= 4, bit >>= 1)
        {
        if (slabbitmap & bit)
            {
            if (++ix == 0)
                slab_threshold = sz+1;  // first (largest) enabled size sets the threshold
            slabset& c = slaballoc[ix];
            c.size = sz;
            c.partial = 0;
            }
        sizemap[sz>>2] = ix;
        }
    }
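
// How the bitmap decodes, assuming maxslabsize == 60 so that bit k of
// slabbitmap corresponds to cell size 4*(k+1):
//
//   slab_init(0x3fff): bits 0-13 set, so slabsets for sizes 4,8,...,56 are
//   enabled and slab_threshold becomes 57 (one past the largest size).
//   sizemap[sz>>2] then names the slaballoc[] entry serving size sz; a size
//   with no exact slabset falls through to the next larger enabled one,
//   because the loop runs from large sizes to small.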
|
void* RSymbianDLHeap::slab_allocate(slabset& ss)
//
// Allocate a cell from the given slabset
// Strategy:
// 1. Take the partially full slab at the top of the heap (lowest address).
// 2. If there is no such slab, allocate from a new slab
// 3. If the slab has a non-empty freelist, pop the cell from the front of the list and update the slab
// 4. Otherwise, if the slab is not full, return the cell at the end of the currently used region of
//    the slab, updating the slab
// 5. Otherwise, release the slab from the partial tree/heap, marking it as 'floating' and go back to
//    step 1
//
    {
    for (;;)
        {
        slab *s = ss.partial;
        if (!s)
            break;
        unsigned h = s->header;
        unsigned free = h & 0xff;  // extract free cell position
        if (free)
            {
            ASSERT(((free<<2)-sizeof(slabhdr))%header_size(h) == 0);
            void* p = offset(s,free<<2);
            free = *(unsigned char*)p;  // get next pos in free list
            h += (h&0x3C000)<<6;  // update usedm4
            h &= ~0xff;
            h |= free;  // update freelist
            s->header = h;
            ASSERT(header_free(h) == 0 || ((header_free(h)<<2)-sizeof(slabhdr))%header_size(h) == 0);
            ASSERT(header_usedm4(h) <= 0x3F8u);
            ASSERT((header_usedm4(h)+4)%header_size(h) == 0);
            return p;
            }
        unsigned h2 = h + ((h&0x3C000)<<6);
        if (h2 < 0xfc00000)
            {
            ASSERT((header_usedm4(h2)+4)%header_size(h2) == 0);
            s->header = h2;
            return offset(s,(h>>18) + sizeof(unsigned) + sizeof(slabhdr));
            }
        h |= 0x80000000;  // mark the slab as full-floating
        s->header = h;
        tree_remove(s);
        checktree(ss.partial);
        // go back and try the next slab...
        }
    // no partial slabs found, so allocate from a new slab
    return allocnewslab(ss);
    }
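
// Layout of the 32-bit slab header, as reconstructed from the bit operations
// in slab_allocate/slab_free/initslab:
//
//   bit  31     full-floating flag (slab full and in no partial tree)
//   bits 30-28  unused; they absorb the borrow when usedm4 underflows
//   bits 27-18  usedm4: bytes in use minus 4; (h & 0x3C000) << 6 is exactly
//               one cell size in this field's units
//   bits 17-14  cell size / 4
//   bits 11-8   pagemap (meaningful only in the first slab of a page)
//   bits 7-0    freelist head: cell offset within the slab, in words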
|
void RSymbianDLHeap::slab_free(void* p)
//
// Free a cell from the slab allocator
// Strategy:
// 1. Find the containing slab (round down to nearest 1KB boundary)
// 2. Push the cell into the slab's freelist, and update the slab usage count
// 3. If this is the last allocated cell, free the slab to the main slab manager
// 4. If the slab was full-floating then insert the slab in its respective partial tree
//
    {
    ASSERT(lowbits(p,3)==0);
    slab* s = slab::slabfor(p);

    unsigned pos = lowbits(p, slabsize);
    unsigned h = s->header;
    ASSERT(header_usedm4(h) != 0x3FC);  // slab is empty already
    ASSERT((pos-sizeof(slabhdr))%header_size(h) == 0);
    *(unsigned char*)p = (unsigned char)h;  // old freelist head becomes this cell's next link
    h &= ~0xFF;
    h |= (pos>>2);  // this cell becomes the freelist head
    unsigned size = h & 0x3C000;
    if (int(h) >= 0)
        { // slab is linked in a partial tree
        h -= size<<6;  // decrement usedm4 by one cell
        if (int(h) >= 0)
            {
            s->header = h;
            return;
            }
        freeslab(s);  // last cell freed: release the whole slab
        return;
        }
    h -= size<<6;
    h &= ~0x80000000;  // clear the full-floating flag
    s->header = h;
    slabset& ss = slaballoc[sizemap[size>>14]];
    tree_insert(s,&ss.partial);
    checktree(ss.partial);
    }
|
void* slabset::initslab(slab* s)
//
// initialise an empty slab for this allocator and return the first cell
// pre-condition: the slabset has no partial slabs for allocation
//
    {
    ASSERT(partial==0);
    unsigned h = s->header & 0xF00;  // preserve pagemap only
    h |= (size<<12);  // set size
    h |= (size-4)<<18;  // set usedm4 (used minus 4) to one cell
    s->header = h;
    partial = s;
    s->parent = &partial;
    s->child1 = s->child2 = 0;
    return offset(s,sizeof(slabhdr));
    }
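
// Worked example for a hypothetical 16-byte slabset (size == 16): the header
// built above is
//
//   h = pagemap_bits | (16 << 12) | ((16 - 4) << 18)
//     = pagemap_bits | 0x10000 | 0x300000
//
// i.e. cell size 16 with usedm4 already counting the first cell - the one
// returned at offset(s, sizeof(slabhdr)).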
|
TAny* RSymbianDLHeap::SetBrk(TInt32 aDelta)
    {
    if (iFlags & EFixedSize)
        return MFAIL;

    if (aDelta < 0)
        {
        unmap(offset(iTop, aDelta), -aDelta);
        }
    else if (aDelta > 0)
        {
        if (!map(iTop, aDelta))
            return MFAIL;
        }
    void* p = iTop;
    iTop = offset(iTop, aDelta);

    return p;
    }
|
void* RSymbianDLHeap::map(void* p,unsigned sz)
//
// allocate pages in the chunk
// if p is NULL, find and allocate the required number of pages (which must lie in the lower half)
// otherwise commit the pages specified
//
    {
    ASSERT(p == floor(p, pagesize));
    ASSERT(sz == ceiling(sz, pagesize));
    ASSERT(sz > 0);

    if (iChunkSize + sz > iMaxLength)
        return 0;

    RChunk chunk;
    chunk.SetHandle(iChunkHandle);
    if (p)
        {
        TInt r = chunk.Commit(iOffset + ptrdiff(p, this),sz);
        if (r < 0)
            return 0;
        ASSERT(p == offset(this, r - iOffset));
        iChunkSize += sz;
        return p;
        }

    TInt r = chunk.Allocate(sz);
    if (r < 0)
        return 0;
    if (r > iOffset)
        {
        // can't allow page allocations in DL zone
        chunk.Decommit(r, sz);
        return 0;
        }
    iChunkSize += sz;

    TraceChunkUsage(iChunkHandle, iBase, iChunkSize);

    return offset(this, r - iOffset);
    }
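
// map() is used in two modes, mirroring the checks above (a summary, not
// additional API):
//
//   map(p, n): commit n bytes at a specific heap address (heap growth via
//     SetBrk, or in-place growth in remap); fails if the chunk cannot
//     commit there.
//   map(0, n): let the chunk find space; the result must land at or below
//     iOffset, i.e. outside the DL region, or it is decommitted and the
//     call fails.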
|
void* RSymbianDLHeap::remap(void* p,unsigned oldsz,unsigned sz)
    {
    if (oldsz > sz)
        { // shrink
        unmap(offset(p,sz), oldsz-sz);
        }
    else if (oldsz < sz)
        { // grow, try and do this in place first
        if (!map(offset(p, oldsz), sz-oldsz))
            {
            // need to allocate-copy-free
            void* newp = map(0, sz);
            if (!newp)
                return 0;  // propagate failure rather than copying to NULL
            memcpy(newp, p, oldsz);
            unmap(p,oldsz);
            return newp;
            }
        }
    return p;
    }
|
void RSymbianDLHeap::unmap(void* p,unsigned sz)
    {
    ASSERT(p == floor(p, pagesize));
    ASSERT(sz == ceiling(sz, pagesize));
    ASSERT(sz > 0);

    RChunk chunk;
    chunk.SetHandle(iChunkHandle);
    TInt r = chunk.Decommit(ptrdiff(p, offset(this,-iOffset)), sz);
    //TInt offset = (TUint8*)p-(TUint8*)chunk.Base();
    //TInt r = chunk.Decommit(offset,sz);

    ASSERT(r >= 0);
    iChunkSize -= sz;

    TraceChunkUsage(iChunkHandle, iBase, iChunkSize);
    }
|
void RSymbianDLHeap::paged_init(unsigned pagepower)
    {
    if (pagepower == 0)
        pagepower = 31;
    else if (pagepower < minpagepower)
        pagepower = minpagepower;
    page_threshold = pagepower;
    for (int i=0; i<npagecells; ++i)
        {
        pagelist[i].page = 0;
        pagelist[i].size = 0;
        }
    }
|
void* RSymbianDLHeap::paged_allocate(unsigned size)
    {
    unsigned nbytes = ceiling(size, pagesize);
    if (nbytes < size + cellalign)
        { // not enough extra space for header and alignment, try to use the cell list
        for (pagecell *c = pagelist,*e = c + npagecells; c < e; ++c)
            if (c->page == 0)
                {
                void* p = map(0, nbytes);
                if (!p)
                    return 0;
                c->page = p;
                c->size = nbytes;
                return p;
                }
        }
    // use a cell header
    nbytes = ceiling(size + cellalign, pagesize);
    void* p = map(0, nbytes);
    if (!p)
        return 0;
    *static_cast<unsigned*>(p) = nbytes;
    return offset(p, cellalign);
    }
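
// Worked example of the two bookkeeping schemes above, assuming a 4KB page
// size and a cellalign of 8:
//
//   paged_allocate(4096): nbytes == 4096 < 4096 + 8, so the allocation is
//     recorded in a free pagelist[] slot and the caller gets the page
//     start itself.
//   paged_allocate(4000): 4096 >= 4000 + 8, so a cell header fits; the
//     first word of the page stores nbytes and the caller gets
//     page + cellalign. paged_free() tells the two cases apart by whether
//     the pointer is page-aligned.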
|
void* RSymbianDLHeap::paged_reallocate(void* p, unsigned size)
    {
    if (lowbits(p, pagesize) == 0)
        { // continue using descriptor
        pagecell* c = paged_descriptor(p);
        unsigned nbytes = ceiling(size, pagesize);
        void* newp = remap(p, c->size, nbytes);
        if (!newp)
            return 0;
        c->page = newp;
        c->size = nbytes;
        return newp;
        }
    else
        { // use a cell header
        ASSERT(lowbits(p,pagesize) == cellalign);
        p = offset(p,-int(cellalign));
        unsigned nbytes = ceiling(size + cellalign, pagesize);
        unsigned obytes = *static_cast<unsigned*>(p);
        void* newp = remap(p, obytes, nbytes);
        if (!newp)
            return 0;
        *static_cast<unsigned*>(newp) = nbytes;
        return offset(newp, cellalign);
        }
    }
|
void RSymbianDLHeap::paged_free(void* p)
    {
    if (lowbits(p,pagesize) == 0)
        { // check pagelist
        pagecell* c = paged_descriptor(p);
        unmap(p, c->size);
        c->page = 0;
        c->size = 0;
        }
    else
        { // check page header
        unsigned* page = static_cast<unsigned*>(offset(p,-int(cellalign)));
        unsigned size = *page;
        unmap(page,size);
        }
    }
|
pagecell* RSymbianDLHeap::paged_descriptor(const void* p) const
    {
    ASSERT(lowbits(p,pagesize) == 0);
    // Double cast to keep the compiler happy: it otherwise thinks we are
    // trying to modify a non-const member (pagelist) in a const function.
    pagecell* c = (pagecell*)((void*)pagelist);
    pagecell* e = c + npagecells;
    for (;;)
        {
        ASSERT(c!=e);
        if (c->page == p)
            return c;
        ++c;
        }
    }
|
#endif /* __NEW_ALLOCATOR__ */