--- before
+++ after
@@ -15 +15 @@
 *
 *
 */
 
 #include "common.h"
+#include "StopScheduler.h"
+#include <hal.h>
 
 #ifdef TRACE_CHUNK_USAGE
 void TraceChunkUsage(TInt aChunkHandle, TUint8* aBase, TInt aChunkSize)
     {
     RDebug::Print(_L("MEM: c,%d,%d,%d"), aChunkHandle, aBase, aChunkSize);
@@ -28 +30 @@
 #endif
 
 #ifdef __NEW_ALLOCATOR__
 
 #include "MemoryLogger.h"
-#include "SymbianDlHeap.h"
+#include "SymbianDLHeap.h"
 
 _LIT(KDLHeapPanicCategory, "DL Heap");
 #define GET_PAGE_SIZE(x) UserHal::PageSizeInBytes(x)
 #define __CHECK_CELL(p)
 #define __POWER_OF_2(x) ((TUint32)((x)^((x)-1))>=(TUint32)(x))
@@ -261 +263 @@
         {
         iAlign = 4;
         }
     iPageSize = 0;
     iFlags = aSingleThread ? (ESingleThreaded|EFixedSize) : EFixedSize;
+    isLowSystemMemory = 0;
 
     Init(0, 0, 0);
     }
 
 UEXPORT_C RSymbianDLHeap::RSymbianDLHeap(TInt aChunkHandle, TInt aOffset, TInt aMinLength, TInt aMaxLength, TInt aGrowBy,
         TInt aAlign, TBool aSingleThread)
-    : iMinLength(aMinLength), iMaxLength(aMaxLength), iOffset(aOffset), iChunkHandle(aChunkHandle), iNestingLevel(0), iAllocCount(0),
-      iAlign(aAlign),iFailType(ENone), iTestData(NULL), iChunkSize(aMinLength)
+    : iMinLength(aMinLength), iMaxLength(aMaxLength), iOffset(aOffset), iChunkHandle(aChunkHandle),
+      iAlign(aAlign), iNestingLevel(0), iAllocCount(0), iFailType(ENone), iTestData(NULL), iChunkSize(aMinLength)
     {
     // TODO: Locked the page size to 4 KB - change this to pick up from the OS
     GET_PAGE_SIZE(iPageSize);
     __ASSERT_ALWAYS(aOffset >=0, User::Panic(KDLHeapPanicCategory, ETHeapNewBadOffset));
     iGrowBy = _ALIGN_UP(aGrowBy, iPageSize);
     iFlags = aSingleThread ? ESingleThreaded : 0;
+    isLowSystemMemory = 0;
 
     // Initialise
     // if the heap is created with aMinLength==aMaxLength then it cannot allocate slab or page memory
     // so these sub-allocators should be disabled. Otherwise initialise with default values
     if (aMinLength == aMaxLength)
@@ -701 +705 @@
             insert_chunk(m, r, rsize, 0);
             }
         return chunk2mem(v);
         }
     }
-    CORRUPTION_ERROR_ACTION(m);
-    return 0;
+    //CORRUPTION_ERROR_ACTION(m);
+    //return 0;
 }
 
 inline void RSymbianDLHeap::init_top(mstate m, mchunkptr p, size_t psize)
 {
     /* Ensure alignment */
@@ -793 +797 @@
             internal_free(m, oldmem);
             }
         return newmem;
         }
     }
-    return 0;
+    //return 0;
 }
 /* ----------------------------- statistics ------------------------------ */
 mallinfo RSymbianDLHeap::internal_mallinfo(mstate m) {
     struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
     TInt chunkCnt = 0;
@@ -806 +810 @@
     if (is_initialized(m)) {
         size_t nfree = SIZE_T_ONE; /* top always free */
         size_t mfree = m->topsize + TOP_FOOT_SIZE;
         size_t sum = mfree;
         msegmentptr s = &m->seg;
-        TInt tmp = (TUint8*)m->top - (TUint8*)s->base;
+        // TInt tmp = (TUint8*)m->top - (TUint8*)s->base;
         while (s != 0) {
             mchunkptr q = align_as_chunk(s->base);
             chunkCnt++;
             while (segment_holds(s, q) &&
                    q != m->top && q->head != FENCEPOST_HEAD) {
@@ -838 +842 @@
     return nm;
 }
 
 void RSymbianDLHeap::internal_malloc_stats(mstate m) {
     if (!PREACTION(m)) {
-        size_t maxfp = 0;
         size_t fp = 0;
         size_t used = 0;
         check_malloc_state(m);
         if (is_initialized(m)) {
             msegmentptr s = &m->seg;
-            maxfp = m->max_footprint;
             fp = m->footprint;
             used = fp - (m->topsize + TOP_FOOT_SIZE);
 
             while (s != 0) {
                 mchunkptr q = align_as_chunk(s->base);
@@ -1825 +1827 @@
             }
         }
     else
         {
         size_t nsize = chunksize(next);
-        int next_chunk_unmapped = 0;
+        //int next_chunk_unmapped = 0;
         if( page_not_in_memory(next, nsize) ) {
-            next_chunk_unmapped = 1;
+            // next_chunk_unmapped = 1;
             unmapped_pages += ((tchunkptr)next)->npages;
         }
 
         psize += nsize;
         unlink_chunk(fm, next, nsize);
@@ -2296 +2298 @@
 // allocate pages in the chunk
 // if p is NULL, find and allocate the required number of pages (which must lie in the lower half)
 // otherwise commit the pages specified
 //
 {
+    // Check for min threshold in system RAM to be left free
+    TInt sysFreeRAM = 0;
+    if(HAL::Get(HALData::EMemoryRAMFree, sysFreeRAM) == KErrNone)
+        {
+        if(sysFreeRAM < KStopThreshold) // 1MB
+            return 0;
+
+        // check system memory level
+        if(sysFreeRAM < KGoodMemoryThreshold)
+            isLowSystemMemory = 1;
+        else
+            isLowSystemMemory = 0;
+        }
+
     ASSERT(p == floor(p, pagesize));
     ASSERT(sz == ceiling(sz, pagesize));
     ASSERT(sz > 0);
 
     if (iChunkSize + sz > iMaxLength)
         return 0;
 
     RChunk chunk;
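The added guard queries the HAL for the amount of free system RAM and refuses to commit more pages once it falls below a hard floor, while recording a softer "low memory" state in isLowSystemMemory for the rest of the allocator. The sketch below restates that logic as a standalone helper, purely as an illustration: only the 1 MB figure for KStopThreshold is hinted at in the code comment, the KGoodMemoryThreshold value is an assumption, and CheckSystemMemory is a hypothetical name rather than a function in the source.

#include <e32std.h>
#include <hal.h>

// Assumed constants: only the 1MB stop threshold is hinted at in the diff.
const TInt KStopThreshold = 1024 * 1024;            // refuse to grow below 1MB free
const TInt KGoodMemoryThreshold = 4 * 1024 * 1024;  // assumed "comfortable" level

// Hypothetical helper mirroring the guard above. Returns EFalse when the
// allocator should fail the growth request; aLowMemory reports whether free
// RAM is below the "good" threshold but still above the stop threshold.
static TBool CheckSystemMemory(TBool& aLowMemory)
    {
    TInt sysFreeRAM = 0;
    if (HAL::Get(HALData::EMemoryRAMFree, sysFreeRAM) == KErrNone)
        {
        if (sysFreeRAM < KStopThreshold)
            return EFalse;                          // critically low: stop growing
        aLowMemory = (sysFreeRAM < KGoodMemoryThreshold);
        }
    return ETrue;                                   // enough RAM, or query failed
    }

Returning 0 when the floor is hit surfaces the condition as an ordinary allocation failure rather than a panic, matching how the same function already reports an over-size request (iChunkSize + sz > iMaxLength).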
@@ -2342 +2359 @@
     { // grow, try and do this in place first
     if (!map(offset(p, oldsz), sz-oldsz))
         {
         // need to allocate-copy-free
         void* newp = map(0, sz);
-        memcpy(newp, p, oldsz);
-        unmap(p,oldsz);
-        return newp;
+        if(newp)
+            {
+            memcpy(newp, p, oldsz);
+            unmap(p,oldsz);
+            return newp;
+            }
+        else
+            {
+            return 0;
+            }
         }
     }
     return p;
     }
 
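This last hunk closes a gap in the grow path: if in-place extension fails and the fresh mapping also fails, the old code passed a null pointer straight to memcpy. The fragment below is a simplified, self-contained restatement of that allocate-copy-free fallback; mapAt, mapNew and unmapNew are invented stand-ins for the heap's map()/unmap() primitives, used only so the sketch compiles on its own.

#include <cstdlib>
#include <cstring>

// Invented stand-ins: mapAt() pretends in-place extension always fails,
// mapNew()/unmapNew() borrow malloc/free for the illustration.
static bool  mapAt(void*, std::size_t)         { return false; }
static void* mapNew(std::size_t aSize)         { return std::malloc(aSize); }
static void  unmapNew(void* aPtr, std::size_t) { std::free(aPtr); }

// Grow a block from oldsz to sz: try in place first, otherwise
// allocate-copy-free. On failure return 0 and leave the old block intact,
// mirroring the null check introduced in the hunk above.
static void* GrowBlock(void* p, std::size_t oldsz, std::size_t sz)
    {
    if (mapAt(static_cast<char*>(p) + oldsz, sz - oldsz))
        return p;                   // grown in place
    void* newp = mapNew(sz);
    if (!newp)
        return 0;                   // new mapping failed: report failure
    std::memcpy(newp, p, oldsz);
    unmapNew(p, oldsz);
    return newp;
    }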