    User::Panic(KDLHeapPanicCategory, aPanic);
    }

#undef UEXPORT_C
#define UEXPORT_C
|
/* Purpose:     Map chunk memory pages from system RAM
 * Arguments:   tp - tchunkptr in which memory should be mapped
 *              psize - incoming tchunk size
 * Return:      KErrNone if successful, else KErrNoMemory
 * Note:
 */
TInt RSymbianDLHeap::map_chunk_pages(tchunkptr tp, size_t psize)
{
    if(page_not_in_memory(tp, psize)) {
        char *a_addr = tchunk_page_align(tp);
        size_t npages = tp->npages;

#ifdef OOM_LOGGING
        // check that npages matches psize
        size_t offset = address_offset(a_addr, tp);
        if(offset < psize && (psize - offset) >= mparams.page_size)
        {
            size_t tpages = (psize - offset) >> pageshift;
            if(tpages != tp->npages) // assert condition
                MEM_LOG("CHUNK_PAGE_ERROR:map_chunk_pages, error in npages");
        }
        else
            MEM_LOG("CHUNK_PAGE_ERROR::map_chunk_pages: - incorrect page-in-memory flag");
#endif

        if(map(a_addr, npages*mparams.page_size)) {
            TRACE_DL_CHUNK_MAP(tp, psize, a_addr, npages*mparams.page_size);
            ASSERT_RCHUNK_SIZE();
            TRACE_UNMAPPED_CHUNK(-1*npages*mparams.page_size);
            return KErrNone;
        }
        else {
            MEM_LOGF(_L8("CHUNK_PAGE_ERROR:: map_chunk_pages - Failed to Commit RAM, page_addr=%x, npages=%d, chunk_size=%d"), a_addr, npages, psize);
            MEM_DUMP_OOM_LOGS(psize, "RSymbianDLHeap::map_chunk_pages - Failed to Commit RAM");
            return KErrNoMemory;
        }
    }
    return KErrNone;
}
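
/* Worked example (editorial sketch, values assumed): with 4 KB pages
 * (pageshift == 12), a chunk header at 0x00481010 with psize == 0x5000
 * page-aligns to a_addr == 0x00482000, so offset == 0xFF0 and
 * tpages == (0x5000 - 0xFF0) >> 12 == 4 whole pages lie inside the chunk,
 * which is the count map_chunk_pages() expects to find in tp->npages. */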
|
/* Purpose:     Map partial chunk memory pages from system RAM
 * Arguments:   tp - tchunkptr in which memory should be mapped
 *              psize - incoming tchunk size
 *              r - remainder chunk pointer
 *              rsize - remainder chunk size
 * Return:      Number of unmapped pages from the remainder chunk if successful (0 or more), else KErrNoMemory
 * Note:        The remainder chunk should be large enough to be mapped out (checked before invoking this function);
 *              pageout headers are set from insert_large_chunk(), not here.
 */
TInt RSymbianDLHeap::map_chunk_pages_partial(tchunkptr tp, size_t psize, tchunkptr r, size_t rsize)
{
    if(page_not_in_memory(tp, psize)) {
        size_t npages = tp->npages; // total number of pages unmapped in this chunk
        char *page_addr_map = tchunk_page_align(tp); // address at which to begin the page map
        char *page_addr_rem = tchunk_page_align(r);  // address in the remainder chunk to remain unmapped
        assert(address_offset(page_addr_rem, r) < rsize);
        size_t npages_map = address_offset(page_addr_rem, page_addr_map) >> pageshift; // number of pages to be mapped
        if(npages_map > 0) {
            if(map(page_addr_map, npages_map*mparams.page_size)) {
                TRACE_DL_CHUNK_MAP(tp, psize, page_addr_map, npages_map*mparams.page_size);
                ASSERT_RCHUNK_SIZE();
                TRACE_UNMAPPED_CHUNK(-1*npages_map*mparams.page_size);
                return (npages - npages_map);
            }
            else {
                MEM_LOGF(_L8("CHUNK_PAGE_ERROR:: map_chunk_pages_partial - Failed to Commit RAM, page_addr=%x, npages=%d, chunk_size=%d"), page_addr_map, npages_map, psize);
                MEM_DUMP_OOM_LOGS(psize, "RSymbianDLHeap::map_chunk_pages_partial - Failed to Commit RAM");
                return KErrNoMemory;
            }
        }
        else {
            // map not needed, first page is already mapped
            return npages;
        }
    }

    return 0;
}
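
/* Worked example (editorial sketch, addresses assumed): if tp page-aligns to
 * 0x00482000 and the remainder chunk r page-aligns to 0x00485000, then
 * npages_map == (0x00485000 - 0x00482000) >> 12 == 3 pages are committed for
 * the allocated front portion, and the function returns npages - 3, i.e. the
 * pages still left paged out in the remainder. */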
|
/* Purpose:     Release (unmap) chunk memory pages to system RAM
 * Arguments:   tp - tchunkptr from which memory may be released
 *              psize - incoming tchunk size
 *              prev_npages - number of pages that have already been unmapped from this chunk
 * Return:      Total number of pages that have been unmapped from this chunk (newly unmapped pages + prev_npages)
 * Note:        pageout headers are set from insert_large_chunk(), not here.
 */
TInt RSymbianDLHeap::unmap_chunk_pages(tchunkptr tp, size_t psize, size_t prev_npages)
{
    size_t npages = 0;
    char *a_addr = tchunk_page_align(tp);
    size_t offset = address_offset(a_addr, tp);
    if(offset < psize && (psize - offset) >= mparams.page_size)
    { /* check for new pages to decommit */
        npages = (psize - offset) >> pageshift;
        if(npages > prev_npages) {
            unmap(a_addr, npages*mparams.page_size); // assuming the kernel takes care of already unmapped pages
            TRACE_DL_CHUNK_UNMAP(tp, psize, a_addr, npages*mparams.page_size);
            iChunkSize += prev_npages*mparams.page_size; // adjust actual chunk size
            ASSERT_RCHUNK_SIZE();
            TRACE_UNMAPPED_CHUNK((npages-prev_npages)*mparams.page_size);
            assert((a_addr + npages*mparams.page_size - 1) < (char*)next_chunk(tp));
        }
    }

#ifdef OOM_LOGGING
    if(npages && (npages < prev_npages))
        MEM_LOG("CHUNK_PAGE_ERROR:unmap_chunk_pages, error in npages");
    if(npages > prev_npages) {
        /* check that the end of the decommitted range lies within this chunk */
        if((a_addr + npages*mparams.page_size - 1) >= (char*)next_chunk(tp))
            MEM_LOG("CHUNK_PAGE_ERROR:unmap_chunk_pages, error chunk boundary");
    }
#endif
#ifdef DL_CHUNK_MEM_DEBUG
    mchunkptr next = next_chunk(tp);
    do_check_any_chunk_access(next, chunksize(next));
    if(!npages) do_check_any_chunk_access((mchunkptr)tp, psize);
#endif

    return (npages);
}
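
/* Worked example (editorial sketch, values assumed): freeing a chunk of
 * psize == 0x6010 whose first page boundary is 0xFF0 bytes past the header
 * gives npages == (0x6010 - 0xFF0) >> 12 == 5; if prev_npages == 2 of those
 * were already paged out, the single unmap() call covers all 5 pages and the
 * function returns 5 as the new total for this chunk. */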
|
/* Purpose:     Unmap all pages between the previously unmapped pages and the end of the top chunk,
 *              and reset top to the beginning of the prev chunk
 * Arguments:   m - global malloc state
 *              prev - previous chunk which has unmapped pages
 *              psize - size of previous chunk
 *              prev_npages - number of unmapped pages from previous chunk
 * Return:      nonzero if successful, else 0
 * Note:
 */
TInt RSymbianDLHeap::sys_trim_partial(mstate m, mchunkptr prev, size_t psize, size_t prev_npages)
{
    size_t released = 0;
    size_t extra = 0;
    if (is_initialized(m)) {
        psize += m->topsize;
        char *a_addr = tchunk_page_align(prev); // includes space for TOP footer
        size_t addr_offset = address_offset(a_addr, prev);
        assert(addr_offset > TOP_FOOT_SIZE); // always assert?
        assert((char*)iTop >= a_addr); // always assert?
        if((char*)iTop > a_addr)
            extra = address_offset(iTop, a_addr);

#ifdef OOM_LOGGING
        if ((char*)iTop < a_addr)
            MEM_LOGF(_L8("RSymbianDLHeap::sys_trim_partial - incorrect iTop value, top=%x, iTop=%x"), m->top, iTop);
#endif
        msegmentptr sp = segment_holding(m, (TUint8*)prev);
        if (!is_extern_segment(sp)) {
            if (is_mmapped_segment(sp)) {
                if (HAVE_MMAP && sp->size >= extra && !has_segment_link(m, sp)) { /* can't shrink if pinned */
                    size_t newsize = sp->size - extra;
                    /* Prefer mremap, fall back to munmap */
                    if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
                        (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
                        released = extra;
                    }
                }
            }
            else if (HAVE_MORECORE) {
                if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
                    extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - mparams.granularity;
                ACQUIRE_MORECORE_LOCK(m);
                {
                    /* Make sure end of memory is where we last set it. */
                    TUint8* old_br = (TUint8*)(CALL_MORECORE(0));
                    if (old_br == sp->base + sp->size) {
                        TUint8* rel_br = (TUint8*)(CALL_MORECORE(-extra));
                        TUint8* new_br = (TUint8*)(CALL_MORECORE(0));
                        if (rel_br != CMFAIL && new_br < old_br)
                            released = old_br - new_br;
                    }
                }
                RELEASE_MORECORE_LOCK(m);
            }
        }

        if (released != 0) {
            TRACE_DL_CHUNK_UNMAP(prev, psize, a_addr, released);
            iChunkSize += prev_npages*mparams.page_size; // prev_npages were already unmapped
            TRACE_UNMAPPED_CHUNK(-1*prev_npages*mparams.page_size);
            ASSERT_RCHUNK_SIZE();
            sp->size -= released;
            m->footprint -= released;
        }

        /* reset top to prev chunk */
        init_top(m, prev, addr_offset - TOP_FOOT_SIZE);
        check_top_chunk(m, m->top);
    }

    // DL region not initialized, do not reset top here
    return (released != 0) ? 1 : 0;
}
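
/* Worked example (editorial sketch, addresses assumed): if the freed prev
 * chunk page-aligns to a_addr == 0x00490000 while iTop == 0x004A3000, then
 * extra == 0x13000 (19 pages of 4 KB) is released via CALL_MORECORE(-extra)
 * or CALL_MUNMAP, after which init_top() rebuilds the top chunk at prev. */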
|
UEXPORT_C RSymbianDLHeap::RSymbianDLHeap(TInt aMaxLength, TInt aAlign, TBool aSingleThread)
// constructor for a fixed heap. Just use DL allocator
    :iMinLength(aMaxLength), iMaxLength(aMaxLength), iOffset(0), iGrowBy(0), iChunkHandle(0),
    iNestingLevel(0), iAllocCount(0), iFailType(ENone), iTestData(NULL), iChunkSize(aMaxLength)

/* ... */

                v = t;
            }
            t = leftmost_child(t);
        }
        /* If dv is a better fit, return 0 so malloc will use it */
        if (v != 0) {
            if (RTCHECK(ok_address(m, v))) { /* split */
                mchunkptr r = chunk_plus_offset(v, nb);
                assert(chunksize(v) == rsize + nb);

                /* check for chunk memory page-in */
                size_t npages_out = 0;
                if(page_not_in_memory(v, chunksize(v))) {
                    if(!is_small(rsize) && rsize >= CHUNK_PAGEOUT_THESHOLD) {
                        // partial chunk page mapping
                        TInt result = map_chunk_pages_partial(v, chunksize(v), (tchunkptr)r, rsize);
                        if (result < 0) return 0; // failed to commit RAM
                        else npages_out = (size_t)result;
                    }
                    else {
                        // full chunk page map needed
                        TInt err = map_chunk_pages(v, chunksize(v));
                        if(err != KErrNone) return 0; // failed to commit RAM
                    }
                }

                if (RTCHECK(ok_next(v, r))) {
                    unlink_large_chunk(m, v);
                    if (rsize < free_chunk_threshold) // exhaust if less than slab threshold
                        set_inuse_and_pinuse(m, v, (rsize + nb));
                    else {
                        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
                        set_size_and_pinuse_of_free_chunk(r, rsize);
                        insert_chunk(m, r, rsize, npages_out);
                    }
                    return chunk2mem(v);
                }
            }
            CORRUPTION_ERROR_ACTION(m);
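
/* Editorial note on the split above: when a paged-out tree chunk is split,
 * only the front nb bytes must be committed if the tail remainder itself
 * qualifies to stay paged out (not a small size and at least
 * CHUNK_PAGEOUT_THESHOLD); otherwise the whole chunk is committed via
 * map_chunk_pages() before it is handed out. */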

/* ... */

        size_t tsize = m->topsize += qsize;
        m->top = q;
        q->head = tsize | PINUSE_BIT;
        check_top_chunk(m, q);
    }
    else {
        if (!cinuse(oldfirst)) {
            size_t nsize = chunksize(oldfirst);

            /* check for chunk memory page-in */
            if(page_not_in_memory(oldfirst, nsize))
                map_chunk_pages((tchunkptr)oldfirst, nsize); // error ignored, branch not reachable

            unlink_chunk(m, oldfirst, nsize);
            oldfirst = chunk_plus_offset(oldfirst, nsize);
            qsize += nsize;
        }
        set_free_with_pinuse(q, qsize, oldfirst);
        insert_chunk(m, q, qsize, 0);
        check_free_chunk(m, q);
    }

    check_malloced_chunk(m, chunk2mem(p), nb);
    return chunk2mem(p);

/* ... */

void* RSymbianDLHeap::dlmalloc(size_t bytes) {
    /*
      Basic algorithm:
      If a small request (< 256 bytes minus per-chunk overhead):
        1. If one exists, use a remainderless chunk in associated smallbin.
           (Remainderless means that there are too few excess bytes to represent as a chunk.)
        2. If one exists, split the smallest available chunk in a bin, saving remainder in bin.
        3. If it is big enough, use the top chunk.
        4. If available, get memory from system and use it
      Otherwise, for a large request:
        1. Find the smallest available binned chunk that fits, splitting if necessary.
        2. If it is big enough, use the top chunk.
        3. If request size >= mmap threshold, try to directly mmap this chunk.
        4. If available, get memory from system and use it

      The ugly goto's here ensure that postaction occurs along all paths.
    */

/* ... */

            p = b->fd;
            assert(chunksize(p) == small_index2size(i));
            unlink_first_small_chunk(gm, b, p, i);
            rsize = small_index2size(i) - nb;
            /* Fit here cannot be remainderless if 4byte sizes */
            if (rsize < free_chunk_threshold)
                set_inuse_and_pinuse(gm, p, small_index2size(i));
            else {
                set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
                r = chunk_plus_offset(p, nb);
                set_size_and_pinuse_of_free_chunk(r, rsize);
                insert_chunk(gm, r, rsize, 0);
            }
            mem = chunk2mem(p);
            check_malloced_chunk(gm, mem, nb);
            goto postaction;
        }
        else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
            check_malloced_chunk(gm, mem, nb);
            goto postaction;
        }
    }
    } /* else - large alloc request */
    else if (bytes >= MAX_REQUEST)
        nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else {
        nb = pad_request(bytes);
        if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
            check_malloced_chunk(gm, mem, nb);
            goto postaction;
        }
    }

    if (nb < gm->topsize) { /* Split top */
        size_t rsize = gm->topsize -= nb;
        mchunkptr p = gm->top;
        mchunkptr r = gm->top = chunk_plus_offset(p, nb);
        r->head = rsize | PINUSE_BIT;
        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
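
/* Worked routing example (editorial sketch, assuming 8-byte alignment and
 * 4-byte chunk overhead): dlmalloc(100) pads the request to nb == 104, which
 * is served from smallbin index 13 when populated, else via tmalloc_small(),
 * and only then by splitting the top chunk as above. */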

/* ... */

        if (RTCHECK(ok_next(p, next) && ok_pinuse(next)))
        {
            if (!cinuse(next))
            { /* consolidate forward */
                if (next == fm->top)
                {
                    if(prev_chunk_unmapped) { // previous chunk is unmapped
                        /* unmap all pages between previously unmapped and end of top chunk,
                           and reset top to beginning of prev chunk - done in sys_trim_partial() */
                        sys_trim_partial(fm, p, psize, unmapped_pages);
                        do_check_any_chunk_access(fm->top, fm->topsize);
                        goto postaction;
                    }
                    else { // forward merge to top
                        size_t tsize = fm->topsize += psize;
                        fm->top = p;
                        p->head = tsize | PINUSE_BIT;
                        if (should_trim(fm, tsize))
                            sys_trim(fm, 0);
                        do_check_any_chunk_access(fm->top, fm->topsize);
                        goto postaction;
                    }
                }
                else
                {
                    size_t nsize = chunksize(next);
                    int next_chunk_unmapped = 0;
                    if( page_not_in_memory(next, nsize) ) {
                        next_chunk_unmapped = 1;
                        unmapped_pages += ((tchunkptr)next)->npages;
                    }

                    psize += nsize;
                    unlink_chunk(fm, next, nsize);
                    set_size_and_pinuse_of_free_chunk(p, psize);
                }
            }
            else
                set_free_with_pinuse(p, psize, next);

            /* check if chunk memory can be released */
            size_t npages_out = 0;
            if(!is_small(psize) && psize >= CHUNK_PAGEOUT_THESHOLD)
                npages_out = unmap_chunk_pages((tchunkptr)p, psize, unmapped_pages);

            insert_chunk(fm, p, psize, npages_out);
            check_free_chunk(fm, p);
            do_chunk_page_release_check(p, psize, fm, npages_out);
            goto postaction;
        }
    }
erroraction:
    USAGE_ERROR_ACTION(fm, p);
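
/* Editorial note on the free path above: a consolidated chunk that is not a
 * small size and is at least CHUNK_PAGEOUT_THESHOLD gets its whole pages
 * decommitted immediately via unmap_chunk_pages(); the resulting page count
 * is passed to insert_chunk() so the bin entry records how much of the chunk
 * is paged out. */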

/* ... */

            return c;
        ++c;
        }
    }

|
/* Only for debugging purposes - start */
#ifdef DL_CHUNK_MEM_DEBUG
void RSymbianDLHeap::debug_check_small_chunk_access(mchunkptr p, size_t psize)
{
    size_t sz = chunksize(p);
    char ch = *((char*)chunk_plus_offset(p, psize-1)); // deliberate read: faults if the last byte is not committed
}

void RSymbianDLHeap::debug_check_any_chunk_access(mchunkptr p, size_t psize)
{
    if(p==0 || psize==0) return;

    mchunkptr next = chunk_plus_offset(p, psize);
    char* t = (char*)chunk_plus_offset(p, mparams.page_size);
    char ch = *((char*)p); // deliberate read: touch one byte per page so an uncommitted page faults here
    while((size_t)t < (size_t)next)
    {
        ch = *t;
        t = (char*)chunk_plus_offset(t, mparams.page_size);
    };
}

void RSymbianDLHeap::debug_check_large_chunk_access(tchunkptr p, size_t psize)
{
    mchunkptr next = chunk_plus_offset(p, psize);
    char* t = (char*)chunk_plus_offset(p, mparams.page_size);
    char ch = *((char*)p); // deliberate read, as above
    while((size_t)t < (size_t)next)
    {
        ch = *t;
        t = (char*)chunk_plus_offset(t, mparams.page_size);
    };
}

void RSymbianDLHeap::debug_chunk_page_release_check(mchunkptr p, size_t psize, mstate fm, int mem_released)
{
    if(mem_released)
    {
        if(!page_not_in_memory(p, psize))
            MEM_LOG("CHUNK_PAGE_ERROR::dlfree, error - page_in_mem flag is corrupt");
        if(chunk_plus_offset(p, psize) > fm->top)
            MEM_LOG("CHUNK_PAGE_ERROR: error Top chunk address invalid");
        if(fm->dv >= p && fm->dv < chunk_plus_offset(p, psize))
            MEM_LOG("CHUNK_PAGE_ERROR: error DV chunk address invalid");
    }
}
#endif
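
/* Usage sketch (editorial assumption): with DL_CHUNK_MEM_DEBUG defined, the
 * do_check_*() calls used above are presumed to route to these debug_check_*()
 * probes, e.g. do_check_any_chunk_access(fm->top, fm->topsize) after a trim
 * walks the top chunk one byte per page so that an uncommitted page faults
 * immediately rather than later. */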
|
#ifdef OOM_LOGGING
#include <hal.h>
void RSymbianDLHeap::dump_large_chunk(mstate m, tchunkptr t) {
    tchunkptr u = t;
    bindex_t tindex = t->index;
    size_t tsize = chunksize(t);
    bindex_t idx;
    compute_tree_index(tsize, idx);

    size_t free = 0;
    int nfree = 0;
    do
    { /* traverse through chain of same-sized nodes */
        if (u->child[0] != 0)
        {
            dump_large_chunk(m, u->child[0]);
        }

        if (u->child[1] != 0)
        {
            dump_large_chunk(m, u->child[1]);
        }

        free += chunksize(u);
        nfree++;
        u = u->fd;
    }
    while (u != t);
    C_LOGF(_L8("LARGE_BIN,%d,%d,%d"), tsize, free, nfree);
}

void RSymbianDLHeap::dump_dl_free_chunks()
{
    C_LOG("");
    C_LOG("------------ dump_dl_free_chunks start -------------");
    C_LOG("BinType,BinSize,FreeSize,FreeCount");

    // dump small bins
    for (int i = 0; i < NSMALLBINS; ++i)
    {
        sbinptr b = smallbin_at(gm, i);
        unsigned int empty = (gm->smallmap & (1 << i)) == 0;
        if (!empty)
        {
            int nfree = 0;
            size_t free = 0;
            mchunkptr p = b->bk;
            size_t size = chunksize(p);
            for (; p != b; p = p->bk)
            {
                free += chunksize(p);
                nfree++;
            }

            C_LOGF(_L8("SMALL_BIN,%d,%d,%d"), size, free, nfree);
        }
    }

    // dump large bins
    for (int i = 0; i < NTREEBINS; ++i)
    {
        tbinptr* tb = treebin_at(gm, i);
        tchunkptr t = *tb;
        int empty = (gm->treemap & (1 << i)) == 0;
        if (!empty)
            dump_large_chunk(gm, t);
    }

    C_LOG("------------ dump_dl_free_chunks end -------------");
    C_LOG("");
}

void RSymbianDLHeap::dump_heap_logs(size_t fail_size)
{
    MEM_LOG("");
    if (fail_size) {
        MEM_LOG("MEMDEBUG::RSymbianDLHeap OOM Log dump *************** start");
        MEM_LOGF(_L8("Failing to alloc size: %d"), fail_size);
    }
    else
        MEM_LOG("MEMDEBUG::RSymbianDLHeap Log dump *************** start");

    TInt dl_chunk_size = ptrdiff(iTop, iBase);
    TInt slabp_chunk_size = iChunkSize + iUnmappedChunkSize - dl_chunk_size;
    TInt freeMem = 0;
    HAL::Get(HALData::EMemoryRAMFree, freeMem);
    MEM_LOGF(_L8("System Free RAM Size: %d"), freeMem);
    MEM_LOGF(_L8("Allocator Committed Chunk Size: %d"), iChunkSize);
    MEM_LOGF(_L8("DLHeap Arena Size=%d"), dl_chunk_size);
    MEM_LOGF(_L8("DLHeap unmapped chunk size: %d"), iUnmappedChunkSize);
    MEM_LOGF(_L8("Slab-Page Allocator Chunk Size=%d"), slabp_chunk_size);

    mallinfo info = dlmallinfo();
    TUint heapAlloc = info.uordblks;
    TUint heapFree = info.fordblks;
    MEM_LOGF(_L8("DLHeap allocated size: %d"), heapAlloc);
    MEM_LOGF(_L8("DLHeap free size: %d"), heapFree);

    if (fail_size) {
        MEM_LOG("MEMDEBUG::RSymbianDLHeap OOM Log dump *************** end");
    } else {
        MEM_LOG("MEMDEBUG::RSymbianDLHeap Log dump *************** end");
    }
    MEM_LOG("");
}

#endif
/* Only for debugging purposes - end */

#endif
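
/* Illustrative dump_heap_logs() output shape (editorial, values invented):
 *   System Free RAM Size: 52428800
 *   Allocator Committed Chunk Size: 1048576
 *   DLHeap Arena Size=786432
 *   DLHeap unmapped chunk size: 131072
 *   Slab-Page Allocator Chunk Size=393216
 */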