webengine/osswebengine/MemoryManager/Src/heap.cpp
changeset 16 a359256acfc6
parent 8 7c90e6132015
child 68 92a765b5b3e7
child 70 8bfb9186a8b8
13:10e98eab6f85 16:a359256acfc6
    27 #define TraceChunkUsage(a,b,c)
    27 #define TraceChunkUsage(a,b,c)
    28 #endif
    28 #endif
    29 
    29 
    30 #ifdef __NEW_ALLOCATOR__
    30 #ifdef __NEW_ALLOCATOR__
    31 
    31 
       
    32 #include "MemoryLogger.h"
    32 #include "SymbianDLHeap.h"
    33 #include "SymbianDLHeap.h"
    33 
    34 
    34 _LIT(KDLHeapPanicCategory, "DL Heap");
    35 _LIT(KDLHeapPanicCategory, "DL Heap");
    35 #define	GET_PAGE_SIZE(x)			UserHal::PageSizeInBytes(x)
    36 #define	GET_PAGE_SIZE(x)			UserHal::PageSizeInBytes(x)
    36 #define	__CHECK_CELL(p)
    37 #define	__CHECK_CELL(p)
    43 	User::Panic(KDLHeapPanicCategory, aPanic);
    44 	User::Panic(KDLHeapPanicCategory, aPanic);
    44 	}
    45 	}
    45 
    46 
    46 #undef UEXPORT_C
    47 #undef UEXPORT_C
    47 #define UEXPORT_C 
    48 #define UEXPORT_C 
       
    49 
       
    50 
       
    51 /* Purpose:     Map chunk memory pages from system RAM
       
     52  * Arguments:   tp - tchunkptr in which memory should be mapped
       
    53  *              psize - incoming tchunk size
       
    54  * Return:      KErrNone if successful, else KErrNoMemory  
       
    55  * Note:        
       
    56  */
       
    57 TInt RSymbianDLHeap::map_chunk_pages(tchunkptr tp, size_t psize)
       
    58 {
       
    59     if(page_not_in_memory(tp, psize)) {
       
    60         char *a_addr = tchunk_page_align(tp);
       
    61         size_t npages = tp->npages;
       
    62     
       
    63 #ifdef OOM_LOGGING
       
    64         // check that npages matches the psize
       
    65         size_t offset = address_offset(a_addr,tp);        
       
    66         if(offset < psize && (psize - offset) >= mparams.page_size )
       
    67         {               
       
    68             size_t tpages = ( psize - offset) >> pageshift;            
       
    69             if(tpages != tp->npages) //assert condition                
       
    70                 MEM_LOG("CHUNK_PAGE_ERROR:map_chunk_pages, error in npages");                        
       
    71         }
       
    72         else
       
     73             MEM_LOG("CHUNK_PAGE_ERROR::map_chunk_pages: - Incorrect page-in-memory flag");
       
    74 #endif        
       
    75     
       
    76         if(map(a_addr, npages*mparams.page_size)) {        
       
    77             TRACE_DL_CHUNK_MAP(tp, psize, a_addr, npages*mparams.page_size);
       
    78             ASSERT_RCHUNK_SIZE();
       
    79             TRACE_UNMAPPED_CHUNK(-1*npages*mparams.page_size);
       
    80             return KErrNone;
       
    81         }
       
    82         else { 
       
    83             MEM_LOGF(_L8("CHUNK_PAGE_ERROR:: map_chunk_pages - Failed to Commit RAM, page_addr=%x, npages=%d, chunk_size=%d"), a_addr, npages, psize);
       
    84             MEM_DUMP_OOM_LOGS(psize, "RSymbianDLHeap::map_chunk_pages - Failed to Commit RAM");
       
    85             return KErrNoMemory;
       
    86         }
       
    87     }
       
    88     return KErrNone;
       
    89 }
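
For reference, the npages cross-check in the OOM_LOGGING block above reduces to simple page arithmetic. Below is a minimal standalone sketch of that arithmetic; the constants and helper names are illustrative stand-ins for mparams.page_size, pageshift, tchunk_page_align() and address_offset(), and are not part of this changeset.

#include <cstddef>
#include <cstdint>

// Illustrative stand-ins only: a 4 KB page is assumed here, whereas the real
// allocator takes the page size from mparams.page_size.
static const std::size_t kPageSize  = 4096;
static const std::size_t kPageShift = 12;                  // log2(kPageSize)

// First page-aligned address at or after the start of a chunk
// (plays the role of tchunk_page_align()).
static std::uintptr_t page_align_up(std::uintptr_t chunk_addr)
    {
    return (chunk_addr + kPageSize - 1) & ~(std::uintptr_t)(kPageSize - 1);
    }

// Whole pages spanned by a chunk of psize bytes starting at chunk_addr; this
// is the quantity cached in tp->npages and re-derived by the check above.
static std::size_t whole_pages(std::uintptr_t chunk_addr, std::size_t psize)
    {
    std::size_t offset = (std::size_t)(page_align_up(chunk_addr) - chunk_addr);
    if (offset >= psize || (psize - offset) < kPageSize)
        return 0;                                           // chunk holds no complete page
    return (psize - offset) >> kPageShift;
    }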
       
    90 
       
    91 /* Purpose:     Map partial chunk memory pages from system RAM
       
     92  * Arguments:   tp - tchunkptr in which memory should be mapped
       
    93  *              psize - incoming tchunk size
       
    94  *              r - remainder chunk pointer
       
    95  *              rsize - remainder chunk size  
       
    96  * Return:      Number of unmapped pages from remainder chunk if successful (0 or more), else KErrNoMemory
       
    97  * Note:        Remainder chunk should be large enough to be mapped out (checked before invoking this function)    
       
     98  *              pageout headers will be set by insert_large_chunk(), not here.
       
    99  */
       
   100 TInt RSymbianDLHeap::map_chunk_pages_partial(tchunkptr tp, size_t psize, tchunkptr r, size_t rsize)
       
   101 {
       
   102     if(page_not_in_memory(tp, psize)) {
       
   103         size_t npages = tp->npages; // total no of pages unmapped in this chunk        
       
   104         char *page_addr_map = tchunk_page_align(tp); // address to begin page map
       
   105         char *page_addr_rem = tchunk_page_align(r);  // address in remainder chunk to remain unmapped
       
   106         assert(address_offset(page_addr_rem, r) < rsize);
       
   107         size_t npages_map = address_offset(page_addr_rem, page_addr_map) >> pageshift; // no of pages to be mapped
       
   108         if(npages_map > 0) {
       
   109             if(map(page_addr_map, npages_map*mparams.page_size)) {            
       
   110                 TRACE_DL_CHUNK_MAP(tp, psize, page_addr_map, npages_map*mparams.page_size);
       
   111                 ASSERT_RCHUNK_SIZE();
       
   112                 TRACE_UNMAPPED_CHUNK(-1*npages_map*mparams.page_size);    
       
   113                 return (npages - npages_map);
       
   114             }
       
   115             else { 
       
   116                 MEM_LOGF(_L8("CHUNK_PAGE_ERROR:: map_chunk_pages_partial - Failed to Commit RAM, page_addr=%x, npages=%d, chunk_size=%d"), page_addr_map, npages_map, psize);
       
   117                 MEM_DUMP_OOM_LOGS(psize, "RSymbianDLHeap::map_chunk_pages_partial - Failed to Commit RAM");
       
   118                 return KErrNoMemory;
       
   119             }              
       
   120         }
       
   121         else {
       
   122              // map not needed, first page is already mapped
       
   123              return npages;
       
   124         }
       
   125     }
       
   126     
       
   127     return 0;
       
   128 }
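
As above, the partial map commits only the pages that lie in the allocated front part of the split chunk: from the first page boundary inside tp up to the first page boundary inside the remainder r. A small sketch of that count, reusing the hypothetical page_align_up()/kPageShift stand-ins from the sketch after map_chunk_pages():

// Pages to commit when a chunk starting at chunk_addr is split and the
// remainder begins at rem_addr: the range [page_align_up(chunk_addr),
// page_align_up(rem_addr)) mirrors npages_map in map_chunk_pages_partial().
static std::size_t pages_to_map_partial(std::uintptr_t chunk_addr, std::uintptr_t rem_addr)
    {
    std::uintptr_t map_from = page_align_up(chunk_addr);    // page_addr_map
    std::uintptr_t map_upto = page_align_up(rem_addr);      // page_addr_rem stays unmapped
    return (std::size_t)(map_upto - map_from) >> kPageShift;
    }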
       
   129 
       
   130 
       
   131 /* Purpose:     Release (unmap) chunk memory pages to system RAM
       
    132  * Arguments:   tp - tchunkptr from which memory may be released
       
   133  *              psize - incoming tchunk size
       
    134  *              prev_npages - number of pages that have already been unmapped from this chunk
       
    135  * Return:      total number of pages that have been unmapped from this chunk (new unmapped pages + prev_npages)
       
    136  * Note:        pageout headers will be set by insert_large_chunk(), not here.
       
   137  */
       
   138 TInt RSymbianDLHeap::unmap_chunk_pages(tchunkptr tp, size_t psize, size_t prev_npages)
       
   139 {
       
   140     size_t npages = 0;
       
   141     char *a_addr = tchunk_page_align(tp);
       
   142     size_t offset = address_offset(a_addr,tp);    
       
   143     if(offset < psize && (psize - offset) >= mparams.page_size)
       
   144     { /* check for new pages to decommit */
       
   145         npages = ( psize - offset) >> pageshift;
       
   146         if(npages > prev_npages) {
       
   147             unmap(a_addr, npages*mparams.page_size);    // assuming kernel takes care of already unmapped pages
       
   148             TRACE_DL_CHUNK_UNMAP(tp, psize, a_addr, npages*mparams.page_size);
       
   149             iChunkSize += prev_npages*mparams.page_size; //adjust actual chunk size
       
   150             ASSERT_RCHUNK_SIZE();
       
   151             TRACE_UNMAPPED_CHUNK((npages-prev_npages)*mparams.page_size);
       
   152             assert((a_addr + npages*mparams.page_size - 1) < (char*)next_chunk(tp));
       
   153         }        
       
   154     }
       
   155 
       
   156 #ifdef OOM_LOGGING        
       
   157     if(npages && (npages < prev_npages))
       
   158         MEM_LOG("CHUNK_PAGE_ERROR:unmap_chunk_pages, error in npages");    
       
   159     if(npages > prev_npages) {
       
    160         /* check that the end of the decommitted address range lies within this chunk */
       
   161         if((a_addr + npages*mparams.page_size - 1) >= (char*)next_chunk(tp))
       
   162             MEM_LOG("CHUNK_PAGE_ERROR:unmap_chunk_pages, error chunk boundary");
       
   163     }
       
   164 #endif
       
   165 #ifdef DL_CHUNK_MEM_DEBUG 
       
   166     mchunkptr next = next_chunk(tp);
       
   167     do_check_any_chunk_access(next, chunksize(next));
       
   168     if(!npages)  do_check_any_chunk_access((mchunkptr)tp, psize);
       
   169 #endif
       
   170    
       
   171     return (npages);
       
   172 }
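
For context on how this is driven: the dlfree() changes later in this changeset only attempt a page-out for large chunks of at least CHUNK_PAGEOUT_THESHOLD bytes and then record the result when the chunk is inserted into the free tree. Condensed from that code (not a standalone fragment):

// p is the freed (and possibly coalesced) chunk, psize its size, and
// unmapped_pages the pages already paged out by earlier frees of neighbours.
size_t npages_out = 0;
if (!is_small(psize) && psize >= CHUNK_PAGEOUT_THESHOLD)    // only large chunks are paged out
    npages_out = unmap_chunk_pages((tchunkptr)p, psize, unmapped_pages);
insert_chunk(fm, p, psize, npages_out);                     // pageout count stored via set_tchunk_mem_pageout()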
       
   173 
       
   174 /* Purpose:     Unmap all pages between previously unmapped and end of top chunk 
       
   175                 and reset top to beginning of prev chunk
       
   176  * Arguments:   fm - global malloc state
       
   177  *              prev - previous chunk which has unmapped pages
       
   178  *              psize - size of previous chunk
       
   179  *              prev_npages - number of unmapped pages from previous chunk
       
    180  * Return:      nonzero if successful, else 0
       
   181  * Note:                    
       
   182  */
       
   183 TInt RSymbianDLHeap::sys_trim_partial(mstate m, mchunkptr prev, size_t psize, size_t prev_npages)
       
   184 {
       
   185     size_t released = 0;
       
   186     size_t extra = 0;
       
   187     if (is_initialized(m)) {
       
   188       psize += m->topsize;
       
   189       char *a_addr = tchunk_page_align(prev); // includes space for TOP footer 
       
   190       size_t addr_offset = address_offset(a_addr, prev);
       
   191       assert(addr_offset > TOP_FOOT_SIZE); //always assert?
       
   192       assert((char*)iTop >= a_addr); //always assert?
       
   193       if((char*)iTop > a_addr)
       
   194           extra = address_offset(iTop, a_addr);
       
   195 
       
   196 #ifdef OOM_LOGGING      
       
   197       if ((char*)iTop < a_addr)
       
   198           MEM_LOGF(_L8("RSymbianDLHeap::sys_trim_partial - incorrect iTop value, top=%x, iTop=%x"), m->top, iTop);
       
   199 #endif            
       
   200         msegmentptr sp = segment_holding(m, (TUint8*)prev);
       
   201         if (!is_extern_segment(sp)) {
       
   202           if (is_mmapped_segment(sp)) {
       
   203             if (HAVE_MMAP &&  sp->size >= extra && !has_segment_link(m, sp)) { /* can't shrink if pinned */
       
   204               size_t newsize = sp->size - extra;
       
   205               /* Prefer mremap, fall back to munmap */
       
   206               if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
       
   207                   (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
       
   208                 released = extra;
       
   209               }
       
   210             }
       
   211           }
       
   212           else if (HAVE_MORECORE) {
       
   213             if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
       
   214                 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - mparams.granularity;
       
   215             ACQUIRE_MORECORE_LOCK(m);
       
   216             {
       
   217               /* Make sure end of memory is where we last set it. */
       
   218               TUint8* old_br = (TUint8*)(CALL_MORECORE(0));
       
   219               if (old_br == sp->base + sp->size) {
       
   220                 TUint8* rel_br = (TUint8*)(CALL_MORECORE(-extra));
       
   221                 TUint8* new_br = (TUint8*)(CALL_MORECORE(0));
       
   222                 if (rel_br != CMFAIL && new_br < old_br)
       
   223                   released = old_br - new_br;
       
   224               }
       
   225             }
       
   226             RELEASE_MORECORE_LOCK(m);
       
   227           }
       
   228         }
       
   229         
       
   230         if (released != 0) {
       
   231           TRACE_DL_CHUNK_UNMAP(prev, psize, a_addr, released);
       
    232           iChunkSize += prev_npages*mparams.page_size; // prev_npages pages were already unmapped
       
   233           TRACE_UNMAPPED_CHUNK(-1*prev_npages*mparams.page_size);
       
   234           ASSERT_RCHUNK_SIZE();
       
   235           sp->size -= released;
       
   236           m->footprint -= released;
       
   237         }
       
   238         
       
   239         /* reset top to prev chunk */
       
   240         init_top(m, prev, addr_offset - TOP_FOOT_SIZE);
       
   241         check_top_chunk(m, m->top);
       
   242     }
       
   243 
       
    244     // DL region not initialized, do not reset top here
       
   245     return (released != 0)? 1 : 0;
       
   246 }
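
In the trim above, the bytes the allocator tries to hand back to the system are those between the page-aligned address derived from prev (tchunk_page_align(prev), which leaves room for the top footer) and the current iTop. A rough sketch of that 'extra' computation, again reusing the hypothetical page_align_up() stand-in and ignoring the footer adjustment:

// Mirrors extra = address_offset(iTop, a_addr) in sys_trim_partial(): the
// releasable span above prev's page-aligned address, or 0 if iTop is below it.
static std::size_t releasable_above(std::uintptr_t top_addr, std::uintptr_t prev_addr)
    {
    std::uintptr_t a_addr = page_align_up(prev_addr);       // approximation of tchunk_page_align(prev)
    return (top_addr > a_addr) ? (std::size_t)(top_addr - a_addr) : 0;
    }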
       
   247 
    48 
   248 
    49 UEXPORT_C RSymbianDLHeap::RSymbianDLHeap(TInt aMaxLength, TInt aAlign, TBool aSingleThread)
   249 UEXPORT_C RSymbianDLHeap::RSymbianDLHeap(TInt aMaxLength, TInt aAlign, TBool aSingleThread)
    50 // constructor for a fixed heap. Just use DL allocator
   250 // constructor for a fixed heap. Just use DL allocator
    51 	:iMinLength(aMaxLength), iMaxLength(aMaxLength), iOffset(0), iGrowBy(0), iChunkHandle(0),
   251 	:iMinLength(aMaxLength), iMaxLength(aMaxLength), iOffset(0), iGrowBy(0), iChunkHandle(0),
    52 	iNestingLevel(0), iAllocCount(0), iFailType(ENone), iTestData(NULL), iChunkSize(aMaxLength)
   252 	iNestingLevel(0), iAllocCount(0), iFailType(ENone), iTestData(NULL), iChunkSize(aMaxLength)
    82 	// if the heap is created with aMinLength==aMaxLength then it cannot allocate slab or page memory
   282 	// if the heap is created with aMinLength==aMaxLength then it cannot allocate slab or page memory
    83 	// so these sub-allocators should be disabled. Otherwise initialise with default values
   283 	// so these sub-allocators should be disabled. Otherwise initialise with default values
    84 	if (aMinLength == aMaxLength)
   284 	if (aMinLength == aMaxLength)
    85 		Init(0, 0, 0);
   285 		Init(0, 0, 0);
    86 	else
   286 	else
    87 		Init(0x3fff, 16, 0x10000);	// all slabs, page {64KB}, trim {64KB}
   287 		Init(0x3fff, 15, 0x10000);	// all slabs, page {32KB}, trim {64KB}
    88 //		Init(0xabe, 16, iPageSize*4);	// slabs {48, 40, 32, 24, 20, 16, 12}, page {64KB}, trim {16KB}
   288 //		Init(0xabe, 16, iPageSize*4);	// slabs {48, 40, 32, 24, 20, 16, 12}, page {64KB}, trim {16KB}
    89 	}
   289 	}
    90 
   290 
    91 UEXPORT_C TAny* RSymbianDLHeap::operator new(TUint aSize, TAny* aBase) __NO_THROW
   291 UEXPORT_C TAny* RSymbianDLHeap::operator new(TUint aSize, TAny* aBase) __NO_THROW
    92 	{
   292 	{
   110 
   310 
   111 	slab_init(aBitmapSlab);
   311 	slab_init(aBitmapSlab);
   112 
   312 
   113 	/*10-1K,11-2K,12-4k,13-8K,14-16K,15-32K,16-64K*/
   313 	/*10-1K,11-2K,12-4k,13-8K,14-16K,15-32K,16-64K*/
   114 	paged_init(aPagePower);
   314 	paged_init(aPagePower);
       
   315 
       
   316 #ifdef OOM_LOGGING    
       
   317     iUnmappedChunkSize = 0;
       
   318 #endif    
   115 	}
   319 	}
   116 
   320 
   117 UEXPORT_C RSymbianDLHeap::SCell* RSymbianDLHeap::GetAddress(const TAny* aCell) const
   321 UEXPORT_C RSymbianDLHeap::SCell* RSymbianDLHeap::GetAddress(const TAny* aCell) const
   118 //
   322 //
   119 // As much as possible, check a cell address and backspace it
   323 // As much as possible, check a cell address and backspace it
   161 		addr = dlmalloc(aSize);
   365 		addr = dlmalloc(aSize);
   162 		}
   366 		}
   163 	else
   367 	else
   164 		{
   368 		{
   165 		addr = paged_allocate(aSize);
   369 		addr = paged_allocate(aSize);
       
    370 		if(!addr) { // paged_allocate failed, fall back to dlmalloc
       
   371             addr = dlmalloc(aSize);
       
   372 		}
   166 		}
   373 		}
   167 
   374 
   168 	Unlock();
   375 	Unlock();
   169 
   376 
   170 	return addr;
   377 	return addr;
   415       v = t;
   622       v = t;
   416     }
   623     }
   417     t = leftmost_child(t);
   624     t = leftmost_child(t);
   418   }
   625   }
   419   /*  If dv is a better fit, return 0 so malloc will use it */
   626   /*  If dv is a better fit, return 0 so malloc will use it */
   420   if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
   627     if (v != 0) {
   421     if (RTCHECK(ok_address(m, v))) { /* split */
   628     if (RTCHECK(ok_address(m, v))) { /* split */
   422       mchunkptr r = chunk_plus_offset(v, nb);
   629       mchunkptr r = chunk_plus_offset(v, nb);
   423       assert(chunksize(v) == rsize + nb);
   630       assert(chunksize(v) == rsize + nb);
       
   631       
       
   632       /* check for chunk memory page-in */
       
   633       size_t npages_out = 0;
       
   634       if(page_not_in_memory(v, chunksize(v))) {
       
   635           if(!is_small(rsize) && rsize>=CHUNK_PAGEOUT_THESHOLD) {
       
   636               // partial chunk page mapping
       
   637               TInt result = map_chunk_pages_partial(v, chunksize(v), (tchunkptr)r, rsize);
       
   638               if (result < 0) return 0; // Failed to Commit RAM
       
   639               else npages_out = (size_t)result;              
       
   640           }          
       
   641           else {
       
   642               // full chunk page map needed
       
   643               TInt err = map_chunk_pages(v, chunksize(v));
       
   644               if(err != KErrNone)  return 0; // Failed to Commit RAM
       
   645           }
       
   646       }
       
   647 
   424       if (RTCHECK(ok_next(v, r))) {
   648       if (RTCHECK(ok_next(v, r))) {
   425         unlink_large_chunk(m, v);
   649         unlink_large_chunk(m, v);
   426         if (rsize < MIN_CHUNK_SIZE)
    650           if (rsize < free_chunk_threshold) // exhaust if less than slab threshold
   427           set_inuse_and_pinuse(m, v, (rsize + nb));
   651           set_inuse_and_pinuse(m, v, (rsize + nb));
   428         else {
   652         else {
   429           set_size_and_pinuse_of_inuse_chunk(m, v, nb);
   653           set_size_and_pinuse_of_inuse_chunk(m, v, nb);
   430           set_size_and_pinuse_of_free_chunk(r, rsize);
   654           set_size_and_pinuse_of_free_chunk(r, rsize);
   431           insert_chunk(m, r, rsize);
   655           insert_chunk(m, r, rsize, npages_out);
   432         }
   656         }
   433         return chunk2mem(v);
   657         return chunk2mem(v);
   434       }
   658       }
   435     }
   659     }
   436     CORRUPTION_ERROR_ACTION(m);
   660     CORRUPTION_ERROR_ACTION(m);
   458   }
   682   }
   459 
   683 
   460   if (RTCHECK(ok_address(m, v))) {
   684   if (RTCHECK(ok_address(m, v))) {
   461     mchunkptr r = chunk_plus_offset(v, nb);
   685     mchunkptr r = chunk_plus_offset(v, nb);
   462     assert(chunksize(v) == rsize + nb);
   686     assert(chunksize(v) == rsize + nb);
       
   687     
       
   688     /* check for chunk memory page-in */
       
   689       if(page_not_in_memory(v, chunksize(v))) {
       
   690           TInt err = map_chunk_pages(v, chunksize(v));
       
   691           if(err != KErrNone)  return 0; // Failed to Commit RAM
       
   692       }
       
   693       
   463     if (RTCHECK(ok_next(v, r))) {
   694     if (RTCHECK(ok_next(v, r))) {
   464       unlink_large_chunk(m, v);
   695       unlink_large_chunk(m, v);
   465       if (rsize < MIN_CHUNK_SIZE)
    696       if (rsize < free_chunk_threshold) // exhaust if less than slab threshold
   466         set_inuse_and_pinuse(m, v, (rsize + nb));
   697         set_inuse_and_pinuse(m, v, (rsize + nb));
   467       else {
   698       else {
   468         set_size_and_pinuse_of_inuse_chunk(m, v, nb);
   699         set_size_and_pinuse_of_inuse_chunk(m, v, nb);
   469         set_size_and_pinuse_of_free_chunk(r, rsize);
   700         set_size_and_pinuse_of_free_chunk(r, rsize);
   470         replace_dv(m, r, rsize);
   701         insert_chunk(m, r, rsize, 0);      
   471       }
   702       }
   472       return chunk2mem(v);
   703       return chunk2mem(v);
   473     }
   704     }
   474   }
   705   }
   475   CORRUPTION_ERROR_ACTION(m);
   706   CORRUPTION_ERROR_ACTION(m);
   513         newp = mmap_resize(m, oldp, nb);
   744         newp = mmap_resize(m, oldp, nb);
   514       else
   745       else
   515 	  if (oldsize >= nb) { /* already big enough */
   746 	  if (oldsize >= nb) { /* already big enough */
   516         size_t rsize = oldsize - nb;
   747         size_t rsize = oldsize - nb;
   517         newp = oldp;
   748         newp = oldp;
   518         if (rsize >= MIN_CHUNK_SIZE) {
   749         if (rsize >= free_chunk_threshold) {
   519           mchunkptr remainder = chunk_plus_offset(newp, nb);
   750           mchunkptr remainder = chunk_plus_offset(newp, nb);
   520           set_inuse(m, newp, nb);
   751           set_inuse(m, newp, nb);
   521           set_inuse(m, remainder, rsize);
   752           set_inuse(m, remainder, rsize);
   522           extra = chunk2mem(remainder);
   753           extra = chunk2mem(remainder);
   523         }
   754         }
   831       check_malloced_chunk(m, chunk2mem(p), nb);
  1062       check_malloced_chunk(m, chunk2mem(p), nb);
   832       return chunk2mem(p);
  1063       return chunk2mem(p);
   833     }
  1064     }
   834   }
  1065   }
   835   /*need to check this*/
  1066   /*need to check this*/
       
  1067   MEM_DUMP_OOM_LOGS(nb, "sys_alloc:: FAILED to get more memory");
       
  1068   
   836   //errno = -1;
  1069   //errno = -1;
   837   return 0;
  1070   return 0;
   838 }
  1071 }
   839 msegmentptr RSymbianDLHeap::segment_holding(mstate m, TUint8* addr) {
  1072 msegmentptr RSymbianDLHeap::segment_holding(mstate m, TUint8* addr) {
   840   msegmentptr sp = &m->seg;
  1073   msegmentptr sp = &m->seg;
   881   P->fd = F;
  1114   P->fd = F;
   882   P->bk = B;
  1115   P->bk = B;
   883 }
  1116 }
   884 
  1117 
   885 
  1118 
   886 inline void RSymbianDLHeap::insert_chunk(mstate M,mchunkptr P,size_t S)
  1119 inline void RSymbianDLHeap::insert_chunk(mstate M,mchunkptr P,size_t S,size_t NPAGES)
   887 {
  1120 {
   888 	if (is_small(S))
  1121 	if (is_small(S))
   889 		insert_small_chunk(M, P, S);
  1122 		insert_small_chunk(M, P, S);
   890 	else{
  1123 	else{
   891 		tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S);
  1124 		tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S, NPAGES);
   892 	 }
  1125 	 }
   893 }
  1126 }
   894 
  1127 
   895 inline void RSymbianDLHeap::unlink_large_chunk(mstate M,tchunkptr X)
  1128 inline void RSymbianDLHeap::unlink_large_chunk(mstate M,tchunkptr X)
   896 {
  1129 {
   897   tchunkptr XP = X->parent;
  1130   tchunkptr XP = X->parent;
   898   tchunkptr R;
  1131   tchunkptr R;
       
  1132   reset_tchunk_mem_pageout(X); // clear chunk pageout flag
   899   if (X->bk != X) {
  1133   if (X->bk != X) {
   900     tchunkptr F = X->fd;
  1134     tchunkptr F = X->fd;
   901     R = X->bk;
  1135     R = X->bk;
   902     if (RTCHECK(ok_address(M, F))) {
  1136     if (RTCHECK(ok_address(M, F))) {
   903       F->bk = R;
  1137       F->bk = R;
  1014 }
  1248 }
  1015 
  1249 
  1016 /* ------------------------- Operations on trees ------------------------- */
  1250 /* ------------------------- Operations on trees ------------------------- */
  1017 
  1251 
  1018 /* Insert chunk into tree */
  1252 /* Insert chunk into tree */
  1019 inline void RSymbianDLHeap::insert_large_chunk(mstate M,tchunkptr X,size_t S)
  1253 inline void RSymbianDLHeap::insert_large_chunk(mstate M,tchunkptr X,size_t S,size_t NPAGES)
  1020 {
  1254 {
  1021   tbinptr* H;
  1255   tbinptr* H;
  1022   bindex_t I;
  1256   bindex_t I;
  1023   compute_tree_index(S, I);
  1257   compute_tree_index(S, I);
  1024   H = treebin_at(M, I);
  1258   H = treebin_at(M, I);
  1025   X->index = I;
  1259   X->index = I;
  1026   X->child[0] = X->child[1] = 0;
  1260   X->child[0] = X->child[1] = 0;
       
  1261 
       
  1262   if(NPAGES) { set_tchunk_mem_pageout(X, NPAGES) }
       
  1263   else  { reset_tchunk_mem_pageout(X) }
       
  1264   
  1027   if (!treemap_is_marked(M, I)) {
  1265   if (!treemap_is_marked(M, I)) {
  1028     mark_treemap(M, I);
  1266     mark_treemap(M, I);
  1029     *H = X;
  1267     *H = X;
  1030     X->parent = (tchunkptr)H;
  1268     X->parent = (tchunkptr)H;
  1031     X->fd = X->bk = X;
  1269     X->fd = X->bk = X;
  1155   if (csp != old_top) {
  1393   if (csp != old_top) {
  1156     mchunkptr q = (mchunkptr)old_top;
  1394     mchunkptr q = (mchunkptr)old_top;
  1157     size_t psize = csp - old_top;
  1395     size_t psize = csp - old_top;
  1158     mchunkptr tn = chunk_plus_offset(q, psize);
  1396     mchunkptr tn = chunk_plus_offset(q, psize);
  1159     set_free_with_pinuse(q, psize, tn);
  1397     set_free_with_pinuse(q, psize, tn);
  1160     insert_chunk(m, q, psize);
  1398     insert_chunk(m, q, psize, 0);
  1161   }
  1399   }
  1162 
  1400 
  1163   check_top_chunk(m, m->top);
  1401   check_top_chunk(m, m->top);
  1164 }
  1402 }
  1165 
  1403 
  1182     size_t tsize = m->topsize += qsize;
  1420     size_t tsize = m->topsize += qsize;
  1183     m->top = q;
  1421     m->top = q;
  1184     q->head = tsize | PINUSE_BIT;
  1422     q->head = tsize | PINUSE_BIT;
  1185     check_top_chunk(m, q);
  1423     check_top_chunk(m, q);
  1186   }
  1424   }
  1187   else if (oldfirst == m->dv) {
       
  1188     size_t dsize = m->dvsize += qsize;
       
  1189     m->dv = q;
       
  1190     set_size_and_pinuse_of_free_chunk(q, dsize);
       
  1191   }
       
  1192   else {
  1425   else {
  1193     if (!cinuse(oldfirst)) {
  1426     if (!cinuse(oldfirst)) {
  1194       size_t nsize = chunksize(oldfirst);
  1427       size_t nsize = chunksize(oldfirst);
       
  1428       
       
  1429       /* check for chunk memory page-in */
       
  1430       if(page_not_in_memory(oldfirst, nsize))
       
  1431         map_chunk_pages((tchunkptr)oldfirst, nsize);       //Err Ignored, branch not reachable.
       
  1432       
  1195       unlink_chunk(m, oldfirst, nsize);
  1433       unlink_chunk(m, oldfirst, nsize);
  1196       oldfirst = chunk_plus_offset(oldfirst, nsize);
  1434       oldfirst = chunk_plus_offset(oldfirst, nsize);
  1197       qsize += nsize;
  1435       qsize += nsize;
  1198     }
  1436     }
  1199     set_free_with_pinuse(q, qsize, oldfirst);
  1437     set_free_with_pinuse(q, qsize, oldfirst);
  1200     insert_chunk(m, q, qsize);
  1438     insert_chunk(m, q, qsize, 0);
  1201     check_free_chunk(m, q);
  1439     check_free_chunk(m, q);
  1202   }
  1440   }
  1203 
  1441 
  1204   check_malloced_chunk(m, chunk2mem(p), nb);
  1442   check_malloced_chunk(m, chunk2mem(p), nb);
  1205   return chunk2mem(p);
  1443   return chunk2mem(p);
  1319 	      mchunkptr p = align_as_chunk(base);
  1557 	      mchunkptr p = align_as_chunk(base);
  1320 	      size_t psize = chunksize(p);
  1558 	      size_t psize = chunksize(p);
  1321 	      /* Can unmap if first chunk holds entire segment and not pinned */
  1559 	      /* Can unmap if first chunk holds entire segment and not pinned */
  1322 	      if (!cinuse(p) && (TUint8*)p + psize >= base + size - TOP_FOOT_SIZE) {
  1560 	      if (!cinuse(p) && (TUint8*)p + psize >= base + size - TOP_FOOT_SIZE) {
  1323 	        tchunkptr tp = (tchunkptr)p;
  1561 	        tchunkptr tp = (tchunkptr)p;
       
  1562 	        size_t npages_out = tp->npages;
  1324 	        assert(segment_holds(sp, (TUint8*)sp));
  1563 	        assert(segment_holds(sp, (TUint8*)sp));
  1325 	        if (p == m->dv) {
  1564             unlink_large_chunk(m, tp);
  1326 	          m->dv = 0;
       
  1327 	          m->dvsize = 0;
       
  1328 	        }
       
  1329 	        else {
       
  1330 	          unlink_large_chunk(m, tp);
       
  1331 	        }
       
  1332 	        if (CALL_MUNMAP(base, size) == 0) {
  1565 	        if (CALL_MUNMAP(base, size) == 0) {
  1333 	          released += size;
  1566 	          released += size;
  1334 	          m->footprint -= size;
  1567 	          m->footprint -= size;
  1335 	          /* unlink obsoleted record */
  1568 	          /* unlink obsoleted record */
  1336 	          sp = pred;
  1569 	          sp = pred;
  1337 	          sp->next = next;
  1570 	          sp->next = next;
  1338 	        }
  1571 	        }
  1339 	        else { /* back out if cannot unmap */
  1572 	        else { /* back out if cannot unmap */
  1340 	          insert_large_chunk(m, tp, psize);
  1573 	          insert_large_chunk(m, tp, psize, npages_out);
  1341 	        }
  1574 	        }
  1342 	      }
  1575 	      }
  1343 	    }
  1576 	    }
  1344 	    pred = sp;
  1577 	    pred = sp;
  1345 	    sp = next;
  1578 	    sp = next;
  1402 void* RSymbianDLHeap::dlmalloc(size_t bytes) {
  1635 void* RSymbianDLHeap::dlmalloc(size_t bytes) {
  1403   /*
  1636   /*
  1404      Basic algorithm:
  1637      Basic algorithm:
  1405      If a small request (< 256 bytes minus per-chunk overhead):
  1638      If a small request (< 256 bytes minus per-chunk overhead):
  1406        1. If one exists, use a remainderless chunk in associated smallbin.
  1639        1. If one exists, use a remainderless chunk in associated smallbin.
  1407           (Remainderless means that there are too few excess bytes to
  1640           (Remainderless means that there are too few excess bytes to represent as a chunk.)
  1408           represent as a chunk.)
  1641        2. If one exists, split the smallest available chunk in a bin, saving remainder in bin.
  1409        2. If it is big enough, use the dv chunk, which is normally the
       
  1410           chunk adjacent to the one used for the most recent small request.
       
  1411        3. If one exists, split the smallest available chunk in a bin,
       
  1412           saving remainder in dv.
       
  1413        4. If it is big enough, use the top chunk.
  1642        4. If it is big enough, use the top chunk.
  1414        5. If available, get memory from system and use it
  1643        5. If available, get memory from system and use it
  1415      Otherwise, for a large request:
  1644      Otherwise, for a large request:
  1416        1. Find the smallest available binned chunk that fits, and use it
  1645        1. Find the smallest available binned chunk that fits, splitting if necessary.
  1417           if it is better fitting than dv chunk, splitting if necessary.
       
  1418        2. If better fitting than any binned chunk, use the dv chunk.
       
  1419        3. If it is big enough, use the top chunk.
  1646        3. If it is big enough, use the top chunk.
  1420        4. If request size >= mmap threshold, try to directly mmap this chunk.
  1647        4. If request size >= mmap threshold, try to directly mmap this chunk.
  1421        5. If available, get memory from system and use it
  1648        5. If available, get memory from system and use it
  1422 
  1649 
  1423      The ugly goto's here ensure that postaction occurs along all paths.
  1650      The ugly goto's here ensure that postaction occurs along all paths.
  1441         unlink_first_small_chunk(gm, b, p, idx);
  1668         unlink_first_small_chunk(gm, b, p, idx);
  1442         set_inuse_and_pinuse(gm, p, small_index2size(idx));
  1669         set_inuse_and_pinuse(gm, p, small_index2size(idx));
  1443         mem = chunk2mem(p);
  1670         mem = chunk2mem(p);
  1444         check_malloced_chunk(gm, mem, nb);
  1671         check_malloced_chunk(gm, mem, nb);
  1445         goto postaction;
  1672         goto postaction;
  1446       }
  1673       } else {
  1447 
       
  1448       else if (nb > gm->dvsize) {
       
  1449         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
  1674         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
  1450           mchunkptr b, p, r;
  1675           mchunkptr b, p, r;
  1451           size_t rsize;
  1676           size_t rsize;
  1452           bindex_t i;
  1677           bindex_t i;
  1453           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
  1678           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
  1457           p = b->fd;
  1682           p = b->fd;
  1458           assert(chunksize(p) == small_index2size(i));
  1683           assert(chunksize(p) == small_index2size(i));
  1459           unlink_first_small_chunk(gm, b, p, i);
  1684           unlink_first_small_chunk(gm, b, p, i);
  1460           rsize = small_index2size(i) - nb;
  1685           rsize = small_index2size(i) - nb;
  1461           /* Fit here cannot be remainderless if 4byte sizes */
  1686           /* Fit here cannot be remainderless if 4byte sizes */
  1462           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  1687           if (rsize < free_chunk_threshold)
  1463             set_inuse_and_pinuse(gm, p, small_index2size(i));
  1688             set_inuse_and_pinuse(gm, p, small_index2size(i));
  1464           else {
  1689           else {
  1465             set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  1690             set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  1466             r = chunk_plus_offset(p, nb);
  1691             r = chunk_plus_offset(p, nb);
  1467             set_size_and_pinuse_of_free_chunk(r, rsize);
  1692             set_size_and_pinuse_of_free_chunk(r, rsize);
  1468             replace_dv(gm, r, rsize);
  1693             insert_chunk(gm, r, rsize, 0);
  1469           }
  1694           }
  1470           mem = chunk2mem(p);
  1695           mem = chunk2mem(p);
  1471           check_malloced_chunk(gm, mem, nb);
  1696           check_malloced_chunk(gm, mem, nb);
  1472           goto postaction;
  1697           goto postaction;
  1473         }
  1698         }
  1474 
       
  1475         else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
  1699         else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
  1476           check_malloced_chunk(gm, mem, nb);
  1700           check_malloced_chunk(gm, mem, nb);
  1477           goto postaction;
  1701           goto postaction;
  1478         }
  1702         }
  1479       }
  1703       }
  1480     }
  1704     } /* else - large alloc request */ 
  1481     else if (bytes >= MAX_REQUEST)
  1705     else if (bytes >= MAX_REQUEST)
  1482       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  1706       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  1483     else {
  1707     else {
  1484       nb = pad_request(bytes);
  1708       nb = pad_request(bytes);
  1485       if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
  1709       if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
  1486         check_malloced_chunk(gm, mem, nb);
  1710         check_malloced_chunk(gm, mem, nb);
  1487         goto postaction;
  1711         goto postaction;
  1488       }
  1712       }
  1489     }
  1713     }
  1490 
  1714 
  1491     if (nb <= gm->dvsize) {
  1715     if (nb < gm->topsize) { /* Split top */
  1492       size_t rsize = gm->dvsize - nb;
       
  1493       mchunkptr p = gm->dv;
       
  1494       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
       
  1495         mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
       
  1496         gm->dvsize = rsize;
       
  1497         set_size_and_pinuse_of_free_chunk(r, rsize);
       
  1498         set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
       
  1499       }
       
  1500       else { /* exhaust dv */
       
  1501         size_t dvs = gm->dvsize;
       
  1502         gm->dvsize = 0;
       
  1503         gm->dv = 0;
       
  1504         set_inuse_and_pinuse(gm, p, dvs);
       
  1505       }
       
  1506       mem = chunk2mem(p);
       
  1507       check_malloced_chunk(gm, mem, nb);
       
  1508       goto postaction;
       
  1509     }
       
  1510 
       
  1511     else if (nb < gm->topsize) { /* Split top */
       
  1512       size_t rsize = gm->topsize -= nb;
  1716       size_t rsize = gm->topsize -= nb;
  1513       mchunkptr p = gm->top;
  1717       mchunkptr p = gm->top;
  1514       mchunkptr r = gm->top = chunk_plus_offset(p, nb);
  1718       mchunkptr r = gm->top = chunk_plus_offset(p, nb);
  1515       r->head = rsize | PINUSE_BIT;
  1719       r->head = rsize | PINUSE_BIT;
  1516       set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  1720       set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  1522 
  1726 
  1523     mem = sys_alloc(gm, nb);
  1727     mem = sys_alloc(gm, nb);
  1524 
  1728 
  1525   postaction:
  1729   postaction:
  1526     POSTACTION(gm);
  1730     POSTACTION(gm);
       
  1731 #ifdef DL_CHUNK_MEM_DEBUG    
       
  1732     if(mem) {
       
  1733         mchunkptr pp = mem2chunk(mem);
       
  1734         do_check_any_chunk_access(pp, chunksize(pp));
       
  1735     }
       
  1736 #endif   
       
  1737 
  1527     return mem;
  1738     return mem;
  1528   }
  1739   }
  1529 
  1740 
  1530   return 0;
  1741   return 0;
  1531 }
  1742 }
  1537      with special cases for top, dv, mmapped chunks, and usage errors.
  1748      with special cases for top, dv, mmapped chunks, and usage errors.
  1538   */
  1749   */
  1539 
  1750 
  1540 	if (mem != 0)
  1751 	if (mem != 0)
  1541 	{
  1752 	{
       
  1753 	    size_t unmapped_pages = 0;
       
  1754 	    int prev_chunk_unmapped = 0;
  1542 		mchunkptr p  = mem2chunk(mem);
  1755 		mchunkptr p  = mem2chunk(mem);
  1543 #if FOOTERS
  1756 #if FOOTERS
  1544 		mstate fm = get_mstate_for(p);
  1757 		mstate fm = get_mstate_for(p);
  1545 		if (!ok_magic(fm))
  1758 		if (!ok_magic(fm))
  1546 		{
  1759 		{
  1563 					size_t prevsize = p->prev_foot;
  1776 					size_t prevsize = p->prev_foot;
  1564 					if ((prevsize & IS_MMAPPED_BIT) != 0)
  1777 					if ((prevsize & IS_MMAPPED_BIT) != 0)
  1565 					{
  1778 					{
  1566 						prevsize &= ~IS_MMAPPED_BIT;
  1779 						prevsize &= ~IS_MMAPPED_BIT;
  1567 						psize += prevsize + MMAP_FOOT_PAD;
  1780 						psize += prevsize + MMAP_FOOT_PAD;
  1568 						/*TInt tmp = TOP_FOOT_SIZE;
       
  1569 						TUint8* top = (TUint8*)fm->top + fm->topsize + 40;
       
  1570 						if((top == (TUint8*)p)&& fm->topsize > 4096)
       
  1571 						{
       
  1572 							fm->topsize += psize;
       
  1573 							msegmentptr sp = segment_holding(fm, (TUint8*)fm->top);
       
  1574 							sp->size+=psize;
       
  1575 							if (should_trim(fm, fm->topsize))
       
  1576 								sys_trim(fm, 0);
       
  1577  							goto postaction;
       
  1578 						}
       
  1579 						else*/
       
  1580 						{
       
  1581 							if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
  1781 							if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
  1582 								fm->footprint -= psize;
  1782 								fm->footprint -= psize;
  1583 							goto postaction;
  1783 							goto postaction;
  1584 						}
       
  1585 					}
  1784 					}
  1586 					else
  1785 					else
  1587 					{
  1786 					{
  1588 						mchunkptr prev = chunk_minus_offset(p, prevsize);
  1787 						mchunkptr prev = chunk_minus_offset(p, prevsize);
       
  1788 						if(page_not_in_memory(prev, prevsize)) {
       
  1789                             prev_chunk_unmapped = 1;
       
  1790                             unmapped_pages = ((tchunkptr)prev)->npages;
       
  1791 						}
       
  1792 						
  1589 						psize += prevsize;
  1793 						psize += prevsize;
  1590 						p = prev;
  1794 						p = prev;
  1591 						if (RTCHECK(ok_address(fm, prev)))
  1795 						if (RTCHECK(ok_address(fm, prev)))
  1592 						{ /* consolidate backward */
  1796 						{ /* consolidate backward */
  1593 							if (p != fm->dv)
  1797 						    unlink_chunk(fm, p, prevsize);							
  1594 							{
       
  1595 								unlink_chunk(fm, p, prevsize);
       
  1596 							}
       
  1597 							else if ((next->head & INUSE_BITS) == INUSE_BITS)
       
  1598 							{
       
  1599 								fm->dvsize = psize;
       
  1600 								set_free_with_pinuse(p, psize, next);
       
  1601 								goto postaction;
       
  1602 							}
       
  1603 						}
  1798 						}
  1604 						else
  1799 						else
  1605 							goto erroraction;
  1800 							goto erroraction;
  1606 					}
  1801 					}
  1607 				}
  1802 				}
  1609 				if (RTCHECK(ok_next(p, next) && ok_pinuse(next)))
  1804 				if (RTCHECK(ok_next(p, next) && ok_pinuse(next)))
  1610 				{
  1805 				{
  1611 					if (!cinuse(next))
  1806 					if (!cinuse(next))
  1612 					{  /* consolidate forward */
  1807 					{  /* consolidate forward */
  1613 						if (next == fm->top)
  1808 						if (next == fm->top)
  1614 						{
  1809 						{							
  1615 							size_t tsize = fm->topsize += psize;
  1810 							if(prev_chunk_unmapped) { // previous chunk is unmapped
  1616 							fm->top = p;
  1811                             /* unmap all pages between previously unmapped and end of top chunk 
  1617 							p->head = tsize | PINUSE_BIT;
  1812                                and reset top to beginning of prev chunk - done in sys_trim_partial() */
  1618 							if (p == fm->dv)
  1813                                 sys_trim_partial(fm, p, psize, unmapped_pages);
  1619 							{
  1814                                 do_check_any_chunk_access(fm->top, fm->topsize);
  1620 								fm->dv = 0;
  1815                                 goto postaction;
  1621 								fm->dvsize = 0;
       
  1622 							}
  1816 							}
  1623 							if (should_trim(fm, tsize))
  1817 							else { // forward merge to top
  1624 								sys_trim(fm, 0);
  1818                                 size_t tsize = fm->topsize += psize;
  1625 							goto postaction;
  1819                                 fm->top = p;
  1626 						}
  1820                                 p->head = tsize | PINUSE_BIT;
  1627 						else if (next == fm->dv)
  1821                                 if (should_trim(fm, tsize))
  1628 						{
  1822                                     sys_trim(fm, 0);
  1629 							size_t dsize = fm->dvsize += psize;
  1823                                 do_check_any_chunk_access(fm->top, fm->topsize);
  1630 							fm->dv = p;
  1824                                 goto postaction;							
  1631 							set_size_and_pinuse_of_free_chunk(p, dsize);
  1825 							}							    							    
  1632 							goto postaction;
       
  1633 						}
  1826 						}
  1634 						else
  1827 						else
  1635 						{
  1828 						{
  1636 							size_t nsize = chunksize(next);
  1829                             size_t nsize = chunksize(next);
       
  1830                             int next_chunk_unmapped = 0;
       
  1831                             if( page_not_in_memory(next, nsize) ) {
       
  1832                                 next_chunk_unmapped = 1;
       
  1833                                 unmapped_pages += ((tchunkptr)next)->npages;
       
  1834                             }
       
  1835                             
  1637 							psize += nsize;
  1836 							psize += nsize;
  1638 							unlink_chunk(fm, next, nsize);
  1837 							unlink_chunk(fm, next, nsize);
  1639 							set_size_and_pinuse_of_free_chunk(p, psize);
  1838 							set_size_and_pinuse_of_free_chunk(p, psize);
  1640 							if (p == fm->dv)
       
  1641 							{
       
  1642 								fm->dvsize = psize;
       
  1643 								goto postaction;
       
  1644 							}
       
  1645 						}
  1839 						}
  1646 					}
  1840 					}
  1647 					else
  1841 					else
  1648 						set_free_with_pinuse(p, psize, next);
  1842 						set_free_with_pinuse(p, psize, next);
  1649 					insert_chunk(fm, p, psize);
  1843 					
       
   1844  		            /* check if chunk memory can be released */
       
  1845 				    size_t npages_out = 0;
       
  1846 		            if(!is_small(psize) && psize>=CHUNK_PAGEOUT_THESHOLD)   
       
  1847 		                npages_out = unmap_chunk_pages((tchunkptr)p, psize, unmapped_pages);
       
  1848 
       
  1849 					insert_chunk(fm, p, psize, npages_out);
  1650 					check_free_chunk(fm, p);
  1850 					check_free_chunk(fm, p);
       
  1851 					do_chunk_page_release_check(p, psize, fm, npages_out);
  1651 					goto postaction;
  1852 					goto postaction;
  1652 				}
  1853 				}
  1653 			}
  1854 			}
  1654 erroraction:
  1855 erroraction:
  1655     	USAGE_ERROR_ACTION(fm, p);
  1856     	USAGE_ERROR_ACTION(fm, p);
  1957 			c.size = sz;
  2158 			c.size = sz;
  1958 			c.partial = 0;
  2159 			c.partial = 0;
  1959 		}
  2160 		}
  1960 		sizemap[sz>>2] = ix;
  2161 		sizemap[sz>>2] = ix;
  1961 	}
  2162 	}
       
  2163 	
       
  2164     free_chunk_threshold = pad_request(slab_threshold);
  1962 }
  2165 }
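
The free_chunk_threshold assignment above ties the DL heap's split decision to the slab allocator: at the allocation sites changed in this revision, a remainder smaller than pad_request(slab_threshold) is exhausted into the returned chunk rather than kept as a free chunk, since blocks that small are served from slabs. A minimal sketch of that split rule; the threshold value below is an assumed example, not taken from the source:

#include <cstddef>

// Assumed example value standing in for free_chunk_threshold = pad_request(slab_threshold).
static const std::size_t kFreeChunkThreshold = 0x50;

// Split a free chunk of 'have' bytes for a padded request of 'need' bytes
// (have >= need); returns the size consumed by the allocation.
static std::size_t split_for_request(std::size_t have, std::size_t need)
    {
    std::size_t rsize = have - need;                        // prospective remainder
    if (rsize < kFreeChunkThreshold)
        return have;                                        // exhaust: too small to keep as a free chunk
    return need;                                            // remainder of rsize bytes stays free
    }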
  1963 
  2166 
  1964 void* RSymbianDLHeap::slab_allocate(slabset& ss)
  2167 void* RSymbianDLHeap::slab_allocate(slabset& ss)
  1965 //
  2168 //
  1966 // Allocate a cell from the given slabset
  2169 // Allocate a cell from the given slabset
  2107 	if (p)
  2310 	if (p)
  2108 	{
  2311 	{
  2109 		TInt r = chunk.Commit(iOffset + ptrdiff(p, this),sz);
  2312 		TInt r = chunk.Commit(iOffset + ptrdiff(p, this),sz);
  2110 		if (r < 0)
  2313 		if (r < 0)
  2111 			return 0;
  2314 			return 0;
  2112 		ASSERT(p = offset(this, r - iOffset));
       
  2113 		iChunkSize += sz;	
  2315 		iChunkSize += sz;	
  2114 		return p;
  2316 		return p;
  2115 	}
  2317 	}
  2116 
  2318 
  2117 	TInt r = chunk.Allocate(sz);
  2319 	TInt r = chunk.Allocate(sz);
  2265 			return c;
  2467 			return c;
  2266 		++c;
  2468 		++c;
  2267 	}
  2469 	}
  2268 }
  2470 }
  2269 
  2471 
       
   2472 /* Only for debugging purposes - start */
       
  2473 #ifdef DL_CHUNK_MEM_DEBUG
       
  2474 void RSymbianDLHeap::debug_check_small_chunk_access(mchunkptr p, size_t psize)
       
  2475 {
       
  2476     size_t sz = chunksize(p);
       
  2477     char ch = *((char*)chunk_plus_offset(p, psize-1));
       
  2478 }
       
  2479 
       
  2480 void RSymbianDLHeap::debug_check_any_chunk_access(mchunkptr p, size_t psize)
       
  2481 {
       
  2482     if(p==0 || psize==0) return;
       
  2483     
       
  2484     mchunkptr next = chunk_plus_offset(p, psize);
       
  2485     char* t = (char*)chunk_plus_offset(p, mparams.page_size);
       
  2486     char ch = *((char*)p);
       
  2487     while((size_t)t<(size_t)next)
       
  2488     {
       
  2489         ch = *t;
       
  2490         t = (char*)chunk_plus_offset(t, mparams.page_size);
       
  2491     };
       
  2492 }
       
  2493 
       
  2494 void RSymbianDLHeap::debug_check_large_chunk_access(tchunkptr p, size_t psize)
       
  2495 {
       
  2496     mchunkptr next = chunk_plus_offset(p, psize);
       
  2497     char* t = (char*)chunk_plus_offset(p, mparams.page_size);
       
  2498     char ch = *((char*)p);
       
  2499     while((size_t)t<(size_t)next)
       
  2500     {
       
  2501         ch = *t;
       
  2502         t = (char*)chunk_plus_offset(t, mparams.page_size);
       
  2503     };
       
  2504 }
       
  2505 
       
  2506 void RSymbianDLHeap::debug_chunk_page_release_check(mchunkptr p, size_t psize, mstate fm, int mem_released)
       
  2507 {
       
  2508     if(mem_released)
       
  2509     {        
       
  2510         if(!page_not_in_memory(p, psize) )
       
  2511             MEM_LOG("CHUNK_PAGE_ERROR::dlfree, error - page_in_mem flag is corrupt");          
       
  2512         if(chunk_plus_offset(p, psize) > fm->top)
       
  2513             MEM_LOG("CHUNK_PAGE_ERROR: error Top chunk address invalid");
       
  2514         if(fm->dv >= p && fm->dv < chunk_plus_offset(p, psize))
       
  2515             MEM_LOG("CHUNK_PAGE_ERROR: error DV chunk address invalid");    
       
  2516     }
       
  2517 }
  2270 #endif
  2518 #endif
       
  2519 
       
  2520 #ifdef OOM_LOGGING
       
  2521 #include <hal.h>
       
  2522 void RSymbianDLHeap::dump_large_chunk(mstate m, tchunkptr t) {
       
  2523     tchunkptr u = t;
       
  2524     bindex_t tindex = t->index;
       
  2525     size_t tsize = chunksize(t);
       
  2526     bindex_t idx;
       
  2527     compute_tree_index(tsize, idx);
       
  2528 
       
  2529     size_t free = 0;
       
  2530     int nfree = 0;
       
  2531     do
       
  2532         {   /* traverse through chain of same-sized nodes */
       
  2533         if (u->child[0] != 0)
       
  2534             {
       
  2535             dump_large_chunk(m, u->child[0]);
       
  2536             }
       
  2537 
       
  2538         if (u->child[1] != 0)
       
  2539             {
       
  2540             dump_large_chunk(m, u->child[1]);
       
  2541             }
       
  2542 
       
  2543         free += chunksize(u);
       
  2544         nfree++;
       
  2545         u = u->fd;
       
  2546         }
       
  2547     while (u != t);
       
  2548     C_LOGF(_L8("LARGE_BIN,%d,%d,%d"), tsize, free, nfree);
       
  2549 }
       
  2550 
       
  2551 void RSymbianDLHeap::dump_dl_free_chunks()
       
  2552 { 
       
  2553     C_LOG("");    
       
  2554     C_LOG("------------ dump_dl_free_chunks start -------------");
       
  2555     C_LOG("BinType,BinSize,FreeSize,FreeCount");
       
  2556     
       
  2557     // dump small bins
       
  2558     for (int i = 0; i < NSMALLBINS; ++i)
       
  2559         {
       
  2560         sbinptr b = smallbin_at(gm, i);
       
  2561         unsigned int empty = (gm->smallmap & (1 << i)) == 0;
       
  2562         int nfree = 0;
       
  2563         if (!empty)
       
  2564             {
       
  2565             int nfree = 0;
       
  2566             size_t free = 0;
       
  2567             mchunkptr p = b->bk;
       
  2568             size_t size = chunksize(p);
       
  2569             for (; p != b; p = p->bk)
       
  2570                 {
       
  2571                 free += chunksize(p);
       
  2572                 nfree++;
       
  2573                 }
       
  2574 
       
  2575             C_LOGF(_L8("SMALL_BIN,%d,%d,%d"), size, free, nfree);
       
  2576             }
       
  2577         }
       
  2578 
       
  2579     // dump large bins
       
  2580     for (int i = 0; i < NTREEBINS; ++i)
       
  2581         {
       
  2582         tbinptr* tb = treebin_at(gm, i);
       
  2583         tchunkptr t = *tb;
       
  2584         int empty = (gm->treemap & (1 << i)) == 0;
       
  2585         if (!empty)
       
  2586             dump_large_chunk(gm, t);
       
  2587         }
       
  2588 
       
  2589     C_LOG("------------ dump_dl_free_chunks end -------------");
       
  2590     C_LOG("");
       
  2591     }
       
  2592 
       
  2593 void RSymbianDLHeap::dump_heap_logs(size_t fail_size)
       
  2594 {
       
  2595     MEM_LOG("");
       
  2596     if (fail_size) {
       
  2597         MEM_LOG("MEMDEBUG::RSymbianDLHeap OOM Log dump *************** start");
       
  2598         MEM_LOGF(_L8("Failing to alloc size: %d"), fail_size);
       
  2599     }
       
  2600     else
       
  2601         MEM_LOG("MEMDEBUG::RSymbianDLHeap Log dump *************** start");
       
  2602     
       
  2603     TInt dl_chunk_size = ptrdiff(iTop,iBase);
       
  2604     TInt slabp_chunk_size = iChunkSize + iUnmappedChunkSize - dl_chunk_size;
       
  2605     TInt freeMem = 0;    
       
  2606     HAL::Get(HALData::EMemoryRAMFree, freeMem);
       
  2607     MEM_LOGF(_L8("System Free RAM Size: %d"), freeMem);
       
   2608     MEM_LOGF(_L8("Allocator Committed Chunk Size: %d"), iChunkSize);
       
  2609     MEM_LOGF(_L8("DLHeap Arena Size=%d"), dl_chunk_size);
       
  2610     MEM_LOGF(_L8("DLHeap unmapped chunk size: %d"), iUnmappedChunkSize);
       
  2611     MEM_LOGF(_L8("Slab-Page Allocator Chunk Size=%d"), slabp_chunk_size);
       
  2612     
       
  2613     mallinfo info = dlmallinfo();   
       
  2614     TUint heapAlloc = info.uordblks;
       
  2615     TUint heapFree = info.fordblks;
       
  2616     MEM_LOGF(_L8("DLHeap allocated size: %d"), heapAlloc);
       
  2617     MEM_LOGF(_L8("DLHeap free size: %d"), heapFree);
       
  2618         
       
  2619     if (fail_size) {
       
  2620         MEM_LOG("MEMDEBUG::RSymbianDLHeap OOM Log dump *************** end");
       
  2621     }else {
       
  2622         MEM_LOG("MEMDEBUG::RSymbianDLHeap Log dump *************** end");
       
  2623     }
       
  2624     MEM_LOG("");
       
  2625 }
       
  2626 
       
  2627 #endif
       
   2628 /* Only for debugging purposes - end */
       
  2629 
       
  2630 #endif