614 struct malloc_tree_chunk* bk; |
614 struct malloc_tree_chunk* bk; |
615 |
615 |
616 struct malloc_tree_chunk* child[2]; |
616 struct malloc_tree_chunk* child[2]; |
617 struct malloc_tree_chunk* parent; |
617 struct malloc_tree_chunk* parent; |
618 bindex_t index; |
618 bindex_t index; |
|
619 size_t pageout; /* chunk pageout flag */ |
|
620 size_t npages; /* chunk pageout size */ |
619 }; |
621 }; |
620 |
622 |
/* Shorthand aliases for tree chunks used by the large-block (tree-bin) side
   of the allocator. */
typedef struct malloc_tree_chunk tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
/* Largest request still satisfied from the small bins: the small-size
   ceiling minus alignment slack and per-chunk bookkeeping overhead. */
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
651 |
653 |
652 struct malloc_state { |
654 struct malloc_state { |
653 binmap_t smallmap; |
655 binmap_t smallmap; |
654 binmap_t treemap; |
656 binmap_t treemap; |
655 size_t dvsize; |
657 size_t dvsize; // unused |
656 size_t topsize; |
658 size_t topsize; |
657 TUint8* least_addr; |
659 TUint8* least_addr; |
658 mchunkptr dv; |
660 mchunkptr dv; // unused |
659 mchunkptr top; |
661 mchunkptr top; |
660 size_t trim_check; |
662 size_t trim_check; |
661 size_t magic; |
663 size_t magic; |
662 mchunkptr smallbins[(NSMALLBINS+1)*2]; |
664 mchunkptr smallbins[(NSMALLBINS+1)*2]; |
663 tbinptr treebins[NTREEBINS]; |
665 tbinptr treebins[NTREEBINS]; |
1043 { |
1045 { |
1044 void* page; |
1046 void* page; |
1045 unsigned size; |
1047 unsigned size; |
1046 }; |
1048 }; |
/****** CODE TO SUPPORT SLAB ALLOCATOR ******/
|
1050 |
|
/****** COMMON DEFS CHUNK PAGE MAP/UNMAP *****/
/* Chunks at least this many bytes are candidates for paging their memory out. */
#define CHUNK_PAGEOUT_THESHOLD (12*1024U)
/* Magic value stored in a tree chunk's 'pageout' field to mark its pages as
   decommitted (see set_tchunk_mem_pageout / page_not_in_memory). */
#define CHUNK_PAGE_OUT_FLAG (98989U)
/* First page boundary beyond the tree-chunk header and top-foot overhead. */
#define tchunk_page_align(p) (char*)page_align((size_t)(p) + sizeof(tchunk) + TOP_FOOT_SIZE)
/* Byte distance from LOW up to HIGH. */
#define address_offset(HIGH, LOW) (size_t)((char*)(HIGH) - (char*)(LOW))

/* tree_malloc_chunk pageout header operations.
   Wrapped in do { } while (0) so each use expands to exactly one statement
   and stays safe inside unbraced if/else bodies (the caller supplies the ';').
   The former bare '{ ... }' form left a stray ';' after the block, which
   breaks an unbraced if/else around the call. */
#define set_tchunk_mem_pageout(TP, NPAGES) \
    do { (TP)->pageout = CHUNK_PAGE_OUT_FLAG; (TP)->npages = (NPAGES); } while (0)
#define reset_tchunk_mem_pageout(TP) \
    do { (TP)->pageout = 0; (TP)->npages = 0; } while (0)
/* Nonzero when chunk P (of size S) lives in a tree bin and is marked paged
   out; small chunks are never paged out, so their header is not inspected. */
#define page_not_in_memory(P, S) \
    ( !is_small(S) && ( (((tchunkptr)(P))->pageout==CHUNK_PAGE_OUT_FLAG)?1:0 ) )
|
1064 |
|
1065 |
|
#ifdef DL_CHUNK_MEM_DEBUG
/* Debug build: check that the cached iChunkSize still matches what the
   kernel reports for iChunkHandle.
   NOTE(review): brace-block macro — a trailing ';' at the call site yields
   '{...};', which breaks unbraced if/else; confirm call-site convention
   before converting to do { } while (0). */
#define ASSERT_RCHUNK_SIZE() \
{RChunk rchunk; rchunk.SetHandle(iChunkHandle); assert(iChunkSize == rchunk.Size());}
/* Trace a chunk page commit (map): chunk address/size plus the page span.
   NOTE(review): expansion ends with ';' — appears intended to be invoked
   without a trailing semicolon; verify against callers. */
#define TRACE_DL_CHUNK_MAP(c_addr, csize, page_addr, page_size) \
MEM_LOGF(_L8("DL_CHUNK_MAP$$:: chunk_addr=%x, chunk_size=%d, page_addr=%x, page_size=%d"), c_addr, csize, page_addr, (page_size));
/* Trace a chunk page decommit (unmap); same format as the map trace. */
#define TRACE_DL_CHUNK_UNMAP(c_addr, csize, page_addr, page_size) \
MEM_LOGF(_L8("DL_CHUNK_UNMAP:: chunk_addr=%x, chunk_size=%d, page_addr=%x, page_size=%d"), c_addr, csize, page_addr, (page_size));
#else
/* Non-debug build: size check and traces compile away to nothing. */
#define ASSERT_RCHUNK_SIZE()
#define TRACE_DL_CHUNK_MAP(c_addr, csize, p_addr, psize)
#define TRACE_DL_CHUNK_UNMAP(c_addr, csize, p_addr, psize)
#endif
|
1078 |
|
#ifdef OOM_LOGGING
/* Account an unmapped (decommitted) chunk's size for OOM diagnostics.
   NOTE(review): expansion carries its own trailing ';' — left byte-identical
   because the call-site convention (with or without ';') is not visible here. */
#define TRACE_UNMAPPED_CHUNK(SZ) \
    iUnmappedChunkSize += (SZ);
/* Emit MSG to both logs and dump the heap state for request size NB.
   do { } while (0) folds the three statements into a single one, so the macro
   is safe inside an unbraced if/else (the former form left only the first
   statement under the conditional).  Callers already supply the trailing ';'
   — the empty non-logging variant below requires it. */
#define MEM_DUMP_OOM_LOGS(NB, MSG) \
    do { \
        MEM_LOG(MSG); \
        C_LOG(MSG); \
        dump_heap_logs(NB); \
    } while (0)
#else
/* Logging disabled: both macros compile away to nothing. */
#define MEM_DUMP_OOM_LOGS(NB, MSG)
#define TRACE_UNMAPPED_CHUNK(SZ)
#endif

/****** COMMON DEFS PAGE MAP/UNMAP *****/
1048 #endif/*__DLA__*/ |
1092 #endif/*__DLA__*/ |