// Copyright (c) 1995-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32test\heap\t_heapslab.cpp
// Overview:
// Tests RHybridHeap class: slab allocator
// API Information:
// RHybridHeap/RHeap
// Details:
// - Starts with empty allocator configured to use slab allocation
//   on all cell sizes less than slab threshold (49).
// - Allocate enough cells of the same size to fill 128 slabs.
// - Check the number of pages used corresponds to the number of slabs.
// - Check that a new slab is taken from a partially filled page if available.
// - Check that a partially filled slab is used if available.
// - Check that if all four slabs in a page are free, the page is freed.
// - Free cells to give empty slab.
// - Free cells to give partial slab.
// - Reallocate cells.
// - RAllocator::Check() is used to check internal consistency.
// Platforms/Drives/Compatibility:
// All
// Assumptions/Requirement/Pre-requisites:
// Failures and causes:
// Base Port information:
//
//
#include <e32test.h>
#include <e32hal.h>
#include <e32def.h>
#include <e32def_private.h>
#include "dla.h"
#include "slab.h"
#include "page_alloc.h"
#include "heap_hybrid.h"

#define MAX_THREADS 4
#define MAX_ALLOCS 20000 // 16128, if slab count is 128 and alloc size is 8

//#define TSTSLAB_DBG(a) a
#define TSTSLAB_DBG(a)
struct TSlabTestThreadParm
    {
    RHeap* iHeap;
    TInt iAllocSize;
    TInt iInitSlabCount;
    TBool iUseRandomSize;
    TInt iThreadCount;
    TInt iThreadIndex;
    };
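// Local copy of the slab-related internals of an RHybridHeap, filled in by
// TestHybridHeap::GetHeapMetaData() using the EHeapMetaData debug command.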
struct TMetaData
    {
    TBool iDLOnly;
    RFastLock* iLock;
    TInt iChunkSize;
    TInt iSlabThreshold;
    unsigned iSlabInitThreshold;
    unsigned iSlabConfigBits;
    slab* iPartialPage;
    slab* iFullSlab;
    page* iSparePage;
    TUint8* iMemBase;
    unsigned char iSizeMap[(MAXSLABSIZE>>2)+1];
    slabset iSlabAlloc[MAXSLABSIZE>>2];
    slab** iSlabAllocRealRootAddress[MAXSLABSIZE>>2];
    };
class TestHybridHeap
    {
public:
    static void GetHeapMetaData(RHeap& aHeap, TMetaData& aMeta);
    };

LOCAL_D RTest test(_L("T_HEAPSLAB"));

LOCAL_D TInt PageSize;

LOCAL_D TAny* PtrBuf[MAX_THREADS][MAX_ALLOCS];
LOCAL_D TSlabTestThreadParm ThreadParm[MAX_THREADS];

enum TTestWalkFunc {ETestWalk, ETestFindSlab};


static unsigned SlabHeaderPagemap(unsigned h) {return (h&0x00000f00)>>8;}
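// Uses the RHybridHeap::EHeapMetaData debug command to locate the RHybridHeap object
// behind aHeap and copies its slab-related members into the local TMetaData snapshot.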
void TestHybridHeap::GetHeapMetaData(RHeap& aHeap, TMetaData& aMeta)
    {
    RHybridHeap::STestCommand cmd;
    cmd.iCommand = RHybridHeap::EHeapMetaData;
    TInt ret = aHeap.DebugFunction(RHeap::EHybridHeap, &cmd, 0);
    test(ret == KErrNone);

    RHybridHeap* hybridHeap = (RHybridHeap*) cmd.iData;

    aMeta.iDLOnly = hybridHeap->iDLOnly;
    aMeta.iLock = &hybridHeap->iLock;
    aMeta.iChunkSize = hybridHeap->iChunkSize;
    aMeta.iSlabThreshold = hybridHeap->iSlabThreshold;
    aMeta.iSlabInitThreshold = hybridHeap->iSlabInitThreshold;
    aMeta.iSlabConfigBits = hybridHeap->iSlabConfigBits;
    aMeta.iPartialPage = hybridHeap->iPartialPage;
    aMeta.iFullSlab = hybridHeap->iFullSlab;
    aMeta.iSparePage = hybridHeap->iSparePage;
    aMeta.iMemBase = hybridHeap->iMemBase;

    TInt i;
    TInt count;
    count = sizeof(aMeta.iSizeMap)/sizeof(unsigned char);
    for (i=0; i<count; ++i)
        {
        aMeta.iSizeMap[i] = hybridHeap->iSizeMap[i];
        }
    count = sizeof(aMeta.iSlabAlloc)/sizeof(slabset);
    for (i=0; i<count; ++i)
        {
        aMeta.iSlabAlloc[i].iPartial = hybridHeap->iSlabAlloc[i].iPartial;
        aMeta.iSlabAllocRealRootAddress[i] = &hybridHeap->iSlabAlloc[i].iPartial;
        }
    }
LOCAL_C void GetMeta(RHeap& aHeap, TMetaData& aMeta)
    {
    TestHybridHeap::GetHeapMetaData(aHeap, aMeta);
    }
/*LOCAL_C void PrintMeta(const char* aText, TMetaData& aMeta)
    {
    RDebug::Printf("=========== HeapMetaData (local) - begin: %s", aText);

    RDebug::Printf("iDLOnly: 0x%08x", aMeta.iDLOnly);
    RDebug::Printf("iChunkSize: 0x%08x", aMeta.iChunkSize);
    RDebug::Printf("iSlabThreshold: 0x%08x / %d", aMeta.iSlabThreshold, aMeta.iSlabThreshold);
    RDebug::Printf("iSlabInitThreshold: 0x%08x / %d", aMeta.iSlabInitThreshold, aMeta.iSlabInitThreshold);
    RDebug::Printf("iSlabConfigBits: 0x%08x", aMeta.iSlabConfigBits);
    RDebug::Printf("iPartialPage: 0x%08x", aMeta.iPartialPage);
    RDebug::Printf("iFullSlab: 0x%08x", aMeta.iFullSlab);
    RDebug::Printf("iSparePage: 0x%08x", aMeta.iSparePage);
    RDebug::Printf("iMemBase: 0x%08x", aMeta.iMemBase);

    TInt i;
    TInt count;
    count = sizeof(aMeta.iSizeMap)/sizeof(unsigned char);
    for (i=0; i<count; ++i)
        {
        RDebug::Printf("iSizeMap[%d]: %d", i, aMeta.iSizeMap[i]);
        }
    count = sizeof(aMeta.iSlabAlloc)/sizeof(slabset);
    for (i=0; i<count; ++i)
        {
        RDebug::Printf("iSlabAlloc[%d].iPartial: 0x%08x", i, aMeta.iSlabAlloc[i].iPartial);
        }
    for (i=0; i<count; ++i)
        {
        RDebug::Printf("iSlabAllocRealRootAddress[%d]: 0x%08x", i, aMeta.iSlabAllocRealRootAddress[i]);
        }
    RDebug::Printf("=========== HeapMetaData (local) - end");
    }

LOCAL_C void GetAndPrintMeta(RHeap& aHeap, const char* aText, TMetaData& aMeta)
    {
    (void)aText;
    GetMeta(aHeap, aMeta);
    TSTSLAB_DBG(PrintMeta(aText, aMeta));
    }

#ifndef __KERNEL_MODE__
LOCAL_C void Lock(TMetaData& aMeta)
    {((RFastLock&)*aMeta.iLock).Wait();}

LOCAL_C void Unlock(TMetaData& aMeta)
    {((RFastLock&)*aMeta.iLock).Signal();}
#else
LOCAL_C void Lock(TMetaData& aMeta)
    {;}

LOCAL_C void Unlock(TMetaData& aMeta)
    {;}
#endif
*/
LOCAL_C page* PageFor(slab* s)
    {
    return reinterpret_cast<page*>(Floor(s, PAGESIZE));
    }
LOCAL_C slab* SlabFor(const void* p)
    {
    return (slab*)(Floor(p, SLABSIZE));
    }
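// Iterative in-order walk over the partial-slab tree rooted at *aRoot. Returns the number
// of slabs visited; with ETestFindSlab it sets aOutParm to 1 and returns 0 as soon as the
// slab passed in aParm is found in the tree.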
LOCAL_C TInt TreeWalk(slab** aRealRootAddress, slab* const* aRoot, TTestWalkFunc aFunc, TAny* aParm, TInt& aOutParm)
    {
    TInt count = 0;
    aOutParm = 0;

    slab* s = *aRoot;
    if (!s)
        return count;

    for (;;)
        {
        slab* c;
        while ((c = s->iChild1) != 0)
            s = c;      // walk down left side to end
        for (;;)
            {
            count++;
            TSTSLAB_DBG(RDebug::Printf("TreeWalk - slab: 0x%08x", s));
            (void)aParm;
            if (aFunc == ETestWalk)
                {
                ;
                }
            else if (aFunc == ETestFindSlab)
                {
                if ((slab*)aParm == s)
                    {
                    aOutParm = 1;
                    return 0;
                    }
                }

            c = s->iChild2;
            if (c)
                {   // one step down right side, now try and walk down left
                s = c;
                break;
                }
            for (;;)
                {   // loop to walk up right side
                slab** pp = s->iParent;
                if (pp == aRealRootAddress)
                    return count;
                s = SlabFor(pp);
                if (pp == &s->iChild1)
                    break;
                }
            }
        }
    }
LOCAL_C TInt WalkSlabSet(TInt aSlabsetIndex, TMetaData& aMeta, TTestWalkFunc aFunc, TAny* aParm, TInt& aOutParm)
    {
    if (aSlabsetIndex >= (MAXSLABSIZE>>2))
        {
        return 0;
        }
    return TreeWalk(aMeta.iSlabAllocRealRootAddress[aSlabsetIndex], &aMeta.iSlabAlloc[aSlabsetIndex].iPartial, aFunc, aParm, aOutParm);
    }
/*LOCAL_C void DebugPrintSlabs(TInt aSlabsetIndex, TMetaData& aMeta)
    {
    //RDebug::Printf("=========== DebugPrintSlabs: %s", aText);
    RDebug::Printf("=========== DebugPrintSlabs");

    RDebug::Printf("iSparePage: 0x%08x", aMeta.iSparePage);

    slab* fullSlab = aMeta.iFullSlab;
    TInt fullSlabCount = 0;
    while (fullSlab)
        {
        RDebug::Printf("fullSlab: 0x%08x", fullSlab);
        fullSlabCount++;
        fullSlab = fullSlab->iChild1;
        }

    TInt outParm;
    TInt partialTreeSlabCount = 0;
    partialTreeSlabCount += WalkSlabSet(aSlabsetIndex, aMeta, ETestWalk, 0, outParm);

    slab* partialPageSlab = aMeta.iPartialPage;
    TInt partialPageSlabCount = 0;
    while (partialPageSlab)
        {
        RDebug::Printf("partialPageSlab (empty): 0x%08x", partialPageSlab);
        partialPageSlabCount++;
        partialPageSlab = partialPageSlab->iChild1;
        }
    }*/
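// Runs the same fixed-size allocate/free/realloc pattern as TestSlabFixedSizeOneThread,
// but omits the slab metadata assertions so that several threads can run it concurrently
// against the same heap; consistency is still verified with heap->Check().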
LOCAL_C void TestSlabFixedSizeManyThreads(TSlabTestThreadParm& aParm)
    {
    RHeap* heap = aParm.iHeap;
    TInt allocSize = aParm.iAllocSize;
    TInt initSlabCount = aParm.iInitSlabCount;
    //TBool useRandomSize = aParm.iUseRandomSize;
    //TInt threadCount = aParm.iThreadCount;
    TInt threadIndex = aParm.iThreadIndex;

    TInt slabsPerPage = PageSize/SLABSIZE;

    test(initSlabCount % slabsPerPage == 0); // for this test

#ifdef _DEBUG
    TInt allocRealCellSize = allocSize + RHeap::EDebugHdrSize;
#else
    TInt allocRealCellSize = allocSize;
#endif

    TMetaData metaData;
    GetMeta(*heap, metaData);

    if (allocRealCellSize >= metaData.iSlabThreshold)
        {
        allocRealCellSize = metaData.iSlabThreshold - 1;
#ifdef _DEBUG
        allocSize = allocRealCellSize - RHeap::EDebugHdrSize;
#else
        allocSize = allocRealCellSize;
#endif
        }

    TAny** pBuf = &PtrBuf[threadIndex][0];
    TInt i;
    for (i=0; i<MAX_ALLOCS; ++i)
        {
        pBuf[i] = 0;
        }

    //Allocate enough cells of the same size to fill initSlabCount (128) slabs
    TInt slabsetIndex = metaData.iSizeMap[(allocRealCellSize+3)>>2];
    test(slabsetIndex != 0xff);
    TInt slabCellSize = 4 + (slabsetIndex * 4);

    TInt slabPayloadSize = SLABSIZE - sizeof(slabhdr);
    TInt cellCountPerSlab = slabPayloadSize / slabCellSize;
    TInt initCellCount = initSlabCount * cellCountPerSlab;

    TSTSLAB_DBG(RDebug::Printf("=========== Allocate enough cells of the same size to fill %d slabs", initSlabCount));
    TSTSLAB_DBG(RDebug::Printf("=========== counts: %d %d %d", cellCountPerSlab, initCellCount, slabCellSize));

    for (i=0; i<initCellCount; ++i)
        {
        pBuf[i] = heap->Alloc(allocSize);
        test(pBuf[i] != 0);
        }

    heap->Check();

    TInt maxI5 = initCellCount + (cellCountPerSlab*(slabsPerPage+1));
    for (i=initCellCount; i<maxI5; ++i)
        {
        pBuf[i] = heap->Alloc(allocSize);
        test(pBuf[i] != 0);
        }

    heap->Check();

    TAny* p2 = heap->Alloc(allocSize);
    test(p2 != 0);

    heap->Check();
    heap->Free(p2);

    heap->Check();


    TInt page2pBufIndexFirst = cellCountPerSlab * slabsPerPage;
    //TInt page2pBufIndexLast = page2pBufIndexFirst + (cellCountPerSlab * slabsPerPage);

    slab* partialTreeSlabX1 = SlabFor(pBuf[page2pBufIndexFirst]);
    page* partialTreeSlabPageX1 = PageFor(partialTreeSlabX1);

    heap->Free(pBuf[page2pBufIndexFirst]);
    pBuf[page2pBufIndexFirst] = 0;

    heap->Check();

    TAny* p3 = heap->Alloc(allocSize);
    test(p3 != 0);
    heap->Check();
    heap->Free(p3);
    heap->Check();
    TInt size2 = metaData.iChunkSize;
    TSTSLAB_DBG(RDebug::Printf("---- size2: 0x%08x", size2));
    if (metaData.iSparePage)
        {
        size2 -= PageSize;
        }

    for (i=0; i<MAX_ALLOCS; ++i)
        {
        if (pBuf[i]) {
            page* page1 = PageFor(SlabFor(pBuf[i]));
            if (partialTreeSlabPageX1 == page1)
                {
                heap->Free(pBuf[i]);
                pBuf[i] = 0;
                }
            }
        }

    heap->Check();

    TInt size3 = metaData.iChunkSize;
    if (metaData.iSparePage)
        {
        size3 -= PageSize;
        }

    TInt bufIndexFirst = cellCountPerSlab;
    TInt maxI = bufIndexFirst + cellCountPerSlab;
    for (i=bufIndexFirst; i<=maxI; ++i)
        {
        if (pBuf[i])
            {
            heap->Free(pBuf[i]);
            pBuf[i] = 0;
            }
        }

    heap->Check();

    TInt firstI = cellCountPerSlab * 3;
    maxI = firstI + cellCountPerSlab;
    for (i=firstI; i<=maxI; ++i)
        {
        if (i % 3 == 0)
            {
            if (pBuf[i])
                {
                heap->Free(pBuf[i]);
                pBuf[i] = 0;
                }
            }
        }

    heap->Check();

    //Reallocate cells.
    for (i=0; i<(MAX_ALLOCS); ++i)
        {
        if (pBuf[i] != 0)
            {
            pBuf[i] = heap->ReAlloc(pBuf[i], allocSize);
            test(pBuf[i] != 0);
            }
        }

    heap->Check();

    //Allocate cells.
    for (i=0; i<(MAX_ALLOCS/4); ++i)
        {
        if (pBuf[i] == 0)
            {
            pBuf[i] = heap->Alloc(allocSize);
            test(pBuf[i] != 0);
            }
        }

    heap->Check();

    for (i=0; i<MAX_ALLOCS; ++i)
        {
        if (pBuf[i])
            {
            heap->Free(pBuf[i]);
            pBuf[i] = 0;
            }
        }
    heap->Check();

    TSTSLAB_DBG(RDebug::Printf("=========== TestSlabFixedSizeManyThreads end"));
    }
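// Single-threaded slab test: fills initSlabCount slabs with fixed-size cells and then
// inspects the slab lists directly (page count, full/partial slab counts, slab reuse from
// partially filled pages and slabs, page release when all four slabs are free), before
// exercising the free/realloc paths described in the file header.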
LOCAL_C void TestSlabFixedSizeOneThread(TSlabTestThreadParm& aParm)
    {
    RHeap* heap = aParm.iHeap;
    TInt allocSize = aParm.iAllocSize;
    TInt initSlabCount = aParm.iInitSlabCount;
    //TBool useRandomSize = aParm.iUseRandomSize;
    //TInt threadCount = aParm.iThreadCount;
    TInt threadIndex = aParm.iThreadIndex;

    TInt slabsPerPage = PageSize/SLABSIZE;

    test(initSlabCount % slabsPerPage == 0); // for this test

#ifdef _DEBUG
    TInt allocRealCellSize = allocSize + RHeap::EDebugHdrSize;
#else
    TInt allocRealCellSize = allocSize;
#endif

    TMetaData metaData;
    GetMeta(*heap, metaData);

    TSTSLAB_DBG(PrintMeta(" --- TestSlabFixedSizeOneThread start", metaData));

    if (allocRealCellSize >= metaData.iSlabThreshold)
        {
        allocRealCellSize = metaData.iSlabThreshold - 1;
#ifdef _DEBUG
        allocSize = allocRealCellSize - RHeap::EDebugHdrSize;
#else
        allocSize = allocRealCellSize;
#endif
        }

    TAny** pBuf = &PtrBuf[threadIndex][0];
    TInt i;
    for (i=0; i<MAX_ALLOCS; ++i)
        {
        pBuf[i] = 0;
        }

    //Allocate enough cells of the same size to fill initSlabCount (128) slabs
    TInt slabsetIndex = metaData.iSizeMap[(allocRealCellSize+3)>>2];
    test(slabsetIndex != 0xff);
    TInt slabCellSize = 4 + (slabsetIndex * 4);

    TInt slabPayloadSize = SLABSIZE - sizeof(slabhdr);
    TInt cellCountPerSlab = slabPayloadSize / slabCellSize;
    TInt initCellCount = initSlabCount * cellCountPerSlab;

    TSTSLAB_DBG(RDebug::Printf("=========== Allocate enough cells of the same size to fill %d slabs", initSlabCount));
    TSTSLAB_DBG(RDebug::Printf("=========== counts: %d %d %d", cellCountPerSlab, initCellCount, slabCellSize));

    for (i=0; i<initCellCount; ++i)
        {
        pBuf[i] = heap->Alloc(allocSize);
        test(pBuf[i] != 0);
        }

    heap->Check();
    GetMeta(*heap, metaData);

    TSTSLAB_DBG(PrintMeta("after init allocs", metaData));
    TSTSLAB_DBG(DebugPrintSlabs(slabsetIndex, metaData));
    //Check the number of pages used corresponds to the number of slabs.
    TSTSLAB_DBG(RDebug::Printf("=========== Check the number of pages used corresponds to the number of slabs"));

    TInt pageCountForSlabs1 = (metaData.iChunkSize / PageSize) - 1;
    TInt pageCountForSlabs2 = (initSlabCount+(slabsPerPage-1)) / slabsPerPage;
    TSTSLAB_DBG(RDebug::Printf("=========== page counts: %d %d", pageCountForSlabs1, pageCountForSlabs2));
    test(pageCountForSlabs1 == pageCountForSlabs2);

    //-----------------------------------------
    TSTSLAB_DBG(RDebug::Printf("=========== check slab counts in the lists"));

    slab* fullSlab = metaData.iFullSlab;
    TInt fullSlabCount = 0;
    while (fullSlab)
        {
        TSTSLAB_DBG(RDebug::Printf("fullSlab: 0x%08x", fullSlab));
        fullSlabCount++;
        fullSlab = fullSlab->iChild1;
        }

    TInt outParm;
    TInt partialTreeSlabCount = 0;
    partialTreeSlabCount = WalkSlabSet(slabsetIndex, metaData, ETestWalk, 0, outParm);

    slab* partialPageSlab = metaData.iPartialPage;
    TInt partialPageSlabCount = 0;
    while (partialPageSlab)
        {
        TSTSLAB_DBG(RDebug::Printf("partialPageSlab (empty): 0x%08x", partialPageSlab));
        partialPageSlabCount++;
        partialPageSlab = partialPageSlab->iChild1;
        }

    test(fullSlabCount == (initSlabCount-1));
    test(partialTreeSlabCount == 1);
    if (initSlabCount % slabsPerPage == 0)
        {
        test(partialPageSlabCount == 0);
        }
    else
        {
        test(partialPageSlabCount == 1);
        }
    //-----------------------------------------
    TSTSLAB_DBG(RDebug::Printf("=========== alloc one cell more -> one full slab more"));

    TAny* p = heap->Alloc(allocSize);
    test(p != 0);

    heap->Check();
    GetMeta(*heap, metaData);
    TSTSLAB_DBG(DebugPrintSlabs(slabsetIndex, metaData));

    fullSlab = metaData.iFullSlab;
    fullSlabCount = 0;
    while (fullSlab)
        {
        TSTSLAB_DBG(RDebug::Printf("fullSlab: 0x%08x", fullSlab));
        fullSlabCount++;
        fullSlab = fullSlab->iChild1;
        }
    test(fullSlabCount == initSlabCount);

    heap->Free(p);

    heap->Check();
    GetMeta(*heap, metaData);
    TSTSLAB_DBG(DebugPrintSlabs(slabsetIndex, metaData));
    //-----------------------------------------
    //Check that a new slab is taken from a partially filled page if available.
    TSTSLAB_DBG(RDebug::Printf("=========== Check that a new slab is taken from a partially filled page if available"));

    // fill the first slab in the page (after iSparePage)
    TInt maxI5 = initCellCount + (cellCountPerSlab*(slabsPerPage+1));
    for (i=initCellCount; i<maxI5; ++i)
        {
        pBuf[i] = heap->Alloc(allocSize);
        test(pBuf[i] != 0);
        }

    heap->Check();
    GetMeta(*heap, metaData);
    TSTSLAB_DBG(DebugPrintSlabs(slabsetIndex, metaData));

    partialPageSlab = metaData.iPartialPage;
    partialPageSlabCount = 0;
    while (partialPageSlab)
        {
        TSTSLAB_DBG(RDebug::Printf("partialPageSlab (empty): 0x%08x", partialPageSlab));
        partialPageSlabCount++;
        partialPageSlab = partialPageSlab->iChild1;
        }
    test(partialPageSlabCount == 1);

    page* page1 = PageFor(metaData.iPartialPage);
    unsigned header = page1->iSlabs[0].iHeader;
    unsigned pagemap = SlabHeaderPagemap(header);
    unsigned slabix = LOWBIT(pagemap);
    slab* partialPageSlab2 = &page1->iSlabs[slabix];

    TAny* p2 = heap->Alloc(allocSize);
    test(p2 != 0);

    heap->Check();
    TSTSLAB_DBG(RDebug::Printf("p2: 0x%08x; partialPageSlab2: 0x%08x", p2, partialPageSlab2));
    test(partialPageSlab2 == SlabFor(p2));
    heap->Free(p2);

    heap->Check();

    //-----------------------------
    // use the second page for the next test
    TInt page2pBufIndexFirst = cellCountPerSlab * slabsPerPage;
    //TInt page2pBufIndexLast = page2pBufIndexFirst + (cellCountPerSlab * slabsPerPage);

    //-----------------------------------------
    //Check that a partially filled slab is used if available.
    TSTSLAB_DBG(RDebug::Printf("=========== Check that a partially filled slab is used if available"));

    slab* partialTreeSlabX1 = SlabFor(pBuf[page2pBufIndexFirst]);
    page* partialTreeSlabPageX1 = PageFor(partialTreeSlabX1);

    heap->Free(pBuf[page2pBufIndexFirst]);
    pBuf[page2pBufIndexFirst] = 0;

    heap->Check();

    TAny* p3 = heap->Alloc(allocSize);
    test(p3 != 0);
    heap->Check();
    test(partialTreeSlabX1 == SlabFor(p3));
    heap->Free(p3);
    heap->Check();
    //-----------------------------------------
    //Check that if all four slabs in a page are free the page is freed.
    TSTSLAB_DBG(RDebug::Printf("=========== Check that if all four slabs in a page are free, the page is freed"));

    GetMeta(*heap, metaData);
    TSTSLAB_DBG(DebugPrintSlabs(slabsetIndex, metaData));

    TInt size2 = metaData.iChunkSize;
    TSTSLAB_DBG(RDebug::Printf("---- size2: 0x%08x", size2));
    if (metaData.iSparePage)
        {
        size2 -= PageSize;
        }

    for (i=0; i<MAX_ALLOCS; ++i)
        {
        if (pBuf[i]) {
            page* page1 = PageFor(SlabFor(pBuf[i]));
            if (partialTreeSlabPageX1 == page1)
                {
                heap->Free(pBuf[i]);
                pBuf[i] = 0;
                }
            }
        }

    heap->Check();
    GetMeta(*heap, metaData);
    TSTSLAB_DBG(DebugPrintSlabs(slabsetIndex, metaData));

    TInt size3 = metaData.iChunkSize;
    if (metaData.iSparePage)
        {
        size3 -= PageSize;
        }

    test(size3 == (size2-PageSize));

    //-----------------------------------------
    //Free cells to give empty slab (The second slab in the first page)
    TSTSLAB_DBG(RDebug::Printf("=========== Free cells to give empty slab (The second slab in the first page)"));
    slab* emptySlabAddr = (slab*)(metaData.iMemBase + SLABSIZE);

    //Check that emptySlabAddr is not already in iPartialPage list
    partialPageSlab = metaData.iPartialPage;
    while (partialPageSlab)
        {
        if (partialPageSlab == emptySlabAddr)
            {
            test(0);
            }
        partialPageSlab = partialPageSlab->iChild1;
        }

    // free cells to give empty slab - emptySlabAddr
    TInt bufIndexFirst = cellCountPerSlab;
    TInt maxI = bufIndexFirst + cellCountPerSlab;
    for (i=bufIndexFirst; i<=maxI; ++i)
        {
        if (pBuf[i])
            {
            heap->Free(pBuf[i]);
            pBuf[i] = 0;
            }
        }

    heap->Check();
    GetMeta(*heap, metaData);
    TSTSLAB_DBG(DebugPrintSlabs(slabsetIndex, metaData));

    // Check that emptySlabAddr is now in iPartialPage list
    partialPageSlab = metaData.iPartialPage;
    while (partialPageSlab)
        {
        if (partialPageSlab == emptySlabAddr)
            {
            break;
            }
        partialPageSlab = partialPageSlab->iChild1;
        }
    test(partialPageSlab != 0);
    //Free cells to give partial slab (The third slab in the first page)
    TSTSLAB_DBG(RDebug::Printf("=========== Free cells to give partial slab (The third slab in the first page)"));
    slab* partialSlabAddr = (slab*)(metaData.iMemBase + (3*SLABSIZE));

    // Check that partialSlabAddr is not now in iPartialSlab list
    WalkSlabSet(slabsetIndex, metaData, ETestFindSlab, partialSlabAddr, outParm);
    test(outParm == 0);

    TInt firstI = cellCountPerSlab * 3;
    maxI = firstI + cellCountPerSlab;
    for (i=firstI; i<=maxI; ++i)
        {
        if (i % 3 == 0)
            {
            if (pBuf[i])
                {
                heap->Free(pBuf[i]);
                pBuf[i] = 0;
                }
            }
        }

    heap->Check();
    GetMeta(*heap, metaData);
    TSTSLAB_DBG(DebugPrintSlabs(slabsetIndex, metaData));

    // Check that partialSlabAddr is now in iPartialSlab list
    WalkSlabSet(slabsetIndex, metaData, ETestFindSlab, partialSlabAddr, outParm);
    test(outParm == 1);

    //Reallocate cells.
    for (i=0; i<(MAX_ALLOCS); ++i)
        {
        if (pBuf[i] != 0)
            {
            pBuf[i] = heap->ReAlloc(pBuf[i], allocSize);
            test(pBuf[i] != 0);
            }
        }

    heap->Check();

    //Allocate cells.
    for (i=0; i<(MAX_ALLOCS/4); ++i)
        {
        if (pBuf[i] == 0)
            {
            pBuf[i] = heap->Alloc(allocSize);
            test(pBuf[i] != 0);
            }
        }

    heap->Check();

    for (i=0; i<MAX_ALLOCS; ++i)
        {
        if (pBuf[i])
            {
            heap->Free(pBuf[i]);
            pBuf[i] = 0;
            }
        }
    heap->Check();

    TSTSLAB_DBG(RDebug::Printf("=========== TestSlabFixedSizeOneThread end"));
    }
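// Creates the heap under test and configures it for slab allocation via the
// RHybridHeap::ESetConfig debug command (slab config bits 0xabe, no delayed slab
// threshold, page allocator disabled).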
LOCAL_C RHeap* CreateSlabHeap(TInt aThreadCount)
    {
    //TPtrC slabHeap=_L("SlabHeap");
    //RHeap* heap = User::ChunkHeap(&slabHeap, 0x1000, 0x10000);
    TInt maxLth = 0x60000 * aThreadCount;
    RHeap* heap = User::ChunkHeap(0, 0x1000, maxLth);
    test(heap!=NULL);

    // Configure heap for slab
    RHybridHeap::STestCommand cmd;
    cmd.iCommand = RHybridHeap::ESetConfig;
    cmd.iConfig.iSlabBits = 0xabe;
    cmd.iConfig.iDelayedSlabThreshold = 0;
    cmd.iConfig.iPagePower = 0; // 16 // 0 -> no page allocator
    TInt ret = heap->DebugFunction(RHeap::EHybridHeap, &cmd, 0);
    test(ret == KErrNone);

    return heap;
    }
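// Thread entry point for the multi-threaded run: loops the fixed-size slab test
// against the shared heap.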
LOCAL_C TInt SlabTestManyThreads(TAny* aThreadParm)
    {
    TSlabTestThreadParm* parm = (TSlabTestThreadParm*)aThreadParm;

    TInt i;
    TInt maxLoops = 30; //300;
    for (i=0; i<maxLoops; ++i)
        {
        TestSlabFixedSizeManyThreads(*parm);
        }

    return KErrNone;
    }
LOCAL_C TInt SlabTestOneThread(TAny* aThreadParm)
    {
    TSlabTestThreadParm* parm = (TSlabTestThreadParm*)aThreadParm;
    TestSlabFixedSizeOneThread(*parm);
    return KErrNone;
    }
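// Creates the shared test heap, launches aThreadCount worker threads (the single-threaded
// test when aThreadCount is 1, the looping multi-threaded test otherwise), polls until all
// of them have exited, then closes the threads and the heap.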
TInt StartThreads(TInt aThreadCount, TSlabTestThreadParm& aThreadParm)
    {
    const TInt KSlabTestThreadStackSize=0x4000; //0x10000; //0x2000;

    TRequestStatus theStatus[MAX_THREADS];
    RThread theThreads[MAX_THREADS];
    TBool threadInUse[MAX_THREADS];

    TInt index;
    TInt ret;

    if (aThreadCount <= 0)
        {
        return KErrNone;
        }

    RHeap* heap = CreateSlabHeap(aThreadCount);
    aThreadParm.iHeap = heap;

    for (index = 0; index < aThreadCount; index++)
        {
        ThreadParm[index].iHeap = aThreadParm.iHeap;
        ThreadParm[index].iAllocSize = aThreadParm.iAllocSize;
        ThreadParm[index].iInitSlabCount = aThreadParm.iInitSlabCount;
        ThreadParm[index].iUseRandomSize = aThreadParm.iUseRandomSize;
        ThreadParm[index].iThreadCount = aThreadParm.iThreadCount;

        ThreadParm[index].iThreadIndex = index;

        TBuf<32> threadName;
        threadName.Format(_L("SlabTest%d"), index);
        if (aThreadCount == 1)
            {
            ret = theThreads[index].Create(threadName, SlabTestOneThread, KSlabTestThreadStackSize, NULL, (TAny*)&ThreadParm[index]);
            }
        else
            {
            ret = theThreads[index].Create(threadName, SlabTestManyThreads, KSlabTestThreadStackSize, NULL, (TAny*)&ThreadParm[index]);
            }
        test(ret == KErrNone);
        theThreads[index].Logon(theStatus[index]);
        test(theStatus[index] == KRequestPending);
        threadInUse[index] = ETrue;
        theThreads[index].Resume();
        }

    User::WaitForAnyRequest();

    TBool anyUsed = ETrue;
    while (anyUsed)
        {
        User::After(1001000);
        anyUsed = EFalse;
        for (index = 0; index < aThreadCount; index++)
            {
            if (threadInUse[index])
                {
                if (theThreads[index].ExitType() != EExitPending)
                    {
                    threadInUse[index] = EFalse;
                    }
                else
                    {
                    anyUsed = ETrue;
                    }
                }
            }
        }

    for (index = 0; index < aThreadCount; index++)
        {
        theThreads[index].Close();
        }
    TSTSLAB_DBG(RDebug::Printf("=========== StartThreads end"));
    heap->Close();

    return KErrNone;
    }
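// Test entry point: reads the page size, probes a heap to see whether the hybrid allocator
// is configured as DL-only (no slab allocator, nothing to test), then runs the one-thread
// test for every allocation size below the slab threshold and a multi-threaded run (three
// threads) with a fixed allocation size.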
GLDEF_C TInt E32Main(void)
    {
    TInt ret;

    test.Title();

    __KHEAP_MARK;

    test.Start(_L("TestSlab"));
    UserHal::PageSizeInBytes(PageSize);

    RHeap* heap = CreateSlabHeap(1);

    TMetaData metaData;
    GetMeta(*heap, metaData);

    heap->Close();

    if (metaData.iDLOnly)
        {
        test.Printf(_L("Slab allocator is not used, no tests to run\n"));
        __KHEAP_MARKEND;
        test.End();
        return(0);
        }

    TSlabTestThreadParm threadParm;
    threadParm.iHeap = heap;
    threadParm.iAllocSize = 17;
    threadParm.iInitSlabCount = 128; // 12
    threadParm.iUseRandomSize = EFalse;

    test.Next(_L("TestSlab - one thread"));

    TInt threadCount;
    threadCount = 1;
    if (threadCount > MAX_THREADS)
        {
        threadCount = MAX_THREADS;
        }
    threadParm.iThreadCount = threadCount;

#if 0
    ret = StartThreads(threadCount, threadParm);
    test(ret==KErrNone);

#else

    TInt i;
    for (i=1; i<metaData.iSlabThreshold; ++i)
        {
#ifdef _DEBUG
        if ((i + RHeap::EDebugHdrSize) >= metaData.iSlabThreshold)
            {
            break;
            }
#endif // _DEBUG
        TSTSLAB_DBG(RDebug::Printf("=========== StartThreads size: %d", i));
        threadParm.iAllocSize = i;
        test.Printf(_L("AllocSize: %d\n"), i);
        ret = StartThreads(threadCount, threadParm);
        test(ret==KErrNone);
        }
#endif


    test.Next(_L("TestSlab - many threads"));

    threadParm.iAllocSize = 17;

    threadCount = 3;
    if (threadCount > MAX_THREADS)
        {
        threadCount = MAX_THREADS;
        }
    threadParm.iThreadCount = threadCount;

#if 1
    ret = StartThreads(threadCount, threadParm);
    test(ret==KErrNone);

#else

    TInt i;
    for (i=1; i<metaData.iSlabThreshold; ++i)
        {
#ifdef _DEBUG
        if ((i + RHeap::EDebugHdrSize) >= metaData.iSlabThreshold)
            {
            break;
            }
#endif // _DEBUG
        TSTSLAB_DBG(RDebug::Printf("=========== StartThreads size: %d", i));
        threadParm.iAllocSize = i;
        test.Printf(_L("AllocSize: %d\n"), i);
        ret = StartThreads(threadCount, threadParm);
        test(ret==KErrNone);
        }
#endif

    __KHEAP_MARKEND;

    test.End();
    return(0);
    }