|
1 // Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies). |
|
2 // All rights reserved. |
|
3 // This component and the accompanying materials are made available |
|
4 // under the terms of the License "Eclipse Public License v1.0" |
|
5 // which accompanies this distribution, and is available |
|
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 // |
|
8 // Initial Contributors: |
|
9 // Nokia Corporation - initial contribution. |
|
10 // |
|
11 // Contributors: |
|
12 // |
|
13 // Description: |
|
14 // e32test\demandpaging\t_datapaging.cpp |
|
15 // Functional tests for data paging. |
|
16 // 002 Test UserHeap::ChunkHeap data paging attributes |
|
17 // 003 Test RThread::Create data paging attributes |
|
18 // |
|
19 // |
|
20 |
|
21 //! @SYMTestCaseID KBASE-T_DATAPAGING |
|
22 //! @SYMTestType UT |
|
23 //! @SYMPREQ PREQ1954 |
|
24 //! @SYMTestCaseDesc Data Paging functional tests. |
|
25 //! @SYMTestActions 001 Test RChunk data paging attributes |
|
26 //! @SYMTestExpectedResults All tests should pass. |
|
27 //! @SYMTestPriority High |
|
28 //! @SYMTestStatus Implemented |
|
29 |
|
30 #define __E32TEST_EXTENSION__ |
|
31 #include <e32test.h> |
|
32 #include <dptest.h> |
|
33 #include <e32hal.h> |
|
34 #include <u32exec.h> |
|
35 #include <e32svr.h> |
|
36 #include <e32panic.h> |
|
37 #include "u32std.h" |
|
38 #include <e32msgqueue.h> |
|
39 #include <e32atomics.h> |
|
40 #include <e32math.h> |
|
41 |
|
42 #include "t_dpcmn.h" |
|
43 #include "../mmu/mmudetect.h" |
|
44 #include "../mmu/d_memorytest.h" |
|
45 #include "../mmu/paging_info.h" |
|
46 |
|
// Test object used for all checks and console output in this file.
RTest test(_L("T_DATAPAGING"));

// Name of the global paged chunk shared between the soak-test processes.
_LIT(KChunkName, "t_datapaging chunk");
|
50 |
|
// Simple pseudo-random number generator (linear congruential generator),
// seeded per-thread so concurrent threads produce different sequences.
class TRandom
	{
public:
	TRandom();
	TUint32 Next();

private:
	enum
		{
		// LCG multiplier and increment constants.
		KA = 1664525,
		KB = 1013904223
		};
	TUint32 iV;	// current generator state
	};
|
65 |
|
// Seed the generator from the calling thread's id and the system tick
// count, so each thread (and each run) gets a different sequence.
TRandom::TRandom()
	{
	iV = RThread().Id() + User::NTickCount() + 23;
	}
|
70 |
|
71 TUint32 TRandom::Next() |
|
72 { |
|
73 iV = KA * iV + KB; |
|
74 return iV; |
|
75 } |
|
76 |
|
// Create the global chunk gChunk as a data-paged, process-owned, global
// chunk of aSizeInPages pages. If aWipeByte is not -1 the chunk memory is
// initialised to that byte value instead of the default clear byte.
// gChunk must not already be open.
void CreatePagedChunk(TInt aSizeInPages, TInt aWipeByte = -1)
	{
	test_Equal(0,gChunk.Handle());	// must not already be open

	TChunkCreateInfo createInfo;
	TInt size = aSizeInPages * gPageSize;
	createInfo.SetNormal(size, size);
	createInfo.SetPaging(TChunkCreateInfo::EPaged);
	createInfo.SetOwner(EOwnerProcess);
	createInfo.SetGlobal(KChunkName);
	if (aWipeByte != -1)
		createInfo.SetClearByte(aWipeByte);
	test_KErrNone(gChunk.Create(createInfo));
	test(gChunk.IsPaged()); // this is only ever called if data paging is supported
	}
|
92 |
|
// The contents of a page is represented as type from enum below ORed with a byte value
enum TPageContent
	{
	ETypeUniform = 0 << 8,		// every byte of the page holds the same value
	ETypeIncreasing = 1 << 8,	// byte values increase by one (mod 256) across the page

	EContentValueMask = 255,		// mask extracting the start byte value
	EContentTypeMask = 255 << 8		// mask extracting the content type
	};
|
102 |
|
103 // Write to a page to page it in and verify its previous contents |
|
104 void WritePage(TInt aIndex, TUint aExpectedContents, TUint aNewContents) |
|
105 { |
|
106 test.Printf(_L(" %3d Write %x\n"), aIndex, aNewContents); |
|
107 |
|
108 TUint oldType = aExpectedContents & EContentTypeMask; |
|
109 TUint oldValue = aExpectedContents & EContentValueMask; |
|
110 |
|
111 TUint type = aNewContents & EContentTypeMask; |
|
112 TUint value = aNewContents & EContentValueMask; |
|
113 |
|
114 TUint8* page = gChunk.Base() + (gPageSize * aIndex); |
|
115 |
|
116 // write first byte first so page is paged in or rejuvenated with write permissions |
|
117 page[0] = 0; |
|
118 |
|
119 for (TInt i = 0 ; i < gPageSize ; ++i) |
|
120 { |
|
121 if (i != 0) |
|
122 test_Equal(oldValue, page[i]); |
|
123 if (oldType == ETypeIncreasing) |
|
124 oldValue = (oldValue + 1) & 255; |
|
125 |
|
126 page[i] = value; |
|
127 if (type == ETypeIncreasing) |
|
128 value = (value + 1) & 255; |
|
129 } |
|
130 } |
|
131 |
|
132 // Read a page and verify its contents |
|
133 void ReadPage(TInt aIndex, TUint aExpectedContents) |
|
134 { |
|
135 test.Printf(_L(" %3d Read %x\n"), aIndex, aExpectedContents); |
|
136 TUint type = aExpectedContents & EContentTypeMask; |
|
137 TUint value = aExpectedContents & EContentValueMask; |
|
138 TUint8* page = gChunk.Base() + (gPageSize * aIndex); |
|
139 for (TInt i = 0 ; i < gPageSize ; ++i) |
|
140 { |
|
141 test_Equal(value, page[i]); |
|
142 if (type == ETypeIncreasing) |
|
143 value = (value + 1) & 255; |
|
144 } |
|
145 } |
|
146 |
|
147 void PageOut() |
|
148 { |
|
149 test.Printf(_L(" PageOut\n")); |
|
150 DPTest::FlushCache(); |
|
151 } |
|
152 |
|
// Functional test of paging a single data page: checks initial wipe-byte
// contents, then cycles the page through page-out / page-in transitions
// in every combination of clean/dirty and read/write access, verifying
// the contents survive each transition.
void TestOnePage()
	{
	CreatePagedChunk(1, 0xed);

	// Test initial contents (read)
	ReadPage(0, ETypeUniform | 0xed);

	// Test read initial contents after flush (may or may not actually been paged out)
	PageOut();
	ReadPage(0, ETypeUniform | 0xed);

	// Test page out / page in (read) of dirty contents
	WritePage(0, ETypeUniform | 0xed, ETypeIncreasing | 0x1a);
	PageOut();
	ReadPage(0, ETypeIncreasing | 0x1a);

	// Test page out / page in (read) of clean contents
	PageOut();
	ReadPage(0, ETypeIncreasing | 0x1a);

	// Test page out / page in (write) of dirty contents
	WritePage(0, ETypeIncreasing | 0x1a, ETypeIncreasing | 0x23);
	PageOut();
	WritePage(0, ETypeIncreasing | 0x23, ETypeIncreasing | 0x45);

	CLOSE_AND_WAIT(gChunk);
	CreatePagedChunk(1, 0x0d);

	// Test initial contents (write)
	WritePage(0, ETypeUniform | 0x0d, ETypeIncreasing | 0x1a);

	// Test page out / page in (read) of dirty contents
	PageOut();
	ReadPage(0, ETypeIncreasing | 0x1a);

	CLOSE_AND_WAIT(gChunk);
	}
|
190 |
|
191 TInt PageInThreadFunc(TAny* aArg) |
|
192 { |
|
193 TUint8* page = (TUint8*)aArg; |
|
194 for (;;) |
|
195 { |
|
196 DPTest::FlushCache(); |
|
197 RDebug::Printf("Start page in..."); |
|
198 volatile TInt i = page[0]; |
|
199 (void)i; |
|
200 RDebug::Printf(" done."); |
|
201 } |
|
202 } |
|
203 |
|
204 TInt PageOutThreadFunc(TAny* aArg) |
|
205 { |
|
206 TUint8* page = (TUint8*)aArg; |
|
207 for (;;) |
|
208 { |
|
209 page[0] = 1; // make page dirty |
|
210 RDebug::Printf("Start page out..."); |
|
211 DPTest::FlushCache(); |
|
212 RDebug::Printf(" done."); |
|
213 } |
|
214 } |
|
215 |
|
// Test that killing a thread part-way through a page-in or page-out
// (aFunc is PageInThreadFunc or PageOutThreadFunc) neither corrupts state
// nor leaks kernel heap. Repeats aIterations times with a random kill delay.
void TestKillThread(TThreadFunction aFunc, TInt aIterations)
	{
	__KHEAP_MARK;
	TRandom random;
	CreatePagedChunk(1);
	TUint8* page = gChunk.Base();
	page[0] = 0; // make page dirty
	DPTest::FlushCache();
	for (TInt i = 0 ; i < aIterations ; ++i)
		{
		RThread thread;
		test_KErrNone(thread.Create(KNullDesC, aFunc, gPageSize, NULL, page));
		TRequestStatus status;
		thread.Logon(status);
		thread.Resume();
		// Let the thread run for a random 1-50ms so the kill lands at an
		// arbitrary point in its page in/out cycle.
		User::AfterHighRes((random.Next() % 50 + 1) * 1000);
		thread.Kill(123);
		User::WaitForRequest(status);
		test_Equal(123, status.Int());
		CLOSE_AND_WAIT(thread);
		}
	CLOSE_AND_WAIT(gChunk);
	User::After(1000000);	// allow asynchronous cleanup to finish before the heap check
	__KHEAP_MARKEND;
	}
|
241 |
|
// Per-thread arguments for the soak test worker threads.
struct SSoakTestArgs
	{
	TInt iThreadIndex;	// globally unique worker index (across all processes)
	TInt iPages;		// number of pages in the shared chunk
	};
|
247 |
|
248 TUint32* PageBasePtr(TInt aPage) |
|
249 { |
|
250 return (TUint32*)(gChunk.Base() + (gPageSize * aPage)); |
|
251 } |
|
252 |
|
253 TUint32* PageDataPtr(TInt aPage, TInt aThreadIndex) |
|
254 { |
|
255 return (TUint32*)((TUint8*)PageBasePtr(aPage) + ((aThreadIndex * 2 + 1) * sizeof(TUint32))); |
|
256 } |
|
257 |
|
258 TUint32 PageTag(TInt aPage) |
|
259 { |
|
260 return 0x80000000 | aPage; |
|
261 } |
|
262 |
|
263 void StopSoakTest(RMsgQueue<TInt> aMsgQueue) |
|
264 { |
|
265 while(aMsgQueue.Send(0) != KErrOverflow) |
|
266 ; |
|
267 } |
|
268 |
|
269 TBool ContinueSoakTest(RMsgQueue<TInt> aMsgQueue) |
|
270 { |
|
271 TInt msg; |
|
272 return aMsgQueue.Receive(msg) == KErrUnderflow; |
|
273 } |
|
274 |
|
// Name of the global message queue used to tell soak-test threads to stop.
_LIT(KMsgQueueName, "t_datapaging_queue");

// Soak-test thread: repeatedly pins a random run of pages of the global
// chunk, sleeps 1-20ms with them pinned, then unpins, until the stop
// queue signals. Returns the first error encountered, else KErrNone.
// NOTE(review): assumes args->iPages >= 5, otherwise "iPages / 4" is 0 and
// the modulo below divides by zero - confirm against callers. The ldd and
// msgQueue handles are not closed on the early error returns; thread exit
// cleans these up, but worth confirming this is intentional.
TInt PinPagesFunc(TAny* aArg)
	{
	SSoakTestArgs* args = (SSoakTestArgs*)aArg;

	// Open the memory-test driver and create the pin object used below.
	RMemoryTestLdd ldd;
	TInt r = ldd.Open();
	if (r != KErrNone)
		return r;
	r = ldd.CreateVirtualPinObject();
	if (r != KErrNone)
		return r;

	RMsgQueue<TInt> msgQueue;
	r = msgQueue.OpenGlobal(KMsgQueueName, EOwnerThread);
	if (r != KErrNone)
		return r;

	TInt i = 0;
	TRandom random;
	while (ContinueSoakTest(msgQueue))
		{
		// Pick a random run of pages, at most a quarter of the chunk.
		TInt count = 1 + random.Next() % (args->iPages / 4);
		TInt start = random.Next() % (args->iPages - count);
		TInt sleepInMs = 1 + random.Next() % 20;
		TUint32* ptr = PageBasePtr(start);

		r = ldd.PinVirtualMemory((TLinAddr)ptr, count * gPageSize);
		if (r != KErrNone)
			return r;

		User::AfterHighRes(sleepInMs * 1000);

		r = ldd.UnpinVirtualMemory();
		if (r != KErrNone)
			return r;

		++i;
		}

	msgQueue.Close();

	r = ldd.DestroyVirtualPinObject();
	if (r != KErrNone)
		return r;
	ldd.Close();

	RDebug::Printf(" thread %d performed %d iterations (pinning)", args->iThreadIndex, i);
	return KErrNone;
	}
|
326 |
|
327 TBool TestReadWord(TUint32* aPtr, TUint32 aExpected, TInt aThread, TInt aPage, TInt aIteration, TInt aLine, RMsgQueue<TInt> aMsgQueue) |
|
328 { |
|
329 TUint32 aActual = *aPtr; |
|
330 if (aActual != aExpected) |
|
331 { |
|
332 StopSoakTest(aMsgQueue); |
|
333 RDebug::Printf(" thread %d failure reading page %d at iteration %d address %08x: expected %08x but got %08x", |
|
334 aThread, aPage, aIteration, aPtr, aExpected, aActual); |
|
335 return EFalse; |
|
336 } |
|
337 return ETrue; |
|
338 } |
|
339 |
|
340 TInt SoakTestFunc(TAny* aArg) |
|
341 { |
|
342 SSoakTestArgs* args = (SSoakTestArgs*)aArg; |
|
343 |
|
344 |
|
345 RMsgQueue<TInt> msgQueue; |
|
346 TInt r = msgQueue.OpenGlobal(KMsgQueueName, EOwnerThread); |
|
347 if (r != KErrNone) |
|
348 return r; |
|
349 |
|
350 TUint32* contents = new TUint32[args->iPages]; |
|
351 if (contents == NULL) |
|
352 return KErrNoMemory; |
|
353 Mem::Fill(contents, args->iPages * sizeof(TUint32), 0); |
|
354 |
|
355 TInt i = 0; |
|
356 TRandom random; |
|
357 while (ContinueSoakTest(msgQueue)) |
|
358 { |
|
359 TUint32 rand = random.Next(); |
|
360 TInt page = rand % args->iPages; |
|
361 TUint32* ptr = PageDataPtr(page, args->iThreadIndex); |
|
362 TInt action = rand >> 31; |
|
363 if (action == 0) |
|
364 { |
|
365 if (!TestReadWord(PageBasePtr(page), PageTag(page), args->iThreadIndex, page, i, __LINE__, msgQueue)) |
|
366 return KErrGeneral; |
|
367 if (!TestReadWord(&ptr[0], contents[page], args->iThreadIndex, page, i, __LINE__, msgQueue)) |
|
368 return KErrGeneral; |
|
369 if (!TestReadWord(&ptr[1], contents[page], args->iThreadIndex, page, i, __LINE__, msgQueue)) |
|
370 return KErrGeneral; |
|
371 } |
|
372 else |
|
373 { |
|
374 TUint newContents = args->iThreadIndex+0x100+(contents[page]&~0xff); |
|
375 ptr[0] = newContents; |
|
376 if (!TestReadWord(PageBasePtr(page), PageTag(page), args->iThreadIndex, page, i, __LINE__, msgQueue)) |
|
377 return KErrGeneral; |
|
378 if (!TestReadWord(&ptr[1], contents[page], args->iThreadIndex, page, i, __LINE__, msgQueue)) |
|
379 return KErrGeneral; |
|
380 ptr[1] = newContents; |
|
381 contents[page] = newContents; |
|
382 } |
|
383 ++i; |
|
384 } |
|
385 |
|
386 for (TInt j = 0 ; j < args->iPages ; ++j) |
|
387 { |
|
388 TUint32* ptr = PageDataPtr(j, args->iThreadIndex); |
|
389 if (!TestReadWord(PageBasePtr(j), PageTag(j), args->iThreadIndex, j, i, __LINE__, msgQueue)) |
|
390 return KErrGeneral; |
|
391 if (!TestReadWord(&ptr[0], contents[j], args->iThreadIndex, j, i, __LINE__, msgQueue)) |
|
392 return KErrGeneral; |
|
393 if (!TestReadWord(&ptr[1], contents[j], args->iThreadIndex, j, i, __LINE__, msgQueue)) |
|
394 return KErrGeneral; |
|
395 } |
|
396 |
|
397 delete [] contents; |
|
398 msgQueue.Close(); |
|
399 |
|
400 RDebug::Printf(" thread %d performed %d iterations", args->iThreadIndex, i); |
|
401 return KErrNone; |
|
402 } |
|
403 |
|
// Child-process side of the soak test: opens the shared global chunk, runs
// aThreads soak-test threads (plus one page-pinning thread if aPinPages),
// and waits for them all. Returns KErrNone only if every thread exited
// normally with KErrNone.
TInt SoakProcess(TInt aProcessIndex, TInt aThreads, TInt aPages, TBool aPinPages)
	{
	// The pinning thread, when enabled, takes the extra (last) index.
	TInt pinThreadIndex = aPinPages ? aThreads++ : -1;

	test_KErrNone(gChunk.OpenGlobal(KChunkName, EFalse));

	SSoakTestArgs* testArgs = new SSoakTestArgs[aThreads];
	test_NotNull(testArgs);

	RThread* threads = new RThread[aThreads];
	test_NotNull(threads);

	TRequestStatus* statuses = new TRequestStatus[aThreads];
	test_NotNull(statuses);

	TInt i;
	for (i = 0 ; i < aThreads ; ++i)
		{
		// Thread indices are globally unique across all soak processes.
		testArgs[i].iThreadIndex = aProcessIndex * aThreads + i;
		testArgs[i].iPages = aPages;
		TThreadFunction func = i == pinThreadIndex ? PinPagesFunc : SoakTestFunc;
		test_KErrNone(threads[i].Create(KNullDesC, func, gPageSize, NULL, &testArgs[i]));
		threads[i].Logon(statuses[i]);
		}

	// todo: rendezvous here?

	for (i = 0 ; i < aThreads ; ++i)
		threads[i].Resume();

	// Any thread that panicked or returned an error fails the process.
	TBool ok = ETrue;
	for (i = 0 ; i < aThreads ; ++i)
		{
		User::WaitForRequest(statuses[i]);
		if (threads[i].ExitType() != EExitKill || statuses[i].Int() != KErrNone)
			ok = EFalse;
		threads[i].Close();
		}

	delete [] testArgs;
	delete [] threads;
	delete [] statuses;
	gChunk.Close();

	return ok ? KErrNone : KErrGeneral;
	}
|
450 |
|
// Entry point for a child soak-test process: parses "index threads pages
// pinPages" from the process command line and runs SoakProcess with them.
// Returns a parse error, or the result of SoakProcess.
TInt RunSoakProcess()
	{
	TBuf<80> buf;
	if (User::CommandLineLength() > buf.MaxLength())
		return KErrArgument;
	User::CommandLine(buf);
	TLex lex(buf);

	TInt index;
	TInt r = lex.Val(index);
	if (r != KErrNone)
		return r;
	lex.SkipSpace();

	TInt threads;
	r = lex.Val(threads);
	if (r != KErrNone)
		return r;
	lex.SkipSpace();

	TInt pages;
	r = lex.Val(pages);
	if (r != KErrNone)
		return r;
	lex.SkipSpace();

	TBool pinPages;
	r = lex.Val(pinPages);
	if (r != KErrNone)
		return r;

	return SoakProcess(index, threads, pages, pinPages);
	}
|
484 |
|
// Multi-process data-paging soak test: creates the shared paged chunk and
// stop queue, spawns aProcesses copies of this executable (each running
// SoakProcess), lets them run for aDurationInSeconds, then stops them and
// checks every process exited cleanly. Dumps the chunk contents on failure.
void SoakTest(TInt aProcesses, TInt aThreads, TInt aPages, TBool aPinPages, TInt aDurationInSeconds)
	{
	RDebug::Printf("Soak test: %d processes, %d threads, %d pages, %s pinning for %d seconds",
				   aProcesses, aThreads, aPages, (aPinPages ? "with" : "without"), aDurationInSeconds);
	DPTest::FlushCache();

	TInt totalThreads = (aThreads + (aPinPages ? 1 : 0)) * aProcesses;
	test(totalThreads < 512); // each thread uses two words in a page

	TMediaPagingStats dummy=EMediaPagingStatsRomAndCode;
	PagingInfo::ResetBenchmarks(-1, dummy);	// Don't worry about locmedia stats.

	// Queue capacity == totalThreads so StopSoakTest can leave one message
	// for every worker.
	RMsgQueue<TInt> msgQueue;
	test_KErrNone(msgQueue.CreateGlobal(KMsgQueueName, totalThreads, EOwnerThread));

	CreatePagedChunk(aPages, 0);
	TInt i;
	for (i = 0 ; i < aPages ; ++i)
		*PageBasePtr(i) = PageTag(i);

	// NOTE(review): these allocations are not null-checked (test code).
	RProcess* processes = new RProcess[aProcesses];
	TRequestStatus* statuses = new TRequestStatus[aProcesses];
	for (i = 0 ; i < aProcesses ; ++i)
		{
		TBuf<80> args;
		args.AppendFormat(_L("%d %d %d %d"), i, aThreads, aPages, aPinPages);
		test_KErrNone(processes[i].Create(_L("t_datapaging"), args));
		processes[i].Logon(statuses[i]);
		}

	RThread().SetPriority(EPriorityMore); // so we don't get starved of CPU by worker threads

	for (i = 0 ; i < aProcesses ; ++i)
		processes[i].Resume();

	User::After(aDurationInSeconds * 1000000);
	StopSoakTest(msgQueue);

	TBool ok = ETrue;
	for (i = 0 ; i < aProcesses ; ++i)
		{
		User::WaitForRequest(statuses[i]);
		if (processes[i].ExitType() != EExitKill || statuses[i].Int() != KErrNone)
			{
			ok = EFalse;
			RDebug::Printf(" process %i died with %d,%d", i, processes[i].ExitType(), statuses[i].Int());
			}
		processes[i].Close();
		}

	RThread().SetPriority(EPriorityNormal);

	// On failure, dump the tag word and every thread's word pair for each page.
	if (!ok)
		{
		for (i = 0 ; i < aPages ; ++i)
			{
			test.Printf(_L("%3d %08x"), i, *PageBasePtr(i));
			for (TInt j = 0 ; j < totalThreads ; ++j)
				{
				TUint32* ptr = PageDataPtr(i, j);
				test.Printf(_L(" %08x,%08x"), ptr[0], ptr[1]);
				}
			test.Printf(_L("\n"), i);	// (trailing 'i' argument is unused)
			}
		}
	test(ok);

	gChunk.Close();

	User::After(1000000);	// allow child-process cleanup to complete
	RDebug::Printf(" done");
	RDebug::Printf("\n");

	msgQueue.Close();
	delete [] processes;
	delete [] statuses;

	PagingInfo::PrintBenchmarks(-1, dummy);	// Don't worry about locmedia stats.
	}
|
564 |
|
565 void CommitPage(RChunk chunk, TInt aPageIndex) |
|
566 { |
|
567 test_KErrNone(chunk.Commit(aPageIndex * gPageSize, gPageSize)); |
|
568 } |
|
569 |
|
570 void DecommitPage(RChunk chunk, TInt aPageIndex) |
|
571 { |
|
572 test_KErrNone(chunk.Decommit(aPageIndex * gPageSize, gPageSize)); |
|
573 } |
|
574 |
|
// Block until kernel-side change notifiers have had a chance to complete
// any pending signals, by waiting on the supervisor barrier.
void WaitForNotifiers()
	{
	// wait until notifiers have had chance to signal us...
	UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);
	}
|
580 |
|
// Test the VM HAL swap functions (data paging supported): EVMHalGetSwapInfo
// must track swap usage as pages are committed/decommitted/released, and
// EVMHalSetSwapThresholds must validate its arguments and raise
// EChangesFreeMemory/EChangesLowMemory notifications when crossed.
void TestSwapHal()
	{
	test.Next(_L("Test EVMHalGetSwapInfo"));

	// A disconnected paged chunk lets individual pages be committed below.
	TChunkCreateInfo createInfo;
	createInfo.SetDisconnected(0, 0, 256 * gPageSize);
	createInfo.SetPaging(TChunkCreateInfo::EPaged);
	RChunk chunk;
	test_KErrNone(chunk.Create(createInfo));
	if (gDataPagingSupported)
		test(chunk.IsPaged());

	SVMSwapInfo swapInfo;
	test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo, 0));
	test(swapInfo.iSwapFree <= swapInfo.iSwapSize);
	test.Printf(_L(" Swap size == 0x%x bytes\n"), swapInfo.iSwapSize);
	test.Printf(_L(" Swap free == 0x%x bytes\n"), swapInfo.iSwapFree);
	if (!gDataPagingSupported)
		{
		test_Equal(0, swapInfo.iSwapSize);
		}
	else
		{
		test(swapInfo.iSwapSize != 0);

		// Committing a paged page reserves one page of swap.
		CommitPage(chunk, 0);
		SVMSwapInfo swapInfo2;
		test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
		test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
		test_Equal(swapInfo.iSwapFree - gPageSize, swapInfo2.iSwapFree);

		// Decommitting it releases the swap page again.
		DecommitPage(chunk, 0);
		test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
		test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
		test_Equal(swapInfo.iSwapFree, swapInfo2.iSwapFree);

		// Test that closing the chunk releases the swap page.
		CommitPage(chunk, 0);
		test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
		test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
		test_Equal(swapInfo.iSwapFree - gPageSize, swapInfo2.iSwapFree);

		chunk.Close();
		test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
		test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
		test_Equal(swapInfo.iSwapFree, swapInfo2.iSwapFree);

		// Chunk must be created for rest of testing.
		test_KErrNone(chunk.Create(createInfo));
		if (gDataPagingSupported)
			test(chunk.IsPaged());
		}

	// EVMHalSetSwapThresholds,
	test.Next(_L("Test EVMHalSetSwapThresholds"));
	SVMSwapThresholds thresholds;
	// low > good is invalid
	thresholds.iLowThreshold = 1;
	thresholds.iGoodThreshold = 0;
	test_Equal(KErrArgument, UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));
	// thresholds beyond the swap size are invalid
	thresholds.iLowThreshold = swapInfo.iSwapSize + 1;
	thresholds.iGoodThreshold = swapInfo.iSwapSize + 1;
	test_Equal(KErrArgument, UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));
	// boundary values 0 and iSwapSize are accepted
	thresholds.iLowThreshold = 0;
	thresholds.iGoodThreshold = 0;
	test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));
	thresholds.iLowThreshold = swapInfo.iSwapSize;
	thresholds.iGoodThreshold = swapInfo.iSwapSize;
	test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));

	// test thresholds trigger ok

	RChangeNotifier changes;
	test_KErrNone(changes.Create());
	TRequestStatus status;
	// First logon completes immediately with the initial state; re-logon
	// to get a pending request.
	test_KErrNone(changes.Logon(status));
	User::WaitForRequest(status);
	test_KErrNone(changes.Logon(status));
	test_Equal(KRequestPending, status.Int());

	// Set the low threshold two pages below current free swap, so the
	// third committed page crosses it.
	thresholds.iLowThreshold = swapInfo.iSwapFree - 2 * gPageSize;
	thresholds.iGoodThreshold = swapInfo.iSwapFree - gPageSize;
	test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));

	CommitPage(chunk, 0);
	CommitPage(chunk, 1);
	WaitForNotifiers();
	test_Equal(KRequestPending, status.Int());
	CommitPage(chunk, 2);
	WaitForNotifiers();
	test_Equal(EChangesFreeMemory | EChangesLowMemory, status.Int());
	User::WaitForRequest(status);

	// Freeing back past the good threshold signals EChangesFreeMemory only.
	test_KErrNone(changes.Logon(status));
	DecommitPage(chunk, 2);
	WaitForNotifiers();
	test_Equal(KRequestPending, status.Int());
	DecommitPage(chunk, 1);
	WaitForNotifiers();
	test_Equal(EChangesFreeMemory, status.Int());
	User::WaitForRequest(status);
	DecommitPage(chunk, 0);

	CLOSE_AND_WAIT(changes);

	// leave some sensible thresholds set
	thresholds.iLowThreshold = (10 * swapInfo.iSwapSize) / 100;
	thresholds.iGoodThreshold = (20 * swapInfo.iSwapSize) / 100;
	test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));

	CLOSE_AND_WAIT(chunk);
	}
|
692 |
|
693 void TestSwapHalNotSupported() |
|
694 { |
|
695 test_Equal(KErrNotSupported, UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, 0, 0)); |
|
696 test_Equal(KErrNotSupported, UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, 0, 0)); |
|
697 } |
|
698 |
|
699 void TestHal() |
|
700 { |
|
701 if (gDataPagingSupported) |
|
702 TestSwapHal(); |
|
703 else |
|
704 TestSwapHalNotSupported(); |
|
705 } |
|
706 |
|
707 |
|
708 TBool gStealEnable = false; |
|
709 |
|
// Thread function: repeatedly dirties every page of gChunk, decommits them
// all (with gStealEnable set so StealThread flushes concurrently), then
// recommits them. Runs at lower priority so StealThread can pre-empt it.
// Returns only if recommitting fails.
TInt DecommitThread(TAny*)
	{
	RThread().SetPriority(EPriorityLess); // so this thread gets pre-empted by StealThread
	TUint8* base = gChunk.Base();
	TInt size = gChunk.MaxSize();
	for(;;)
		{
		// dirty all pages
		for(TInt i=0; i<size; i+=gPageSize)
			base[i] = 0;
		// free pages...
		gStealEnable = true;
		gChunk.Adjust(0);
		gStealEnable = false;
		// recommit pages...
		TInt r = gChunk.Adjust(size);
		if(r!=KErrNone)
			return r; // error
		}
	}
|
730 |
|
731 |
|
// Thread function: spins until DecommitThread signals (via gStealEnable)
// that it is freeing pages, then flushes the paging cache so page stealing
// races with the decommit.
// NOTE(review): gStealEnable is a plain non-volatile global; this relies
// on it being re-read each iteration - confirm this is intended.
TInt StealThread(TAny*)
	{
	for(;;)
		{
		while(!gStealEnable)
			User::AfterHighRes(0);
		DPTest::FlushCache();
		}
	}
|
741 |
|
742 |
|
743 void TestDecommitAndStealInteraction(TInt aSeconds) |
|
744 { |
|
745 __KHEAP_MARK; |
|
746 |
|
747 CreatePagedChunk(256); |
|
748 |
|
749 RThread thread1; |
|
750 test_KErrNone(thread1.Create(_L("DecommitThread"), DecommitThread, gPageSize, NULL, 0)); |
|
751 TRequestStatus status1; |
|
752 thread1.Logon(status1); |
|
753 |
|
754 RThread thread2; |
|
755 test_KErrNone(thread2.Create(_L("StealThread"), StealThread, gPageSize, NULL, 0)); |
|
756 TRequestStatus status2; |
|
757 thread1.Logon(status2); |
|
758 |
|
759 RTimer timer; |
|
760 test_KErrNone(timer.CreateLocal()); |
|
761 TRequestStatus timeoutStatus; |
|
762 timer.After(timeoutStatus,aSeconds*1000000); |
|
763 |
|
764 thread1.Resume(); |
|
765 thread2.Resume(); |
|
766 User::WaitForAnyRequest(); |
|
767 |
|
768 thread1.Kill(123); |
|
769 User::WaitForRequest(status1); |
|
770 test_Equal(123, status1.Int()); |
|
771 CLOSE_AND_WAIT(thread1); |
|
772 |
|
773 thread2.Kill(123); |
|
774 User::WaitForRequest(status2); |
|
775 test_Equal(123, status2.Int()); |
|
776 CLOSE_AND_WAIT(thread2); |
|
777 |
|
778 CLOSE_AND_WAIT(timer); |
|
779 test_KErrNone(timeoutStatus.Int()); |
|
780 |
|
781 CLOSE_AND_WAIT(gChunk); |
|
782 __KHEAP_MARKEND; |
|
783 } |
|
784 |
|
785 TInt ThreadAtomic64Flush(TAny*) |
|
786 { |
|
787 TInt64 seed = 0x33333333; |
|
788 FOREVER |
|
789 { |
|
790 DPTest::FlushCache(); |
|
791 User::After(Math::Rand(seed) & 0x48); |
|
792 } |
|
793 } |
|
794 |
|
// Phases of the 64-bit atomics test.
enum TAtomic64Test
	{
	EAtomic64Add,	// atomic add / threshold-add operations
	EAtomic64Logic,	// atomic and/or/xor operations
	EAtomic64Cas,	// atomic compare-and-swap operations
	EAtomic64Steps,	// number of phases (loop bound)
	};
|
802 |
|
// Arguments/results block shared with each 64-bit atomics worker thread.
struct SAtomic64Args
	{
	TUint iIters;		// number of loop iterations to perform
	TUint64* iData;		// the shared 64-bit word being operated on
	TInt iIncs;			// net amount this thread added to *iData (add test)
	TUint iClears[64];	// per-bit count of 1->0 transitions (logic/cas tests)
	TUint iSets[64];	// per-bit count of 0->1 transitions (logic/cas tests)
	};
|
811 |
|
812 |
|
// Worker for the cas test: repeatedly uses compare-and-swap to flip *iData
// between all-ones and zero, counting successful transitions each way in
// iClears[0]/iSets[0].
TInt ThreadAtomic64Cas(TAny* aArgs)
	{
	SAtomic64Args& args = *(SAtomic64Args*)aArgs;
	for (TUint i = 0; i < args.iIters; i++)
		{
		TUint64 setMask = UI64LIT(0xffffffffffffffff);
		TUint64 clrMask = 0;
		// If *iData was all-ones, swap it to zero.
		if (__e32_atomic_cas_ord64(args.iData, &setMask, clrMask))
			args.iClears[0]++;
		// Undo any clearing of setMask which will happen if iData is 0.
		setMask = UI64LIT(0xffffffffffffffff);
		// If *iData was zero, swap it to all-ones.
		if (__e32_atomic_cas_ord64(args.iData, &clrMask, setMask))
			args.iSets[0]++;
		}
	return KErrNone;
	}
|
829 |
|
830 |
|
// Worker for the logic test: cycles through all 64 bit positions applying
// atomic and/or/xor/axo operations to *iData, recording every 0->1
// transition in iSets and every 1->0 transition in iClears so the final
// value of each bit can be predicted from the totals.
TInt ThreadAtomic64Logic(TAny* aArgs)
	{
	TInt r = KErrNone;
	SAtomic64Args& args = *(SAtomic64Args*)aArgs;
	for(TUint i = 0; i < args.iIters; i++)
		{
		TUint bitNo = (i & 0x3f);
		TUint64 bitMask = ((TUint64)1) << bitNo;
		TUint64 andMask = ~bitMask;

		// Clear the bit; count it if it was previously set.
		TUint64 old = __e32_atomic_and_ord64(args.iData, andMask);
		if (old & bitMask)
			args.iClears[bitNo]++;

		// Set the bit; count it if it was previously clear.
		old = __e32_atomic_ior_ord64(args.iData, bitMask);
		if (!(old & bitMask))
			args.iSets[bitNo]++;

		// Toggle the bit; count whichever transition occurred.
		old = __e32_atomic_xor_ord64(args.iData, bitMask);
		if (old & bitMask)
			args.iClears[bitNo]++;
		else
			args.iSets[bitNo]++;

		// axo (and-then-xor) with an all-ones and-mask also toggles the bit.
		old = __e32_atomic_axo_ord64(args.iData, UI64LIT(0xffffffffffffffff), bitMask);
		if (old & bitMask)
			args.iClears[bitNo]++;
		else
			args.iSets[bitNo]++;

		}
	return r;
	}
|
864 |
|
865 |
|
// Worker for the addition test: applies atomic add and threshold-add
// operations to *iData, mirroring into iIncs the amount each operation
// added so the expected final value can be computed from the totals.
TInt ThreadAtomic64Add(TAny* aArgs)
	{
	TInt r = KErrNone;
	SAtomic64Args& args = *(SAtomic64Args*)aArgs;
	for(TUint i = 0; i < args.iIters; i++)
		{
		TUint64 old = __e32_atomic_add_ord64(args.iData, 1);
		args.iIncs += 1;
		// Threshold add (unsigned): mirrored as +1 if old >= 1000, else +2.
		old = __e32_atomic_tau_ord64(args.iData, 1000, 1, 2);
		args.iIncs += (old >= 1000)? 1 : 2;
		// Threshold add (signed): mirrored as +1 if old >= 1000, else -1.
		old = __e32_atomic_tas_ord64(args.iData, 1000, 1, -1);
		args.iIncs += (old >= 1000)? 1 : -1;
		}
	return r;
	}
|
881 |
|
882 |
|
883 void TestAtomic64() |
|
884 { |
|
885 CreatePagedChunk(sizeof(TUint64)); |
|
886 TUint64* data = (TUint64*)gChunk.Base(); |
|
887 |
|
888 const TUint KThreads = 25; |
|
889 RThread threads[KThreads]; |
|
890 TRequestStatus stats[KThreads]; |
|
891 SAtomic64Args* args = new SAtomic64Args[KThreads]; |
|
892 test_NotNull(args); |
|
893 |
|
894 for (TInt testStep = EAtomic64Add; testStep < EAtomic64Steps; testStep++) |
|
895 { |
|
896 switch (testStep) |
|
897 { |
|
898 case EAtomic64Add: |
|
899 test.Next(_L("Test 64-bit atomic addition operations")); |
|
900 break; |
|
901 case EAtomic64Logic: |
|
902 test.Next(_L("Test 64-bit atomic logic operations")); |
|
903 break; |
|
904 case EAtomic64Cas: |
|
905 test.Next(_L("Test 64-bit atomic cas operations")); |
|
906 break; |
|
907 } |
|
908 *data = 0; |
|
909 RThread threadFlush; |
|
910 test_KErrNone(threadFlush.Create(_L("ThreadAtomicFlush"), ThreadAtomic64Flush, gPageSize, NULL, NULL)); |
|
911 TRequestStatus status1; |
|
912 threadFlush.Logon(status1); |
|
913 threadFlush.SetPriority(EPriorityAbsoluteHigh); |
|
914 |
|
915 memclr(args, sizeof(SAtomic64Args)*KThreads); |
|
916 TUint i = 0; |
|
917 for (; i < KThreads; i++) |
|
918 { |
|
919 args[i].iIters = 10000; |
|
920 args[i].iData = data; |
|
921 switch (testStep) |
|
922 { |
|
923 case EAtomic64Add: |
|
924 test_KErrNone(threads[i].Create(KNullDesC, ThreadAtomic64Add, gPageSize, NULL, (TAny*)&args[i])); |
|
925 break; |
|
926 case EAtomic64Logic: |
|
927 test_KErrNone(threads[i].Create(KNullDesC, ThreadAtomic64Logic, gPageSize, NULL, (TAny*)&args[i])); |
|
928 break; |
|
929 case EAtomic64Cas: |
|
930 test_KErrNone(threads[i].Create(KNullDesC, ThreadAtomic64Cas, gPageSize, NULL, (TAny*)&args[i])); |
|
931 break; |
|
932 } |
|
933 threads[i].Logon(stats[i]); |
|
934 } |
|
935 threadFlush.Resume(); |
|
936 for (i = 0; i < KThreads; i++) |
|
937 { |
|
938 threads[i].Resume(); |
|
939 } |
|
940 |
|
941 // Wait for add threads to complete and kill flushing thread. |
|
942 for (i = 0; i < KThreads; i++) |
|
943 { |
|
944 User::WaitForRequest(stats[i]); |
|
945 test_KErrNone(stats[i].Int()); |
|
946 } |
|
947 threadFlush.Kill(KErrNone); |
|
948 User::WaitForRequest(status1); |
|
949 test_KErrNone(status1.Int()); |
|
950 TInt64 expected = 0; |
|
951 switch (testStep) |
|
952 { |
|
953 case EAtomic64Add: |
|
954 { |
|
955 for (TUint i = 0; i < KThreads; i++) |
|
956 { |
|
957 threads[i].Close(); |
|
958 expected += args[i].iIncs; |
|
959 } |
|
960 break; |
|
961 } |
|
962 case EAtomic64Logic: |
|
963 { |
|
964 TUint totalSets[64]; |
|
965 TUint totalClears[64]; |
|
966 memclr(totalSets, sizeof(TUint)*64); |
|
967 memclr(totalClears, sizeof(TUint)*64); |
|
968 for (TUint i = 0; i < KThreads; i++) |
|
969 { |
|
970 threads[i].Close(); |
|
971 for (TUint j = 0; j < 64; j++) |
|
972 { |
|
973 totalSets[j] += args[i].iSets[j]; |
|
974 totalClears[j] += args[i].iClears[j]; |
|
975 } |
|
976 } |
|
977 for (TUint j = 0; j < 64; j++) |
|
978 { |
|
979 TUint64 bitMask = 1 << j; |
|
980 if (totalSets[j] > totalClears[j]) |
|
981 { |
|
982 test_Equal(totalSets[j] - 1, totalClears[j]); |
|
983 expected |= bitMask; |
|
984 } |
|
985 else |
|
986 {// Can only clear a bit if it was previously set. |
|
987 test_Equal(totalClears[j], totalSets[j]); |
|
988 } |
|
989 } |
|
990 break; |
|
991 } |
|
992 case EAtomic64Cas: |
|
993 { |
|
994 TUint totalSets = 0; |
|
995 TUint totalClears = 0; |
|
996 for (TUint i = 0; i < KThreads; i++) |
|
997 { |
|
998 threads[i].Close(); |
|
999 totalSets += args[i].iSets[0]; |
|
1000 totalClears += args[i].iClears[0]; |
|
1001 } |
|
1002 if (totalSets > totalClears) |
|
1003 { |
|
1004 test_Equal(totalSets - 1, totalClears); |
|
1005 expected = UI64LIT(0xffffffffffffffff); |
|
1006 } |
|
1007 else |
|
1008 {// Can only clear a word if it was previously set. |
|
1009 test_Equal(totalClears, totalSets); |
|
1010 } |
|
1011 break; |
|
1012 } |
|
1013 } |
|
1014 test_Equal(expected, *data); |
|
1015 CLOSE_AND_WAIT(threadFlush); |
|
1016 } |
|
1017 delete[] args; |
|
1018 CLOSE_AND_WAIT(gChunk); |
|
1019 } |
|
1020 |
|
1021 |
|
1022 // |
|
1023 // soak test for writeable paged code... |
|
1024 // |
|
1025 |
|
const TUint KCodeStride = 20;	// spacing in bytes between generated code fragments; each of CodeStart/CodeStep/CodeEnd compile-time asserts its fragment fits in one stride
|
1027 |
|
/**
Write the entry fragment of a generated code chain at aCode.

The fragment loads aInit into the accumulator register (eax on x86, r0 on
ARM) and then branches to aTarget, the next fragment on the path.

@param aCode   Address to write the fragment to (one KCodeStride slot).
@param aTarget Address the fragment branches to.
@param aInit   Initial accumulator value.
*/
void CodeStart(TUint8* aCode, TUint8* aTarget, TUint32 aInit)
	{
#if defined(__CPU_X86)
	aCode[0] = 0xb8; *(TUint32*)&(aCode[1]) = aInit;	// mov eax,aInit
	aCode[5] = 0xe9; *(TUint32*)&(aCode[6]) = aTarget-(aCode+10);	// jmp aTarget (rel32 from end of 10-byte fragment)
	__ASSERT_COMPILE(KCodeStride>=10);

#elif defined(__CPU_ARM)
	*(TUint32*)&(aCode[0]) = 0xe59f0000;	// ldr r0, [pc, #0] (loads the aInit literal below)
	// B-instruction offset in words, relative to pc = branch address (aCode+4) + 8 pipeline
	TInt32 offset = (aTarget-aCode-4-8)/4;
	if(offset&0xff000000u)
		{
		// Top byte set: a negative offset has 0xff there; XOR clears it when
		// the value fits the B instruction's signed 24-bit field, and the
		// assert catches any genuinely out-of-range offset.
		offset ^= 0xff000000u;
		test_Equal(0,offset&0xff000000u);
		}
	*(TUint32*)&(aCode[4]) = 0xea000000|offset;	// b aTarget
	*(TUint32*)&(aCode[8]) = aInit;	// dcd aInit (literal pool word for the ldr above)
	__ASSERT_COMPILE(KCodeStride>=12);

#else
#error Unknown CPU
#endif
	}
|
1051 |
|
1052 |
|
/**
Write an intermediate fragment of a generated code chain at aCode.

The fragment rotates the accumulator register (eax on x86, r0 on ARM) left
by one bit, adds aAdd to it, then branches to aTarget, the next fragment on
the path.  TestExecutableMemory() mirrors this arithmetic on the host side
so the final accumulator value can be checked.

@param aCode   Address to write the fragment to (one KCodeStride slot).
@param aTarget Address the fragment branches to.
@param aAdd    Constant added to the accumulator by this fragment.
*/
void CodeStep(TUint8* aCode, TUint8* aTarget, TUint32 aAdd)
	{
#if defined(__CPU_X86)
	aCode[0] = 0xd1; aCode[1] = 0xc0;	// rol eax, 1
	aCode[2] = 0x05; *(TUint32*)&(aCode[3]) = aAdd;	// add eax, aAdd
	aCode[7] = 0xe9; *(TUint32*)&(aCode[8]) = aTarget-(aCode+12);	// jmp aTarget (rel32 from end of 12-byte fragment)
	__ASSERT_COMPILE(KCodeStride>=12);

#elif defined(__CPU_ARM)
	*(TUint32*)&(aCode[0]) = 0xe1a00fe0;	// ror r0, r0, #31 (rotate right 31 == rotate left 1)
	*(TUint32*)&(aCode[4]) = 0xe59f1004;	// ldr r1, [pc, #4] (loads the aAdd literal below)
	*(TUint32*)&(aCode[8]) = 0xe0800001;	// add r0, r0, r1
	// B-instruction offset in words, relative to pc = branch address (aCode+12) + 8 pipeline
	TInt32 offset = (aTarget-aCode-12-8)/4;
	if(offset&0xff000000u)
		{
		// Top byte set: a negative offset has 0xff there; XOR clears it when
		// the value fits the B instruction's signed 24-bit field, and the
		// assert catches any genuinely out-of-range offset.
		offset ^= 0xff000000u;
		test_Equal(0,offset&0xff000000u);
		}
	*(TUint32*)&(aCode[12]) = 0xea000000|offset;	// b aTarget
	*(TUint32*)&(aCode[16]) = aAdd;	// dcd aAdd (literal pool word for the ldr above)
	__ASSERT_COMPILE(KCodeStride>=20);

#else
#error Unknown CPU
#endif
	}
|
1079 |
|
1080 |
|
/**
Write the terminating fragment of a generated code chain at aCode:
a plain return, leaving the accumulated result in eax / r0 as the
function's return value.

@param aCode Address to write the fragment to (one KCodeStride slot).
*/
void CodeEnd(TUint8* aCode)
	{
#if defined(__CPU_X86)
	aCode[0] = 0xc3;	// ret
	__ASSERT_COMPILE(KCodeStride>=1);

#elif defined(__CPU_ARM)
	*(TUint32*)&(aCode[0]) = 0xe12fff1e;	// bx lr
	__ASSERT_COMPILE(KCodeStride>=4);

#else
#error Unknown CPU
#endif
	}
|
1095 |
|
1096 |
|
/**
Soak test for writeable paged code.

Creates a paged code chunk (sized at twice the maximum paging cache, so
executing it must force code pages out and back in), weaves a scrambled
execution path through its pages, writes a chain of small code fragments
along that path with CodeStart/CodeStep/CodeEnd, then executes the chain
and checks the value it computes against a host-side replay of the same
rotate-and-add arithmetic.
*/
void TestExecutableMemory()
	{
	__KHEAP_MARK;

#if defined(__CPU_ARM)
	const TUint KMaxChunkSize = 31*1024*1024; // ARM branch instruction limit
#else
	const TUint KMaxChunkSize = 1024*1024*1024; // 1GB
#endif
	const TUint KMaxPages = KMaxChunkSize/gPageSize;
	TUint sizeInPages = gMaxCacheSize*2;	// bigger than the cache => guarantees paging during execution
	if(sizeInPages>KMaxPages)
		sizeInPages = KMaxPages;

	// create code chunk...
	test.Start(_L("Create code chunk"));
	TChunkCreateInfo createInfo;
	TInt size = sizeInPages * gPageSize;
	createInfo.SetCode(size, size);
	createInfo.SetPaging(TChunkCreateInfo::EPaged);
	createInfo.SetClearByte(0);
	RChunk chunk;
	test_KErrNone(chunk.Create(createInfo));
	test(chunk.IsPaged()); // this is only ever called if data paging is supported
	TUint8* base = chunk.Base();

	// create code path through the pages in the chunk with quadratic distribution...
	// (page p contributes roughly (maxStepsPerPage-1)*p^2/sizeInPages^2 + 1 slots,
	// so later pages are visited far more often than earlier ones)
	test.Next(_L("Weave path"));
	TInt pathLength = 0;
	const TUint maxStepsPerPage = gPageSize/KCodeStride;
	const TInt maxPathLength = sizeInPages*maxStepsPerPage;
	TUint8** path = (TUint8**)User::Alloc(maxPathLength*sizeof(TUint8*));
	test(path!=0);
	for(TUint page=0; page<sizeInPages; ++page)
		{
		TUint step = (maxStepsPerPage-1)*(page*page)/(sizeInPages*sizeInPages)+1;
		do path[pathLength++] = base+page*gPageSize+step*KCodeStride;
		while(--step);
		}
	// Shuffle the path with a simple linear congruential generator; the
	// 64-bit multiply-and-shift maps each 32-bit rand value onto [0,pathLength).
	TUint32 rand = 0x12345678;
	for(TUint scramble=pathLength*4; scramble>0; --scramble)
		{
		// swap random pair of entries on path...
		TUint i = (TUint)(TUint64(TUint64(rand)*TUint64(pathLength))>>32);
		rand = rand*69069+1;
		TUint j = (TUint)(TUint64(TUint64(rand)*TUint64(pathLength))>>32);
		rand = rand*69069+1;
		TUint8* t = path[i];
		path[i] = path[j];
		path[j] = t;
		}

	// write code to generated path...
	// Fragments are written from the end of the path backwards: the entry
	// point is the last entry, each fragment branches to the previous entry,
	// and path[0] gets the return.  'a' replays the accumulator arithmetic
	// (rotate left 1, add rand) that the generated CodeStep fragments perform.
	test.Next(_L("Write code"));
	TUint32 a = 0;
	TUint32 (*code)() = (TUint32 (*)())path[pathLength-1];
	CodeStart(path[pathLength-1],path[pathLength-2],a);
	while(--pathLength>1)
		{
		rand = rand*69069+1;
		CodeStep(path[pathLength-1],path[pathLength-2],rand);
		a = (a<<1)+(a>>31);	// rotate left by 1, mirroring the generated code
		a += rand;
		}
	CodeEnd(path[0]);
	--pathLength;
	test_Equal(0,pathLength);	// sanity check: the whole path was consumed
	test.Next(_L("IMB"));
	User::IMB_Range(base,base+chunk.Size());	// sync instruction/data views after writing code

	// run code...
	TMediaPagingStats dummy=EMediaPagingStatsRomAndCode;
	PagingInfo::ResetBenchmarks(-1, dummy); // Don't worry about locmedia stats.
	test.Next(_L("Execute code"));
	TUint32 result = code();
	test_Equal(a,result);	// generated chain must compute the same accumulator as the host replay
	PagingInfo::PrintBenchmarks(-1, dummy); // Don't worry about locmedia stats.

	// cleanup...
	test.Next(_L("Cleanup"));
	User::Free(path);
	CLOSE_AND_WAIT(chunk);

	test.End();

	// NOTE(review): presumably the supervisor barrier drains kernel-side
	// deferred cleanup so the kernel heap check below doesn't see transient
	// allocations — confirm against EKernelHalSupervisorBarrier docs.
	UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);
	__KHEAP_MARKEND;
	}
|
1185 |
|
1186 |
|
1187 |
|
1188 TInt E32Main() |
|
1189 { |
|
1190 test_KErrNone(UserHal::PageSizeInBytes(gPageSize)); |
|
1191 |
|
1192 if (User::CommandLineLength() != 0) |
|
1193 return RunSoakProcess(); |
|
1194 |
|
1195 test.Title(); |
|
1196 test_KErrNone(GetGlobalPolicies()); |
|
1197 |
|
1198 test.Start(_L("Test HAL APIs")); |
|
1199 TestHal(); |
|
1200 |
|
1201 if (gDataPagingSupported) |
|
1202 { |
|
1203 test.Next(_L("Test 64-bit atomic operations are atomic with paged out data")); |
|
1204 TestAtomic64(); |
|
1205 |
|
1206 test.Next(_L("Test reading and writing to a single page")); |
|
1207 TestOnePage(); |
|
1208 |
|
1209 test.Next(_L("Test interaction between decommit and steal")); |
|
1210 TestDecommitAndStealInteraction(10); |
|
1211 |
|
1212 test.Next(_L("Test killing a thread while it's paging in")); |
|
1213 TestKillThread(PageInThreadFunc, 200); |
|
1214 |
|
1215 test.Next(_L("Test killing a thread while it's paging out")); |
|
1216 TestKillThread(PageOutThreadFunc, 200); |
|
1217 |
|
1218 test.Next(_L("Test executable memory")); |
|
1219 TestExecutableMemory(); |
|
1220 |
|
1221 test.Next(_L("Soak tests")); |
|
1222 DPTest::FlushCache(); |
|
1223 for (TUint totalThreads = 1 ; totalThreads <= 64 ; totalThreads *= 4) |
|
1224 { |
|
1225 for (TUint processes = 1 ; processes <= 16 && processes <= totalThreads ; processes *= 4) |
|
1226 { |
|
1227 TUint threads = totalThreads / processes; |
|
1228 for (TUint pages = gMaxCacheSize / 2 ; pages <= gMaxCacheSize * 2 ; pages *= 2) |
|
1229 { |
|
1230 for (TUint pin = 0 ; pin <= 1 ; ++pin) |
|
1231 { |
|
1232 test.Printf(_L("processes=%d threads=%d pages=%d pin=%d\r\n"),processes, threads, pages, pin); |
|
1233 SoakTest(processes, threads, pages, pin, 3); |
|
1234 } |
|
1235 } |
|
1236 } |
|
1237 } |
|
1238 } |
|
1239 |
|
1240 test.End(); |
|
1241 return 0; |
|
1242 } |