|
1 // Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies). |
|
2 // All rights reserved. |
|
3 // This component and the accompanying materials are made available |
|
4 // under the terms of the License "Eclipse Public License v1.0" |
|
5 // which accompanies this distribution, and is available |
|
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 // |
|
8 // Initial Contributors: |
|
9 // Nokia Corporation - initial contribution. |
|
10 // |
|
11 // Contributors: |
|
12 // |
|
13 // Description: |
|
14 // e32test/mmu/t_shbuf.cpp |
|
15 // |
|
16 |
|
17 #define __E32TEST_EXTENSION__ |
|
18 |
|
19 #include <e32test.h> |
|
20 #include <hal.h> |
|
21 #include <e32svr.h> |
|
22 #include <u32hal.h> |
|
23 #include "d_shbuf.h" |
|
24 #include <e32shbuf.h> |
|
25 #include <e32def.h> |
|
26 #include <e32def_private.h> |
|
27 |
|
// Test framework instance for this suite.
RTest test(_L("T_SHBUF"));

RShPool P1; // User-side pool
RShPool P2; // Kernel-side pool

// Nominal total pool size used by the tests.
const TInt KTestPoolSizeInBytes = 1 << 20; // 1MB
// Buffer sizes the test cases iterate over via PtrBufSize.
const TInt BufferSize[] = {128, 853, 4096, 5051, 131072, 1, 0}; // Last element must be 0

// Points at the BufferSize[] entry currently being tested.
const TInt* PtrBufSize;

// Channel to the d_shbuf test LDD.
RShBufTestChannel Ldd;

// Command-line token and global semaphore name used by slave-process test
// cases (their usage is not visible in this part of the file — see below).
_LIT(KTestSlave, "SLAVE");
_LIT(KTestLowSpaceSemaphore, "LowSpaceSemaphore");

// Sub-commands passed to a slave test process.
enum TTestSlave
	{
	ETestSlaveError,
	ETestSlaveNoDeallocation,
	};

// Pool flavours exercised by the pool create/close test cases.
enum TTestPoolType
	{
	ETestNonPageAligned,
	ETestPageAligned,
	ETestPageAlignedGrowing,
	};
|
55 |
|
56 TInt Log2(TInt aNum) |
|
57 { |
|
58 TInt res = -1; |
|
59 while(aNum) |
|
60 { |
|
61 res++; |
|
62 aNum >>= 1; |
|
63 } |
|
64 return res; |
|
65 } |
|
66 |
|
67 TInt RoundUp(TInt aNum, TInt aAlignmentLog2) |
|
68 { |
|
69 if (aNum % (1 << aAlignmentLog2) == 0) |
|
70 { |
|
71 return aNum; |
|
72 } |
|
73 return (aNum & ~((1 << aAlignmentLog2) - 1)) + (1 << aAlignmentLog2); |
|
74 } |
|
75 |
|
76 void LoadDeviceDrivers() |
|
77 { |
|
78 TInt r; |
|
79 #ifdef TEST_CLIENT_THREAD |
|
80 r= User::LoadLogicalDevice(_L("D_SHBUF_CLIENT.LDD")); |
|
81 if (r != KErrAlreadyExists) |
|
82 { |
|
83 test_KErrNone(r); |
|
84 } |
|
85 #else |
|
86 r = User::LoadLogicalDevice(_L("D_SHBUF_OWN.LDD")); |
|
87 if (r != KErrAlreadyExists) |
|
88 { |
|
89 test_KErrNone(r); |
|
90 } |
|
91 #endif |
|
92 } |
|
93 |
|
94 void FreeDeviceDrivers() |
|
95 { |
|
96 TInt r = User::FreeLogicalDevice(KTestShBufClient); |
|
97 test_KErrNone(r); |
|
98 r = User::FreeLogicalDevice(KTestShBufOwn); |
|
99 test_KErrNone(r); |
|
100 } |
|
101 |
|
102 void FillShBuf(RShBuf& aBuffer, TUint8 aValue) |
|
103 { |
|
104 TUint size = aBuffer.Size(); |
|
105 TUint8* base = aBuffer.Ptr(); |
|
106 test(size!=0); |
|
107 test(base!=0); |
|
108 memset(base,aValue,size); |
|
109 } |
|
110 |
|
111 TBool CheckFillShBuf(RShBuf& aBuffer, TUint8 aValue) |
|
112 { |
|
113 TUint size = aBuffer.Size(); |
|
114 TUint8* base = aBuffer.Ptr(); |
|
115 test(size!=0); |
|
116 test(base!=0); |
|
117 TUint8* ptr = base; |
|
118 TUint8* end = ptr+size; |
|
119 while(ptr<end) |
|
120 { |
|
121 TUint8 b = *ptr++; |
|
122 if(b!=aValue) |
|
123 { |
|
124 RDebug::Printf("CheckFillShBuf failed at offset 0x%x, expected 0x%02x but got 0x%02x ",ptr-base-1,aValue,b); |
|
125 return EFalse; |
|
126 } |
|
127 } |
|
128 return ETrue; |
|
129 } |
|
130 |
|
131 TBool CheckNotFillShBuf(RShBuf& aBuffer, TUint8 aValue) |
|
132 { |
|
133 TUint size = aBuffer.Size(); |
|
134 TUint8* base = aBuffer.Ptr(); |
|
135 test(size!=0); |
|
136 test(base!=0); |
|
137 TUint8* ptr = base; |
|
138 TUint8* end = ptr+size; |
|
139 while(ptr<end) |
|
140 { |
|
141 TUint8 b = *ptr++; |
|
142 if(b==aValue) |
|
143 { |
|
144 RDebug::Printf("CheckNotFillShBuf failed at offset 0x%x, expected not 0x%02x",ptr-base-1,aValue); |
|
145 return EFalse; |
|
146 } |
|
147 } |
|
148 return ETrue; |
|
149 } |
|
150 |
|
151 /* |
|
152 @SYMTestCaseID 1 |
|
153 @SYMTestCaseDesc Create pool from user-side |
|
154 @SYMREQ REQ11423 |
|
155 @SYMTestActions |
|
156 1. Test Thread creates a pool (P1) and passes handle to device driver. |
|
157 2. Device driver opens pool and checks its attributes. |
|
158 @SYMTestExpectedResults |
|
159 All OK. |
|
160 @SYMTestPriority Critical |
|
161 */ |
|
162 |
|
/**
 Creates the user-side pool P1 with the buffer size currently selected by
 PtrBufSize and hands its handle to the test driver, which opens the pool
 and cross-checks the attributes kernel-side (test case 1).

 @param aPoolType Selects non-page-aligned, page-aligned, or growing
                  page-aligned pool attributes.
*/
void CreateUserPool(TTestPoolType aPoolType)
	{
	test.Next(_L("Create user-side pool"));
	TInt r;
	TInt pagesize;
	r = HAL::Get(HAL::EMemoryPageSize, pagesize);
	test_KErrNone(r);

	switch (aPoolType)
		{
	case ETestNonPageAligned:
	// Non-page-aligned pool
		{
		test.Printf(_L("Non-page-aligned\n"));
		// Pool must not already be open.
		test_Equal(0, P1.Handle());
		TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs, 8);
		r = P1.Create(inf,KDefaultPoolHandleFlags);
		test_KErrNone(r);

		// Buffer windows only apply to page-aligned pools, so this must be refused.
		r = P1.SetBufferWindow(-1, ETrue);
		test_Equal(KErrNotSupported, r);

		// Tell the driver exactly which attributes to expect; it opens the
		// pool from the handle and verifies them against the kernel object.
		TShPoolInfo poolinfotokernel;
		poolinfotokernel.iBufSize = *PtrBufSize;
		poolinfotokernel.iInitialBufs = KTestPoolSizeInBufs;
		poolinfotokernel.iMaxBufs = KTestPoolSizeInBufs;
		poolinfotokernel.iGrowTriggerRatio = 0;
		poolinfotokernel.iGrowByRatio = 0;
		poolinfotokernel.iShrinkHysteresisRatio = 0;
		poolinfotokernel.iAlignment = 8;
		poolinfotokernel.iFlags = EShPoolNonPageAlignedBuffer;
		r = Ldd.OpenUserPool(P1.Handle(), poolinfotokernel);
		test_KErrNone(r);

		// Cross-check the user-side view of the pool attributes too.
		TShPoolInfo poolinfo;
		P1.GetInfo(poolinfo);
		test_Equal(*PtrBufSize, poolinfo.iBufSize);
		test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
		test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
		test_Equal(0, poolinfo.iGrowTriggerRatio);
		test_Equal(0, poolinfo.iGrowByRatio);
		test_Equal(0, poolinfo.iShrinkHysteresisRatio);
		test_Equal(8, poolinfo.iAlignment);
		test(poolinfo.iFlags & EShPoolNonPageAlignedBuffer);
		test(!(poolinfo.iFlags & EShPoolPageAlignedBuffer));
		break;
		}
	case ETestPageAligned:
	// Page-aligned pool
		{
		test.Printf(_L("Page-aligned\n"));
		test_Equal(0, P1.Handle());

		TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs);
		r = P1.Create(inf,KDefaultPoolHandleFlags);
		test_KErrNone(r);

		// Map the whole pool into this process so buffers are addressable.
		r = P1.SetBufferWindow(-1, ETrue);
		test_KErrNone(r);

		TShPoolInfo poolinfo;
		P1.GetInfo(poolinfo);
		test_Equal(*PtrBufSize, poolinfo.iBufSize);
		test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
		test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
		test_Equal(0, poolinfo.iGrowTriggerRatio);
		test_Equal(0, poolinfo.iGrowByRatio);
		test_Equal(0, poolinfo.iShrinkHysteresisRatio);
		// Page-aligned pools report the MMU page alignment.
		test_Equal(Log2(pagesize), poolinfo.iAlignment);
		test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
		test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));

		r = Ldd.OpenUserPool(P1.Handle(), poolinfo);
		test_KErrNone(r);
		break;
		}
	case ETestPageAlignedGrowing:
	// Page-aligned growing pool
		{
		test.Printf(_L("Page-aligned growing\n"));
		test_Equal(0, P1.Handle());

		// Start at half capacity so the pool has room to grow to the maximum.
		TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs / 2);
		// Set shrink hysteresis high so pool can't shrink
		r = inf.SetSizingAttributes(KTestPoolSizeInBufs, 25, 26, 25600);
		test_KErrNone(r);
		r = P1.Create(inf,KDefaultPoolHandleFlags);
		test_KErrNone(r);

		r = P1.SetBufferWindow(-1, ETrue);
		test_KErrNone(r);

		TShPoolInfo poolinfo;
		P1.GetInfo(poolinfo);
		test_Equal(*PtrBufSize, poolinfo.iBufSize);
		test_Equal(KTestPoolSizeInBufs / 2, poolinfo.iInitialBufs);
		test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
		test_Equal(25, poolinfo.iGrowTriggerRatio);
		test_Equal(26, poolinfo.iGrowByRatio);
		test_Equal(25600, poolinfo.iShrinkHysteresisRatio);
		test_Equal(Log2(pagesize), poolinfo.iAlignment);
		test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
		test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));

		r = Ldd.OpenUserPool(P1.Handle(), poolinfo);
		test_KErrNone(r);
		break;
		}
	default:
		test(EFalse);
		}
	}
|
275 |
|
276 /* |
|
277 @SYMTestCaseID 2 |
|
278 @SYMTestCaseDesc Create pool from kernel-side |
|
279 @SYMREQ REQ11423 |
|
280 @SYMTestActions |
|
281 1. Device Driver creates a pool (P2) and passes handle to this thread. |
|
282 2. Test Thread opens pool and checks its attributes. |
|
283 @SYMTestExpectedResults |
|
284 1. Ok. |
|
285 2. Ok. |
|
286 @SYMTestPriority Critical |
|
287 */ |
|
288 |
|
/**
 Asks the driver to create the kernel-side pool P2, adopts the handle it
 returns, and verifies the user-side view of the pool attributes
 (test case 2).

 @param aPoolType Selects non-page-aligned, page-aligned, or growing
                  page-aligned pool attributes.
*/
void CreateKernelPool(TTestPoolType aPoolType)
	{
	test.Next(_L("Create kernel-side pool"));
	TInt r;
	TInt pagesize;
	r = HAL::Get(HAL::EMemoryPageSize, pagesize);
	test_KErrNone(r);
	TInt handle;

	switch (aPoolType)
		{
	case ETestNonPageAligned:
	// Non-page-aligned pool
		{
		test.Printf(_L("Non-page-aligned\n"));
		// Pool must not already be open.
		test_Equal(0, P2.Handle());

		TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs, 8);
		r = Ldd.OpenKernelPool(inf, handle);
		test_KErrNone(r);
		// Adopt the handle created by the driver.
		P2.SetHandle(handle);

		// Cross-check the user-side view of the pool attributes.
		TShPoolInfo poolinfo;
		P2.GetInfo(poolinfo);
		test_Equal(*PtrBufSize, poolinfo.iBufSize);
		test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
		test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
		test_Equal(0, poolinfo.iGrowTriggerRatio);
		test_Equal(0, poolinfo.iGrowByRatio);
		test_Equal(0, poolinfo.iShrinkHysteresisRatio);
		test_Equal(8, poolinfo.iAlignment);
		test(poolinfo.iFlags & EShPoolNonPageAlignedBuffer);
		test(!(poolinfo.iFlags & EShPoolPageAlignedBuffer));
		break;
		}
	case ETestPageAligned:
	// Page-aligned pool
		{
		test.Printf(_L("Page-aligned\n"));
		test_Equal(0, P2.Handle());

		TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs);
		r = Ldd.OpenKernelPool(inf, handle);
		test_KErrNone(r);
		P2.SetHandle(handle);

		// Map the whole pool into this process so buffers are addressable.
		r = P2.SetBufferWindow(-1, ETrue);
		test_KErrNone(r);

		TShPoolInfo poolinfo;
		P2.GetInfo(poolinfo);
		test_Equal(*PtrBufSize, poolinfo.iBufSize);
		test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
		test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
		test_Equal(0, poolinfo.iGrowTriggerRatio);
		test_Equal(0, poolinfo.iGrowByRatio);
		test_Equal(0, poolinfo.iShrinkHysteresisRatio);
		// Page-aligned pools report the MMU page alignment.
		test_Equal(Log2(pagesize), poolinfo.iAlignment);
		test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
		test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));
		break;
		}
	case ETestPageAlignedGrowing:
	// Page-aligned pool growing
		{
		test.Printf(_L("Page-aligned growing\n"));
		test_Equal(0, P2.Handle());

		// Start at half capacity so the pool has room to grow to the maximum.
		TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs / 2);
		// Set shrink hysteresis high so pool can't shrink
		r = inf.SetSizingAttributes(KTestPoolSizeInBufs, 25, 26, 25600);
		test_KErrNone(r);
		r = Ldd.OpenKernelPool(inf, handle);
		test_KErrNone(r);
		P2.SetHandle(handle);

		r = P2.SetBufferWindow(-1, ETrue);
		test_KErrNone(r);

		TShPoolInfo poolinfo;
		P2.GetInfo(poolinfo);
		test_Equal(*PtrBufSize, poolinfo.iBufSize);
		test_Equal(KTestPoolSizeInBufs / 2, poolinfo.iInitialBufs);
		test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
		test_Equal(25, poolinfo.iGrowTriggerRatio);
		test_Equal(26, poolinfo.iGrowByRatio);
		test_Equal(25600, poolinfo.iShrinkHysteresisRatio);
		test_Equal(Log2(pagesize), poolinfo.iAlignment);
		test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
		test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));
		break;
		}
	default:
		test(EFalse);
		}
	}
|
385 |
|
386 /* |
|
387 @SYMTestCaseID 20 |
|
388 @SYMTestCaseDesc Close pool from kernel-side |
|
389 @SYMREQ REQ11423 |
|
390 @SYMTestActions |
|
391 1. Device Driver closes P2. |
|
392 2. Test Thread closes P2. |
|
393 @SYMTestExpectedResults |
|
394 1. OK and Access Count is now 1. |
|
395 2. OK |
|
396 @SYMTestPriority Critical |
|
397 */ |
|
398 |
|
399 void CloseKernelPool() |
|
400 { |
|
401 test.Next(_L("Close kernel-side pool")); |
|
402 TInt r; |
|
403 |
|
404 r = Ldd.CloseKernelPool(); |
|
405 test_KErrNone(r); |
|
406 |
|
407 P2.Close(); |
|
408 |
|
409 // wait for memory to be freed |
|
410 r = UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, (TAny*)5000, 0); |
|
411 test_KErrNone(r); |
|
412 |
|
413 } |
|
414 |
|
415 /* |
|
416 @SYMTestCaseID 21 |
|
417 @SYMTestCaseDesc Close pool from user-side |
|
418 @SYMREQ REQ11423 |
|
419 @SYMTestActions |
|
420 1. Test Thread closes P1. |
|
421 2. Device Driver closes P1. |
|
422 @SYMTestExpectedResults |
|
423 1. OK and Access Count is now 1. |
|
424 2. OK. |
|
425 @SYMTestPriority Critical |
|
426 */ |
|
427 |
|
428 void CloseUserPool() |
|
429 { |
|
430 test.Next(_L("Close user-side pool")); |
|
431 TInt r; |
|
432 |
|
433 P1.Close(); |
|
434 |
|
435 r = Ldd.CloseUserPool(); |
|
436 test_KErrNone(r); |
|
437 |
|
438 // wait for memory to be freed |
|
439 r = UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, (TAny*)5000, 0); |
|
440 test_KErrNone(r); |
|
441 } |
|
442 |
|
443 /* |
|
444 @SYMTestCaseID 3 |
|
445 @SYMTestCaseDesc Buffer allocation from user-side |
|
446 @SYMREQ REQ11423 |
|
447 @SYMTestActions |
|
448 1. Test Thread creates a shared buffer on P1. |
|
449 2. Test Thread passes buffer to Device Driver. |
|
450 3. Device Driver obtains buffer and manipulates its contents. |
|
451 4. Device Driver releases buffer. |
|
452 5. Test Thread releases buffer. |
|
453 @SYMTestExpectedResults |
|
454 1. Ok. |
|
455 2. Ok. |
|
456 3. Ok. |
|
457 4. Ok. |
|
458 5. Ok. Buffer de-allocated. |
|
459 @SYMTestPriority Critical |
|
460 */ |
|
461 |
|
/**
 Test case 3: allocates a buffer from P1 and then from P2, fills each with
 repeated copies of KTestData1, has the driver manipulate the contents, and
 verifies the pattern the driver wrote back. Each allocate/free cycle is
 bracketed with kernel-heap checks so buffer allocation must not leak.
*/
void AllocateUserBuffer()
	{
	test.Next(_L("Allocate user-side buffer"));
	TInt r;
	RShBuf buf;

	// Allocate buffer on POOL 1
	__KHEAP_MARK;
	r = buf.Alloc(P1);
	test_KErrNone(r);
	__KHEAP_CHECK(0);

	TInt i;
	TShPoolInfo poolinfo1;
	P1.GetInfo(poolinfo1);
	// Fill the buffer with back-to-back copies of KTestData1.
	TInt blocks = poolinfo1.iBufSize / KTestData1().Length();

	for (i = 0; i < blocks; i++)
		{
		TPtr8(buf.Ptr() + (i * KTestData1().Length()), KTestData1().Length(),KTestData1().Length()).Copy(KTestData1());
		}
	r = Ldd.ManipulateUserBuffer(buf.Handle());

	test_KErrNone(r);

	TBuf8<64> tmp;

	P1.GetInfo(poolinfo1);
	blocks = poolinfo1.iBufSize / tmp.MaxSize();

	// The driver is expected to have rewritten the buffer as consecutive
	// 64-byte blocks where block i is filled with the byte value i.
	for (i = 0 ; i < blocks; i++)
		{
		tmp.Fill(i);
		TPtrC8 ptrc(buf.Ptr() + (i * tmp.Length()), tmp.Length());
		r = tmp.Compare(ptrc);
		test_Equal(0, r);
		}
	buf.Close();
	__KHEAP_MARKEND;

	// Allocate buffer on POOL 2 — same sequence as for P1.
	__KHEAP_MARK;
	r = buf.Alloc(P2);
	test_KErrNone(r);
	__KHEAP_CHECK(0);

	TShPoolInfo poolinfo2;
	P2.GetInfo(poolinfo2);
	blocks = poolinfo2.iBufSize / KTestData1().Length(); // PC REMOVE

	for (i = 0; i < blocks; i++)
		{
		TPtr8(buf.Ptr() + (i * KTestData1().Length()), KTestData1().Length(),KTestData1().Length()).Copy(KTestData1());
		}

	r = Ldd.ManipulateUserBuffer(buf.Handle());
	test_KErrNone(r);

	P2.GetInfo(poolinfo2);
	blocks = poolinfo2.iBufSize / tmp.MaxSize(); // PC REMOVE

	for (i = 0 ; i < blocks; i++)
		{
		tmp.Fill(i);
		r = tmp.Compare(TPtr8(buf.Ptr() + (i * tmp.Length()), tmp.Length(), tmp.Length()));
		test_Equal(0, r);
		}
	buf.Close();
	__KHEAP_MARKEND;
	}
|
532 |
|
533 /* |
|
534 @SYMTestCaseID 4 |
|
535 @SYMTestCaseDesc Buffer allocation from kernel-side |
|
536 @SYMREQ REQ11423 |
|
537 @SYMTestActions |
|
538 1. Device Driver creates a buffer on P2. |
|
539 2. Device Driver manipulates buffer and passes it to Test Thread. |
|
540 3. Test Thread manipulates buffer and send it back to Device Driver. |
|
541 4. Device Driver check buffer's contents and releases it. |
|
542 @SYMTestExpectedResults |
|
543 1. Ok. |
|
544 2. Ok. |
|
545 3. Ok. |
|
546 4. Ok. Buffer de-allocated. |
|
547 @SYMTestPriority Critical |
|
548 */ |
|
549 |
|
/**
 Test case 4: the driver allocates and fills a buffer kernel-side on each
 pool (index 0 = P1, index 1 = P2); this thread adopts the returned handle
 and verifies the contents are back-to-back copies of KTestData2.
*/
void AllocateKernelBuffer()
	{
	test.Next(_L("Allocate kernel-side buffer"));
	TInt r;
	TInt handle;
	RShBuf kbuf0, kbuf1;

	// Allocate buffer on POOL 1
	r = Ldd.AllocateKernelBuffer(0, handle);
	test_KErrNone(r);
	// Adopt the handle returned by the driver.
	kbuf0.SetHandle(handle);

	TInt i;
	TShPoolInfo poolinfo1;
	P1.GetInfo(poolinfo1);
	// Check each KTestData2-sized block of the buffer in turn.
	TInt blocks = poolinfo1.iBufSize / KTestData2().Length();
	for (i = 0; i < blocks; i++)
		{
		r = KTestData2().Compare(TPtr8(kbuf0.Ptr() + (i * KTestData2().Length()), KTestData2().Length(), KTestData2().Length()));

		test_Equal(0, r);
		}
	kbuf0.Close();

	// Allocate buffer on POOL 2 — same checks as for P1.
	r = Ldd.AllocateKernelBuffer(1, handle);
	test_KErrNone(r);
	kbuf1.SetHandle(handle);

	TShPoolInfo poolinfo2;
	P2.GetInfo(poolinfo2);
	blocks = poolinfo2.iBufSize / KTestData2().Length();

	for (i = 0; i < blocks; i++)
		{
		r = KTestData2().Compare(TPtr8(kbuf1.Ptr() + (i * KTestData2().Length()), KTestData2().Length(), KTestData2().Length()));

		test_Equal(0, r);
		}
	kbuf1.Close();
	}
|
591 |
|
592 |
|
593 /* |
|
594 @SYMTestCaseID X1 |
|
595 @SYMTestCaseDesc Allocate maximum number of buffers in a pool (user/kernel) |
|
596 @SYMREQ REQ11423 |
|
597 @SYMTestActions |
|
598 Allocate as many buffers on a pool as possible. |
|
599 Free them all and re-allocate them again. |
|
600 Free them all. |
|
601 @SYMTestExpectedResults |
|
602 Ok. |
|
603 @SYMTestPriority High |
|
604 */ |
|
605 |
|
/**
 Test case X1: exhausts aPool from the user side — allocates buffers until
 KErrNoMemory, frees them all, allocates them all again, proves one more
 allocation still fails, then frees everything.

 @param aPool The pool to exhaust (used with both P1 and P2).
*/
void AllocateUserMax(RShPool& aPool)
	{
	test.Next(_L("Exhaust pool memory from user-side"));
	TInt r;

	TShPoolInfo poolinfo;
	aPool.GetInfo(poolinfo);
	TBool aligned = (poolinfo.iFlags & EShPoolPageAlignedBuffer);
	RDebug::Printf("aligned=%d",aligned);

	RArray<RShBuf> bufarray;
	do
		{
		RShBuf buf;
		r = buf.Alloc(aPool);
		if (r==KErrNoMemory && KTestPoolSizeInBufs>bufarray.Count())
			{
			// try again after a delay, to allow for background resource allocation

			User::After(1000000);
			r = buf.Alloc(aPool);
			}
		if (!r)
			{
			r = bufarray.Append(buf);
			test_KErrNone(r);
			// Mark the buffer so stale contents can be detected on re-allocation.
			FillShBuf(buf,0x99);
			}
		}
	while (r == KErrNone);
	// Allocation must stop with KErrNoMemory, and only after at least the
	// nominal pool capacity has been handed out.
	test_Equal(KErrNoMemory, r);
	test_Compare(KTestPoolSizeInBufs, <=, bufarray.Count());

	TInt n = bufarray.Count();
	while (n)
		{
		bufarray[--n].Close();
		}

	User::After(500000);

	// Do it once more
	n = 0;
	while (n<bufarray.Count())
		{
		r = bufarray[n].Alloc(aPool);
		if (r==KErrNoMemory)
			{
			// try again after a delay, to allow for background resource allocation
			User::After(1000000);
			r = bufarray[n].Alloc(aPool);
			}
		test_Assert(r == KErrNone, test.Printf(_L("n=%d r=%d\n"), n, r));
		// Page-aligned buffers must not still hold the 0x99 marker from the
		// first pass — presumably re-allocated mappings are wiped; only the
		// aligned case is checked here (TODO confirm why non-aligned is skipped).
		if(aligned)
			test(CheckNotFillShBuf(bufarray[n],0x99));
		++n;
		}

	// With the pool full again, one more allocation must fail.
	RShBuf extrabuf;
	r = extrabuf.Alloc(aPool);
	test_Equal(KErrNoMemory, r);

	while (n)
		{
		bufarray[--n].Close();
		}

	bufarray.Close();
	}
|
675 |
|
676 void AllocateKernelMax() |
|
677 { |
|
678 test.Next(_L("Exhaust pool memory from kernel-side")); |
|
679 TInt r; |
|
680 TInt allocated; |
|
681 r = Ldd.AllocateMax(0, allocated); // P1 |
|
682 test_KErrNone(r); |
|
683 test_Equal(KTestPoolSizeInBufs, allocated); |
|
684 r = Ldd.AllocateMax(1, allocated); // P2 |
|
685 test_KErrNone(r); |
|
686 test_Equal(KTestPoolSizeInBufs, allocated); |
|
687 } |
|
688 |
|
689 |
|
690 /* |
|
691 @SYMTestCaseID 11 |
|
692 @SYMTestCaseDesc Buffer alignment (kernel/user) |
|
693 @SYMREQ REQ11423 |
|
694 @SYMTestActions |
|
695 1. Test Thread creates several pools with different buffer alignment |
|
696 requirements: |
|
697 2. Test Thread allocates buffers on all pools. |
|
698 3. Test Thread frees all buffers and close pools. |
|
699 @SYMTestExpectedResults |
|
700 1. Ok. |
|
701 2. Buffers are aligned to the desired boundary. |
|
702 3. Ok. |
|
703 @SYMTestPriority High |
|
704 */ |
|
705 |
|
/**
 Test case 11 (user side): creates non-page-aligned pools for every
 alignment from 0 to log2(pagesize), plus one page-aligned pool, allocates
 20 buffers from each, and checks every buffer start address honours the
 requested alignment.
*/
void BufferAlignmentUser()
	{
	test.Next(_L("Buffer alignment (User)"));
	TInt pagesize;
	TInt r;
	r = HAL::Get(HAL::EMemoryPageSize, pagesize);
	test_KErrNone(r);

	// Non page aligned buffers
	TInt i;
	for (i = 0; i <= Log2(pagesize); i++)
		{
		test.Printf(_L("."));
		TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, 20, i); // TODO: Change minbufs back to 8 when the pool growing code works
		RShPool pool;
		r = pool.Create(inf,KDefaultPoolHandleFlags);
		test_KErrNone(r);

		TInt j;
		RShBuf buf[20];
		for (j = 0; j < 20; j++)
			{
			r = buf[j].Alloc(pool);
			test_KErrNone(r);
			}

		// Never check below KTestMinimumAlignmentLog2 — presumably pools
		// enforce at least this alignment regardless of what was requested.
		TInt alignment = i;
		if (alignment < KTestMinimumAlignmentLog2)
			{
			alignment = KTestMinimumAlignmentLog2;
			}
		for (j = 0; j < 20; j++)
			{
			// The buffer base address must have the low alignment bits clear.
			test_Assert(!((TUint32) buf[j].Ptr() & ((1 << alignment) - 1)),
				test.Printf(_L("Pool%d buf[%d].Base() == 0x%08x"), i, j, buf[j].Ptr()));
			}

		for (j = 0; j < 20; j++)
			{
			buf[j].Close();
			}
		pool.Close();
		// delay to allow the management dfc to run and close pool
		User::After(100000);
		}
	test.Printf(_L("\n"));

	// Page aligned buffers
	TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, 20); // TODO: Change minbufs back to 8 when the pool growing code works
	RShPool pool;
	r = pool.Create(inf,KDefaultPoolHandleFlags);
	test_KErrNone(r);

	// Map the pool so the buffer addresses can be inspected.
	r = pool.SetBufferWindow(-1, ETrue);
	test_KErrNone(r);

	TInt j;
	RShBuf buf[20];
	for (j = 0; j < 20; j++)
		{
		r = buf[j].Alloc(pool);
		test_KErrNone(r);
		}

	for (j = 0; j < 20; j++)
		{
		// Page-aligned buffers must start on an MMU page boundary.
		test_Assert(!((TUint32) buf[j].Ptr() & (pagesize - 1)),
			test.Printf(_L("buf[%d].Base() == 0x%08x"), j, buf[j].Ptr()));
		}
	for (j = 0; j < 20; j++)
		{
		buf[j].Close();
		}
	pool.Close();
	}
|
781 |
|
782 void BufferAlignmentKernel() |
|
783 { |
|
784 test.Next(_L("Buffer alignment (Kernel)")); |
|
785 TInt r; |
|
786 |
|
787 TInt pagesize; |
|
788 r = HAL::Get(HAL::EMemoryPageSize, pagesize); |
|
789 test_KErrNone(r); |
|
790 |
|
791 for (TInt i = 0; i < Log2(pagesize); i++) |
|
792 { |
|
793 test.Printf(_L(".")); |
|
794 r = Ldd.BufferAlignmentKernel(*PtrBufSize, i); |
|
795 test_KErrNone(r); |
|
796 // delay to allow the management dfc to run |
|
797 User::After(100000); |
|
798 } |
|
799 test.Printf(_L("\n")); |
|
800 } |
|
801 |
|
802 /* |
|
803 @SYMTestCaseID 6 |
|
804 @SYMTestCaseDesc Create pool at specific physical address |
|
805 @SYMREQ REQ11423 |
|
806 @SYMTestActions |
|
807 1. Device Driver allocates memory chunk. |
|
808 2. Device Driver requests physical address of this memory chunk. |
|
809 3. Device Driver creates pool at physical address of the memory chunk. |
|
810 3. Device Driver allocate buffers on pool, free them and close pool. |
|
811 @SYMTestExpectedResults |
|
812 1. Ok. |
|
813 2. Ok. |
|
814 3. Ok. |
|
815 4. Ok |
|
816 @SYMTestPriority High |
|
817 */ |
|
818 |
|
819 void CreateKernelPoolPhysAddr() |
|
820 { |
|
821 test.Next(_L("Create pool at specific physical address")); |
|
822 TInt r; |
|
823 test.Start(_L("Contiguous physical memory")); |
|
824 r = Ldd.CreatePoolPhysAddrCont(*PtrBufSize); |
|
825 test_KErrNone(r); |
|
826 test.Next(_L("Discontiguous physical memory")); |
|
827 r = Ldd.CreatePoolPhysAddrNonCont(*PtrBufSize); |
|
828 test_KErrNone(r); |
|
829 test.End(); |
|
830 } |
|
831 |
|
832 /* |
|
833 @SYMTestCaseID 14 |
|
834 @SYMTestCaseDesc Buffer separation and overwrites |
|
835 @SYMREQ REQ11423 |
|
836 @SYMTestActions |
|
837 1. Test Thread creates two pools: |
|
838 - A pool with no guard pages. |
|
839 - A pool with guard pages. |
|
840 2. Allocate two buffers on each pool. |
|
841 3. Test Thread creates Secondary Thread. |
|
842 4. Secondary Thread starts reading contents of the first buffer and keep |
|
843 reading beyond its limits (using a pointer, not a descriptor). |
|
844 5. Secondary Thread starts writing on the first buffer and keep writing beyond |
|
845 its limits (using a pointer, not a descriptor). |
|
846 6. Free buffers and close pools. |
|
847 @SYMTestExpectedResults |
|
848 1. Ok. |
|
849 2. Ok. |
|
850 3. Ok. |
|
851 4. Secondary Thread panics when it attempts to read the guard page, if there |
|
852 is one. Otherwise, it moves on to the second buffer. (Secondary Thread will |
|
853 have to be restarted). |
|
854 5. Secondary Thread panics when it attempts to write on the guard page if |
|
855 there is one. Otherwise, it carries on writing on to the second buffer. |
|
856 6. Ok. |
|
857 @SYMTestPriority High |
|
858 */ |
|
859 |
|
860 TInt ThreadGuardPagesRead(TAny* aArg) |
|
861 { |
|
862 TUint8* ptr = (TUint8*) aArg; |
|
863 if (ptr == NULL) |
|
864 { |
|
865 return KErrArgument; |
|
866 } |
|
867 TInt bufsize = *PtrBufSize; |
|
868 TInt i; |
|
869 TUint8 val = '$'; |
|
870 TBool isok = ETrue; |
|
871 for (i = 0; i < bufsize; i++) |
|
872 { |
|
873 if (*(ptr + i) != val) |
|
874 { |
|
875 isok = EFalse; |
|
876 } |
|
877 } |
|
878 if (!isok) |
|
879 { |
|
880 return KErrUnknown; |
|
881 } |
|
882 return KErrNone; |
|
883 } |
|
884 |
|
885 TInt ThreadGuardPagesWrite(TAny* aArg) |
|
886 { |
|
887 TUint8* ptr = (TUint8*) aArg; |
|
888 if (ptr == NULL) |
|
889 { |
|
890 return KErrArgument; |
|
891 } |
|
892 TInt bufsize = *PtrBufSize; |
|
893 TInt i; |
|
894 for (i = 0; i < bufsize; i++) |
|
895 { |
|
896 *(ptr + i) = '#'; |
|
897 } |
|
898 return KErrNone; |
|
899 } |
|
900 |
|
901 void GuardPages() |
|
902 { |
|
903 test.Next(_L("Guard pages")); |
|
904 TInt pagesize; |
|
905 TInt r; |
|
906 r = HAL::Get(HAL::EMemoryPageSize, pagesize); |
|
907 test_KErrNone(r); |
|
908 |
|
909 // Create pools |
|
910 RShPool pool1; |
|
911 RShPool pool2; |
|
912 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs); |
|
913 r = pool1.Create(inf,KDefaultPoolHandleFlags); |
|
914 test_KErrNone(r); |
|
915 |
|
916 r = pool1.SetBufferWindow(-1, ETrue); |
|
917 test_KErrNone(r); |
|
918 |
|
919 r = inf.SetGuardPages(); |
|
920 test_KErrNone(r); |
|
921 r = pool2.Create(inf,KDefaultPoolHandleFlags); |
|
922 test_KErrNone(r); |
|
923 |
|
924 r = pool2.SetBufferWindow(-1, ETrue); |
|
925 test_KErrNone(r); |
|
926 |
|
927 // Allocate buffers |
|
928 RShBuf bufs1[KTestPoolSizeInBufs]; |
|
929 RShBuf bufs2[KTestPoolSizeInBufs]; |
|
930 TInt i; |
|
931 for (i = 0; i < KTestPoolSizeInBufs; i++) |
|
932 { |
|
933 r = bufs1[i].Alloc(pool1); |
|
934 test_Assert(r == KErrNone, test.Printf(_L("Pool1: i=%d r=%d\n"), i, r)); |
|
935 TPtr8 ptr(bufs1[i].Ptr(), bufs1[i].Size(),bufs1[i].Size()); |
|
936 ptr.Fill('$'); |
|
937 } |
|
938 for (i = 0; i < KTestPoolSizeInBufs; i++) |
|
939 { |
|
940 r = bufs2[i].Alloc(pool2); |
|
941 test_Assert(r == KErrNone, test.Printf(_L("Pool2: i=%d r=%d\n"), i, r)); |
|
942 TPtr8 ptr(bufs2[i].Ptr(), bufs1[i].Size(),bufs1[i].Size()); |
|
943 ptr.Fill('$'); |
|
944 } |
|
945 |
|
946 _LIT(KTestThreadRead, "GuardPagesReadTS%dP%dB%d"); |
|
947 for (i = 0; i < KTestPoolSizeInBufs - 1; i++) |
|
948 { |
|
949 TBuf<40> threadname; |
|
950 RThread thread; |
|
951 TRequestStatus rs; |
|
952 |
|
953 // 1. Simple read within buffer |
|
954 // Pool 1 |
|
955 threadname.Format(KTestThreadRead, 1, 1, i); |
|
956 r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize, |
|
957 (TAny*) bufs1[i].Ptr()); |
|
958 test_KErrNone(r); |
|
959 thread.Logon(rs); |
|
960 thread.Resume(); |
|
961 User::WaitForRequest(rs); |
|
962 test_KErrNone(rs.Int()); |
|
963 test_Equal(EExitKill, thread.ExitType()); |
|
964 test_KErrNone(thread.ExitReason()); |
|
965 thread.Close(); |
|
966 // Pool 2 |
|
967 threadname.Format(KTestThreadRead, 1, 2, i); |
|
968 r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize, |
|
969 (TAny*) bufs2[i].Ptr()); |
|
970 test_KErrNone(r); |
|
971 thread.Logon(rs); |
|
972 thread.Resume(); |
|
973 User::WaitForRequest(rs); |
|
974 test_KErrNone(rs.Int()); |
|
975 test_Equal(EExitKill, thread.ExitType()); |
|
976 test_KErrNone(thread.ExitReason()); |
|
977 thread.Close(); |
|
978 |
|
979 // 2. If the buffer size is not a multiple of the MMU page size, it should be |
|
980 // possible to read after the buffer end until the page boundary |
|
981 if (*PtrBufSize % pagesize) |
|
982 { |
|
983 // Pool 1 |
|
984 threadname.Format(KTestThreadRead, 2, 1, i); |
|
985 r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize, |
|
986 (TAny*) (bufs1[i].Ptr() + pagesize - *PtrBufSize % pagesize)); |
|
987 test_KErrNone(r); |
|
988 thread.Logon(rs); |
|
989 thread.Resume(); |
|
990 User::WaitForRequest(rs); |
|
991 if (rs.Int() != KErrNone) |
|
992 { |
|
993 test_Equal(KErrUnknown, rs.Int()); |
|
994 test_Equal(KErrUnknown, thread.ExitReason()); |
|
995 } |
|
996 test_Equal(EExitKill, thread.ExitType()); |
|
997 thread.Close(); |
|
998 // Pool 2 |
|
999 threadname.Format(KTestThreadRead, 2, 2, i); |
|
1000 r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize, |
|
1001 (TAny*) (bufs2[i].Ptr() + pagesize - *PtrBufSize % pagesize)); |
|
1002 test_KErrNone(r); |
|
1003 thread.Logon(rs); |
|
1004 thread.Resume(); |
|
1005 User::WaitForRequest(rs); |
|
1006 if (rs.Int() != KErrNone) |
|
1007 { |
|
1008 test_Equal(KErrUnknown, rs.Int()); |
|
1009 test_Equal(KErrUnknown, thread.ExitReason()); |
|
1010 } |
|
1011 test_Equal(EExitKill, thread.ExitType()); |
|
1012 thread.Close(); |
|
1013 } |
|
1014 |
|
1015 // 3. Now we attempt to read the first byte on the next page after the end of |
|
1016 // our buffer. |
|
1017 TInt offset; |
|
1018 if (*PtrBufSize % pagesize) |
|
1019 { |
|
1020 offset = pagesize - *PtrBufSize % pagesize + 1; |
|
1021 } |
|
1022 else |
|
1023 { |
|
1024 offset = 1; |
|
1025 } |
|
1026 // Pool 1 |
|
1027 if (bufs1[i + 1].Ptr() == bufs1[i].Ptr() + RoundUp(*PtrBufSize, Log2(pagesize))) |
|
1028 { |
|
1029 // Only perform this test if the next buffer comes immediately next to this |
|
1030 // one. This is not necessarily the case on the Flexible Memory Model. |
|
1031 threadname.Format(KTestThreadRead, 3, 1, i); |
|
1032 r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize, |
|
1033 (TAny*) (bufs1[i].Ptr() + offset)); |
|
1034 test_KErrNone(r); |
|
1035 thread.Logon(rs); |
|
1036 thread.Resume(); |
|
1037 User::WaitForRequest(rs); |
|
1038 if (rs.Int() != KErrNone) // No guard page, so it should be fine |
|
1039 { |
|
1040 test_Equal(KErrUnknown, rs.Int()); |
|
1041 test_Equal(KErrUnknown, thread.ExitReason()); |
|
1042 } |
|
1043 test_Equal(EExitKill, thread.ExitType()); |
|
1044 thread.Close(); |
|
1045 } |
|
1046 // Pool 2 |
|
1047 TBool jit = User::JustInTime(); |
|
1048 User::SetJustInTime(EFalse); |
|
1049 threadname.Format(KTestThreadRead, 3, 2, i); |
|
1050 r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize, |
|
1051 (TAny*) (bufs2[i].Ptr() + offset)); |
|
1052 test_KErrNone(r); |
|
1053 thread.Logon(rs); |
|
1054 thread.Resume(); |
|
1055 User::WaitForRequest(rs); |
|
1056 test_Equal(3, rs.Int()); |
|
1057 test_Equal(EExitPanic, thread.ExitType()); |
|
1058 test_Equal(3, thread.ExitReason()); // KERN-EXEC 3 |
|
1059 thread.Close(); |
|
1060 User::SetJustInTime(jit); |
|
1061 } |
|
1062 |
|
1063 _LIT(KTestThreadWrite, "GuardPagesWriteTS%dP%dB%d"); |
|
1064 for (i = 0; i < KTestPoolSizeInBufs - 1; i++) |
|
1065 { |
|
1066 TBuf<40> threadname; |
|
1067 RThread thread; |
|
1068 TRequestStatus rs; |
|
1069 |
|
1070 // 1. Simple write within buffer |
|
1071 // Pool 1 |
|
1072 threadname.Format(KTestThreadWrite, 1, 1, i); |
|
1073 r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize, |
|
1074 (TAny*) bufs1[i].Ptr()); |
|
1075 test_KErrNone(r); |
|
1076 thread.Logon(rs); |
|
1077 thread.Resume(); |
|
1078 User::WaitForRequest(rs); |
|
1079 test_KErrNone(rs.Int()); |
|
1080 test_Equal(EExitKill, thread.ExitType()); |
|
1081 test_KErrNone(thread.ExitReason()); |
|
1082 thread.Close(); |
|
1083 // Pool 2 |
|
1084 threadname.Format(KTestThreadWrite, 1, 2, i); |
|
1085 r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize, |
|
1086 (TAny*) bufs2[i].Ptr()); |
|
1087 test_KErrNone(r); |
|
1088 thread.Logon(rs); |
|
1089 thread.Resume(); |
|
1090 User::WaitForRequest(rs); |
|
1091 test_KErrNone(rs.Int()); |
|
1092 test_Equal(EExitKill, thread.ExitType()); |
|
1093 test_KErrNone(thread.ExitReason()); |
|
1094 thread.Close(); |
|
1095 |
|
1096 // 2. If the buffer size is not a multiple of the MMU page size, it should be |
|
1097 // possible to write after the buffer end until the page boundary |
|
1098 if (*PtrBufSize % pagesize) |
|
1099 { |
|
1100 // Pool 1 |
|
1101 threadname.Format(KTestThreadWrite, 2, 1, i); |
|
1102 r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize, |
|
1103 (TAny*) (bufs1[i].Ptr() + pagesize - *PtrBufSize % pagesize)); |
|
1104 test_KErrNone(r); |
|
1105 thread.Logon(rs); |
|
1106 thread.Resume(); |
|
1107 User::WaitForRequest(rs); |
|
1108 test_KErrNone(rs.Int()); |
|
1109 test_Equal(EExitKill, thread.ExitType()); |
|
1110 test_KErrNone(thread.ExitReason()); |
|
1111 thread.Close(); |
|
1112 // Pool 2 |
|
1113 threadname.Format(KTestThreadWrite, 2, 2, i); |
|
1114 r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize, |
|
1115 (TAny*) (bufs2[i].Ptr() + pagesize - *PtrBufSize % pagesize)); |
|
1116 test_KErrNone(r); |
|
1117 thread.Logon(rs); |
|
1118 thread.Resume(); |
|
1119 User::WaitForRequest(rs); |
|
1120 test_KErrNone(rs.Int()); |
|
1121 test_Equal(EExitKill, thread.ExitType()); |
|
1122 test_KErrNone(thread.ExitReason()); |
|
1123 thread.Close(); |
|
1124 } |
|
1125 |
|
1126 // 3. Now we attempt to write on the first byte on the next page after the |
|
1127 // end of our buffer. |
|
1128 TInt offset; |
|
1129 if (*PtrBufSize % pagesize) |
|
1130 { |
|
1131 offset = pagesize - *PtrBufSize % pagesize + 1; |
|
1132 } |
|
1133 else |
|
1134 { |
|
1135 offset = 1; |
|
1136 } |
|
1137 // Pool 1 |
|
1138 if (bufs1[i + 1].Ptr() == bufs1[i].Ptr() + RoundUp(*PtrBufSize, Log2(pagesize))) |
|
1139 { |
|
1140 // Only perform this test if the next buffer comes immediately next to this |
|
1141 // one. This is not necessarily the case on the Flexible Memory Model. |
|
1142 threadname.Format(KTestThreadWrite, 3, 1, i); |
|
1143 r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize, |
|
1144 (TAny*) (bufs1[i].Ptr() + offset)); |
|
1145 test_KErrNone(r); |
|
1146 thread.Logon(rs); |
|
1147 thread.Resume(); |
|
1148 User::WaitForRequest(rs); |
|
1149 test_KErrNone(rs.Int()); |
|
1150 test_Equal(EExitKill, thread.ExitType()); |
|
1151 test_KErrNone(thread.ExitReason()); |
|
1152 thread.Close(); |
|
1153 } |
|
1154 |
|
1155 // Pool 2 |
|
1156 TBool jit = User::JustInTime(); |
|
1157 User::SetJustInTime(EFalse); |
|
1158 threadname.Format(KTestThreadWrite, 3, 2, i); |
|
1159 r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize, |
|
1160 (TAny*) (bufs2[i].Ptr() + offset)); |
|
1161 test_KErrNone(r); |
|
1162 thread.Logon(rs); |
|
1163 thread.Resume(); |
|
1164 User::WaitForRequest(rs); |
|
1165 test_Equal(3, rs.Int()); |
|
1166 test_Equal(EExitPanic, thread.ExitType()); |
|
1167 test_Equal(3, thread.ExitReason()); // KERN-EXEC 3 |
|
1168 thread.Close(); |
|
1169 User::SetJustInTime(jit); |
|
1170 } |
|
1171 |
|
1172 // Free buffers |
|
1173 for (i = 0; i < KTestPoolSizeInBufs; i++) |
|
1174 { |
|
1175 bufs1[i].Close(); |
|
1176 bufs2[i].Close(); |
|
1177 } |
|
1178 pool1.Close(); |
|
1179 pool2.Close(); |
|
1180 } |
|
1181 |
|
1182 /* |
|
1183 @SYMTestCaseID 12 |
|
1184 @SYMTestCaseDesc Buffer mapping |
|
1185 @SYMREQ REQ11423 |
|
1186 @SYMTestActions |
|
1187 1. Test Thread allocates buffer on a mappable pool. |
|
1188 2. Test Thread spawns Slave Process. |
|
1189 3. Test Thread passes buffer handle to Slave Process. |
|
1190 4. Slave Process attempts to read buffer then write to buffer. |
|
1191 5. Slave Process maps buffer. |
|
1192 6. Slave Process attempts to read buffer then write to buffer. |
|
1193 7. Slave Process unmaps buffer. |
|
1194 8. Slave Process attempts to read buffer then write to buffer. |
|
1195 9. Test Thread kills Slave Process and frees buffer. |
|
1196 @SYMTestExpectedResults |
|
1197 1. Ok. |
|
1198 2. Ok. |
|
1199 3. Ok. |
|
1200 4. Slave Process panics. (and will have to be restarted) |
|
1201 5. Ok. |
|
1202 6. Ok. |
|
1203 7. Ok. |
|
1204 8. Slave Process panics. |
|
1205 9. Ok. |
|
1206 @SYMTestPriority High |
|
1207 */ |
|
1208 |
|
1209 TInt ThreadBufferMappingRead(TAny* aArg) |
|
1210 { |
|
1211 if (!aArg) |
|
1212 { |
|
1213 return KErrArgument; |
|
1214 } |
|
1215 RShBuf* buf = (RShBuf*) aArg; |
|
1216 TUint x = 0; |
|
1217 TUint i; |
|
1218 volatile TUint8* ptr = buf->Ptr(); |
|
1219 |
|
1220 for (i = 0; i < buf->Size(); i++) |
|
1221 { |
|
1222 x += *(ptr + i); |
|
1223 } |
|
1224 return KErrNone; |
|
1225 } |
|
1226 |
|
1227 TInt ThreadBufferMappingWrite(TAny* aArg) |
|
1228 { |
|
1229 if (!aArg) |
|
1230 { |
|
1231 return KErrArgument; |
|
1232 } |
|
1233 RShBuf* buf = (RShBuf*) aArg; |
|
1234 TPtr8 ptr(buf->Ptr(), buf->Size(),buf->Size()); |
|
1235 ptr.Fill('Q'); |
|
1236 return KErrNone; |
|
1237 } |
|
1238 |
|
// Number of distinct pool configurations exercised by BufferMapping()
// (every combination of AutoMap / Writeable / buffer-window settings).
const TInt KTestBufferMappingPoolTypes = 8;
// Number of distinct buffer allocation/mapping scenarios tried per pool.
const TInt KTestBufferMappingTypes = 8;
|
1241 |
|
1242 void BufferMapping() |
|
1243 { |
|
1244 test.Next(_L("Buffer Mapping")); |
|
1245 #ifdef __WINS__ |
|
1246 test.Printf(_L("Does not run on the emulator. Skipped\n")); |
|
1247 #else |
|
1248 TInt r; |
|
1249 RShPool pool[KTestBufferMappingPoolTypes]; |
|
1250 RShBuf buf[KTestBufferMappingTypes][KTestBufferMappingPoolTypes]; |
|
1251 TUint poolflags[KTestBufferMappingPoolTypes]; |
|
1252 TInt bufferwindow[KTestBufferMappingPoolTypes]; |
|
1253 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestBufferMappingTypes); |
|
1254 |
|
1255 // POOL TYPES |
|
1256 // ------------------------------------------ |
|
1257 // Pool no. AutoMap Writeable BufWindow |
|
1258 // 0 0 0 -1 |
|
1259 // 1 1 0 -1 |
|
1260 // 2 0 0 0 |
|
1261 // 3 1 0 0 |
|
1262 // 4 0 1 -1 |
|
1263 // 5 1 1 -1 |
|
1264 // 6 0 1 0 |
|
1265 // 7 1 1 0 |
|
1266 |
|
1267 TInt i; |
|
1268 test.Printf(_L("Create pools:")); |
|
1269 for (i = 0; i < KTestBufferMappingPoolTypes; i++) |
|
1270 { |
|
1271 poolflags[i] = EShPoolAllocate; |
|
1272 bufferwindow[i] = 0; |
|
1273 if (i % 2) |
|
1274 { |
|
1275 poolflags[i] |= EShPoolAutoMapBuf; |
|
1276 } |
|
1277 if (i > 3) |
|
1278 { |
|
1279 poolflags[i] |= EShPoolWriteable; |
|
1280 } |
|
1281 if (i % 4 > 1) |
|
1282 { |
|
1283 bufferwindow[i] = -1; |
|
1284 } |
|
1285 r = pool[i].Create(inf, poolflags[i] & ~EShPoolAutoMapBuf); |
|
1286 test_KErrNone(r); |
|
1287 r = pool[i].SetBufferWindow(bufferwindow[i], poolflags[i] & EShPoolAutoMapBuf); |
|
1288 test_KErrNone(r); |
|
1289 test.Printf(_L(".")); |
|
1290 } |
|
1291 test.Printf(_L("\n")); |
|
1292 |
|
1293 // BUFFER TYPES |
|
1294 // Buffer no. Actions |
|
1295 // 0 Alloc unmapped. |
|
1296 // 1 Alloc unmapped then unmap again. |
|
1297 // 2 Default Alloc. Unmap if it is a AutoMap pool. |
|
1298 // 3 Alloc unmapped. Map Read-Only. |
|
1299 // 4 Default Alloc. Unmap if it is a R/W pool and re-map Read-Only. |
|
1300 // 5 Alloc unmapped. Map R/W |
|
1301 // 6 Default Alloc. Unmap and re-map. |
|
1302 // 7 Default Alloc R/W. Map again with Read-Only setting. |
|
1303 // Depending on the pool type, the actions above might not always be possible. |
|
1304 |
|
1305 // Buffer allocation |
|
1306 TInt j; |
|
1307 test.Printf(_L("Allocate buffers\n")); |
|
1308 for (j = 0; j < KTestBufferMappingPoolTypes; j++) |
|
1309 { |
|
1310 test.Printf(_L("\nPool %d:"), j); |
|
1311 for (i = 0; i < KTestBufferMappingTypes; i++) |
|
1312 { |
|
1313 switch (i % KTestBufferMappingTypes) |
|
1314 { |
|
1315 // Unmapped buffers |
|
1316 case 0: |
|
1317 case 1: |
|
1318 // This should always result in an unmapped buffer |
|
1319 r = buf[i][j].Alloc(pool[j], EShPoolAllocNoMap); |
|
1320 test_KErrNone(r); |
|
1321 |
|
1322 if((i % KTestBufferMappingTypes) == 1) |
|
1323 { |
|
1324 // Alloc unmapped then unmap again. |
|
1325 r = buf[i][j].UnMap(); |
|
1326 test_Equal(KErrNotFound, r); |
|
1327 } |
|
1328 break; |
|
1329 case 2: |
|
1330 r = buf[i][j].Alloc(pool[j]); |
|
1331 if (poolflags[j] & EShPoolAutoMapBuf) |
|
1332 { |
|
1333 if (bufferwindow[j] == 0) |
|
1334 { |
|
1335 // Can't ask for a mapped buffer when buffer window is not set |
|
1336 test_Equal(KErrNoMemory, r); |
|
1337 } |
|
1338 else |
|
1339 { |
|
1340 // Alloc'd buffer was mapped - unmap it |
|
1341 test_KErrNone(r); |
|
1342 r = buf[i][j].UnMap(); |
|
1343 test_KErrNone(r); |
|
1344 } |
|
1345 } |
|
1346 else |
|
1347 { |
|
1348 // Buffer not mapped |
|
1349 test_KErrNone(r); |
|
1350 } |
|
1351 break; |
|
1352 |
|
1353 // Read-Only buffers |
|
1354 case 3: |
|
1355 r = buf[i][j].Alloc(pool[j], EShPoolAllocNoMap); |
|
1356 test_KErrNone(r); |
|
1357 r = buf[i][j].Map(ETrue); |
|
1358 if (bufferwindow[j]) |
|
1359 { |
|
1360 test_KErrNone(r); |
|
1361 } |
|
1362 else |
|
1363 { |
|
1364 test_Equal(KErrNoMemory, r); |
|
1365 } |
|
1366 break; |
|
1367 case 4: |
|
1368 r = buf[i][j].Alloc(pool[j]); |
|
1369 if (poolflags[j] & EShPoolAutoMapBuf) |
|
1370 { |
|
1371 if (bufferwindow[j] == 0) |
|
1372 { |
|
1373 // Can't ask for a mapped buffer when buffer window is not set |
|
1374 test_Equal(KErrNoMemory, r); |
|
1375 } |
|
1376 else if (poolflags[j] & EShPoolWriteable) |
|
1377 { |
|
1378 // Alloc'd buffer was mapped R/W - re-map it R/O |
|
1379 test_KErrNone(r); |
|
1380 r = buf[i][j].UnMap(); |
|
1381 test_KErrNone(r); |
|
1382 r = buf[i][j].Map(ETrue); |
|
1383 test_KErrNone(r); |
|
1384 } |
|
1385 else |
|
1386 { |
|
1387 // Nothing to do |
|
1388 test_KErrNone(r); |
|
1389 } |
|
1390 } |
|
1391 else |
|
1392 { |
|
1393 // Buffer not mapped |
|
1394 test_KErrNone(r); |
|
1395 if (bufferwindow[j]) |
|
1396 { |
|
1397 if (poolflags[j] & EShPoolWriteable) |
|
1398 { |
|
1399 // Explicitly map Read-Only |
|
1400 r = buf[i][j].Map(ETrue); |
|
1401 test_KErrNone(r); |
|
1402 } |
|
1403 else |
|
1404 { |
|
1405 // If Pool is RO, map default |
|
1406 r = buf[i][j].Map(); |
|
1407 test_KErrNone(r); |
|
1408 } |
|
1409 } |
|
1410 else |
|
1411 { |
|
1412 // Can't map buffer |
|
1413 r = buf[i][j].Map(ETrue); |
|
1414 test_Equal(KErrNoMemory, r); |
|
1415 } |
|
1416 } |
|
1417 break; |
|
1418 |
|
1419 // Mapped for Read-Write |
|
1420 case 5: |
|
1421 r = buf[i][j].Alloc(pool[j], EShPoolAllocNoMap); |
|
1422 test_KErrNone(r); |
|
1423 r = buf[i][j].Map(); |
|
1424 if (bufferwindow[j] == 0) |
|
1425 { |
|
1426 test_Equal(KErrNoMemory, r); |
|
1427 } |
|
1428 else if (!(poolflags[j] & EShPoolWriteable)) |
|
1429 { |
|
1430 test_KErrNone(r); |
|
1431 } |
|
1432 else |
|
1433 { |
|
1434 test_KErrNone(r); |
|
1435 } |
|
1436 break; |
|
1437 case 6: |
|
1438 case 7: |
|
1439 r = buf[i][j].Alloc(pool[j]); |
|
1440 if (poolflags[j] & EShPoolAutoMapBuf) |
|
1441 { |
|
1442 if (bufferwindow[j] == 0) |
|
1443 { |
|
1444 // Can't ask for a mapped buffer when buffer window is not set |
|
1445 test_Equal(KErrNoMemory, r); |
|
1446 } |
|
1447 else if (poolflags[j] & EShPoolWriteable) |
|
1448 { |
|
1449 // Alloc'd buffer was mapped R/W |
|
1450 test_KErrNone(r); |
|
1451 |
|
1452 if((i % KTestBufferMappingTypes) == 7) |
|
1453 { |
|
1454 // Mapped for Read-Write then remapped as Read-Only |
|
1455 r = buf[i][j].Map(true); |
|
1456 test_Equal(KErrAlreadyExists, r); |
|
1457 } |
|
1458 } |
|
1459 } |
|
1460 else |
|
1461 { |
|
1462 // Buffer not mapped |
|
1463 test_KErrNone(r); |
|
1464 if (bufferwindow[j]) |
|
1465 { |
|
1466 if (poolflags[j] & EShPoolWriteable) |
|
1467 { |
|
1468 // Default mapping |
|
1469 r = buf[i][j].Map(); |
|
1470 test_KErrNone(r); |
|
1471 |
|
1472 if((i % KTestBufferMappingTypes) == 7) |
|
1473 { |
|
1474 // Mapped for Read-Write then remapped as Read-Only |
|
1475 r = buf[i][j].Map(true); |
|
1476 test_Equal(KErrAlreadyExists, r); |
|
1477 } |
|
1478 } |
|
1479 } |
|
1480 else |
|
1481 { |
|
1482 // Can't map buffer |
|
1483 r = buf[i][j].Map(ETrue); |
|
1484 test_Equal(KErrNoMemory, r); |
|
1485 } |
|
1486 } |
|
1487 break; |
|
1488 |
|
1489 default: test(EFalse); |
|
1490 } |
|
1491 test.Printf(_L(".")); |
|
1492 } |
|
1493 } |
|
1494 test.Printf(_L("\n")); |
|
1495 |
|
1496 // Read and write tests |
|
1497 _LIT(KTestThreadName, "BufferMappingBuf%d(Test%d)"); |
|
1498 test.Printf(_L("Read & Write tests\n")); |
|
1499 for (j = 0; j < KTestBufferMappingPoolTypes; j++) |
|
1500 { |
|
1501 for (i = 0; i < KTestBufferMappingTypes; i++) |
|
1502 { |
|
1503 if (buf[i][j].Handle()) |
|
1504 { |
|
1505 switch (i % KTestBufferMappingTypes) |
|
1506 { |
|
1507 case 1: |
|
1508 case 2: |
|
1509 // Buffer not mapped - Read should fail |
|
1510 if (buf[i][j].Ptr() == NULL) |
|
1511 { |
|
1512 RThread thread; |
|
1513 TRequestStatus threadrs; |
|
1514 TBuf<40> threadname; |
|
1515 threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 1); |
|
1516 r = thread.Create(threadname, ThreadBufferMappingRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize, (TAny*) &buf[i][j]); |
|
1517 test_KErrNone(r); |
|
1518 thread.Logon(threadrs); |
|
1519 thread.Resume(); |
|
1520 User::WaitForRequest(threadrs); |
|
1521 test_Equal(3, threadrs.Int()); |
|
1522 test_Equal(EExitPanic, thread.ExitType()); |
|
1523 test_Equal(3, thread.ExitReason()); // KERN-EXEC 3 |
|
1524 CLOSE_AND_WAIT(thread); |
|
1525 // Map buffer read-only for next test |
|
1526 r = buf[i][j].Map(ETrue); |
|
1527 if (bufferwindow[j]) |
|
1528 { |
|
1529 test_KErrNone(r); |
|
1530 } |
|
1531 else |
|
1532 { |
|
1533 test_Equal(KErrNoMemory, r); |
|
1534 } |
|
1535 } |
|
1536 case 3: |
|
1537 case 4: |
|
1538 // Buffer mapped for R/O access - Read should not fail |
|
1539 if (bufferwindow[j] == 0) |
|
1540 { |
|
1541 break; |
|
1542 } |
|
1543 else |
|
1544 { |
|
1545 RThread thread; |
|
1546 TRequestStatus threadrs; |
|
1547 TBuf<40> threadname; |
|
1548 threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 1); |
|
1549 r = thread.Create(threadname, ThreadBufferMappingRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize, (TAny*) &buf[i][j]); |
|
1550 test_KErrNone(r); |
|
1551 thread.Logon(threadrs); |
|
1552 thread.Resume(); |
|
1553 User::WaitForRequest(threadrs); |
|
1554 test_KErrNone(threadrs.Int()); |
|
1555 test_Equal(EExitKill, thread.ExitType()); |
|
1556 test_KErrNone(thread.ExitReason()); |
|
1557 CLOSE_AND_WAIT(thread); |
|
1558 } |
|
1559 // Write should fail |
|
1560 if (buf[i][j].Ptr()) |
|
1561 { |
|
1562 RThread thread; |
|
1563 TRequestStatus threadrs; |
|
1564 TBuf<40> threadname; |
|
1565 threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 2); |
|
1566 r = thread.Create(threadname, ThreadBufferMappingWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,(TAny*) &buf[i][j]); |
|
1567 test_KErrNone(r); |
|
1568 thread.Logon(threadrs); |
|
1569 thread.Resume(); |
|
1570 User::WaitForRequest(threadrs); |
|
1571 test_Equal(3, threadrs.Int()); |
|
1572 test_Equal(EExitPanic, thread.ExitType()); |
|
1573 test_Equal(3, thread.ExitReason()); // KERN-EXEC 3 |
|
1574 CLOSE_AND_WAIT(thread); |
|
1575 // Map buffer read-write for next test |
|
1576 r = buf[i][j].UnMap(); |
|
1577 if(r != KErrNotFound) |
|
1578 { |
|
1579 test_KErrNone(r); |
|
1580 } |
|
1581 r = buf[i][j].Map(); |
|
1582 test_KErrNone(r); |
|
1583 } |
|
1584 case 5: |
|
1585 case 6: |
|
1586 // Buffer mapped for R/W access - Write should not fail |
|
1587 if (bufferwindow[j] == 0 || !(poolflags[j] & EShPoolWriteable)) |
|
1588 { |
|
1589 break; |
|
1590 } |
|
1591 else |
|
1592 { |
|
1593 RThread thread; |
|
1594 TRequestStatus threadrs; |
|
1595 TBuf<40> threadname; |
|
1596 threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 1); |
|
1597 r = thread.Create(threadname, ThreadBufferMappingWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,(TAny*) &buf[i][j]); |
|
1598 test_KErrNone(r); |
|
1599 thread.Logon(threadrs); |
|
1600 thread.Resume(); |
|
1601 User::WaitForRequest(threadrs); |
|
1602 test_KErrNone(threadrs.Int()); |
|
1603 test_Equal(EExitKill, thread.ExitType()); |
|
1604 test_KErrNone(thread.ExitReason()); |
|
1605 CLOSE_AND_WAIT(thread); |
|
1606 // Unmap buffer for next test |
|
1607 r = buf[i][j].UnMap(); |
|
1608 test_KErrNone(r); |
|
1609 } |
|
1610 // Buffer not mapped - Read should fail |
|
1611 if (buf[i][j].Ptr()) |
|
1612 { |
|
1613 RThread thread; |
|
1614 TRequestStatus threadrs; |
|
1615 TBuf<40> threadname; |
|
1616 threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 2); |
|
1617 r = thread.Create(threadname, ThreadBufferMappingRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,(TAny*) &buf[i][j]); |
|
1618 test_KErrNone(r); |
|
1619 thread.Logon(threadrs); |
|
1620 thread.Resume(); |
|
1621 User::WaitForRequest(threadrs); |
|
1622 test_Equal(3, threadrs.Int()); |
|
1623 test_Equal(EExitPanic, thread.ExitType()); |
|
1624 test_Equal(3, thread.ExitReason()); // KERN-EXEC 3 |
|
1625 CLOSE_AND_WAIT(thread); |
|
1626 } |
|
1627 } |
|
1628 } |
|
1629 buf[i][j].Close(); |
|
1630 test.Printf(_L(".")); |
|
1631 } |
|
1632 pool[j].Close(); |
|
1633 test.Printf(_L("\n")); |
|
1634 } |
|
1635 #endif |
|
1636 } |
|
1637 |
|
1638 void BufferWindow() |
|
1639 { |
|
1640 test.Next(_L("Buffer Window tests")); |
|
1641 #ifdef __WINS__ |
|
1642 test.Printf(_L("Does not run on the emulator. Skipped\n")); |
|
1643 #else |
|
1644 TInt r; |
|
1645 RShPool pool; |
|
1646 RShBuf buf[KTestPoolSizeInBufs * 2 + 1]; |
|
1647 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs * 2); |
|
1648 r = pool.Create(inf, KDefaultPoolHandleFlags); |
|
1649 test_KErrNone(r); |
|
1650 |
|
1651 // Allocate buffer but don't map them to this process memory |
|
1652 TInt i; |
|
1653 for (i = 0; i < KTestPoolSizeInBufs * 2; i++) |
|
1654 { |
|
1655 r = buf[i].Alloc(pool, EShPoolAllocNoMap); |
|
1656 test_KErrNone(r); |
|
1657 } |
|
1658 |
|
1659 // Pool is full |
|
1660 r = buf[KTestPoolSizeInBufs * 2].Alloc(pool, EShPoolAllocNoMap); |
|
1661 test_Equal(KErrNoMemory, r); |
|
1662 r = buf[0].Map(); |
|
1663 test_Equal(KErrNoMemory, r); |
|
1664 |
|
1665 // Open a one-buffer window |
|
1666 r = pool.SetBufferWindow(1, ETrue); |
|
1667 test_KErrNone(r); |
|
1668 r = buf[0].Map(); |
|
1669 test_KErrNone(r); |
|
1670 TPtr8 ptr0(buf[0].Ptr(), buf[0].Size(),buf[0].Size()); |
|
1671 ptr0.Fill('>'); |
|
1672 r = buf[1].Map(); |
|
1673 test_Equal(KErrNoMemory, r); |
|
1674 r = buf[0].UnMap(); |
|
1675 test_KErrNone(r); |
|
1676 r = buf[1].Map(); |
|
1677 test_KErrNone(r); |
|
1678 TPtr8 ptr1(buf[0].Ptr(), buf[0].Size(),buf[0].Size()); |
|
1679 ptr1.Fill('<'); |
|
1680 r = buf[2].Map(); |
|
1681 test_Equal(KErrNoMemory, r); |
|
1682 |
|
1683 // Enlarge window by one buffer |
|
1684 r = pool.SetBufferWindow(2, ETrue); |
|
1685 test_Equal(KErrAlreadyExists, r); |
|
1686 |
|
1687 // Close All buffers |
|
1688 for (i = 0; i < KTestPoolSizeInBufs * 2; i++) |
|
1689 { |
|
1690 buf[i].Close(); |
|
1691 } |
|
1692 |
|
1693 pool.Close(); |
|
1694 r = pool.Create(inf, KDefaultPoolHandleFlags); |
|
1695 test_KErrNone(r); |
|
1696 |
|
1697 r = pool.SetBufferWindow(KTestPoolSizeInBufs, ETrue); // Half the pool size |
|
1698 test_KErrNone(r); |
|
1699 for (i = 0; i < KTestPoolSizeInBufs * 2 - 1; i++) |
|
1700 { |
|
1701 if (i < KTestPoolSizeInBufs) |
|
1702 { |
|
1703 r = buf[i].Alloc(pool, 0); |
|
1704 test_KErrNone(r); |
|
1705 TPtr8 ptr(buf[0].Ptr(), buf[0].Size(),buf[0].Size()); |
|
1706 ptr.Fill('?'); |
|
1707 } |
|
1708 else |
|
1709 { |
|
1710 r = buf[i].Alloc(pool, EShPoolAllocNoMap); |
|
1711 test_KErrNone(r); |
|
1712 } |
|
1713 } |
|
1714 r = buf[KTestPoolSizeInBufs * 2].Alloc(pool, 0); |
|
1715 test_Equal(KErrNoMemory, r); |
|
1716 r = buf[KTestPoolSizeInBufs].Map(); |
|
1717 test_Equal(KErrNoMemory, r); |
|
1718 r = buf[KTestPoolSizeInBufs * 2].Alloc(pool, EShPoolAllocNoMap); |
|
1719 test_KErrNone(r); |
|
1720 |
|
1721 // That's it |
|
1722 for (i = 0; i < (KTestPoolSizeInBufs * 2) + 1; i++) |
|
1723 { |
|
1724 buf[i].Close(); |
|
1725 } |
|
1726 pool.Close(); |
|
1727 |
|
1728 // Try again with automap set to false |
|
1729 RShPool pool2; |
|
1730 r = pool2.Create(inf, KDefaultPoolHandleFlags); |
|
1731 test_KErrNone(r); |
|
1732 for (i = 0; i < KTestPoolSizeInBufs * 2; i++) |
|
1733 { |
|
1734 r = buf[i].Alloc(pool2, 0); |
|
1735 test_KErrNone(r); |
|
1736 } |
|
1737 r = pool2.SetBufferWindow(-1, EFalse); |
|
1738 test_KErrNone(r); |
|
1739 for (i = 0; i < KTestPoolSizeInBufs * 2; i++) |
|
1740 { |
|
1741 r = buf[i].Map(ETrue); |
|
1742 test_KErrNone(r); |
|
1743 } |
|
1744 for (i = 0; i < KTestPoolSizeInBufs * 2; i++) |
|
1745 { |
|
1746 buf[i].Close(); |
|
1747 } |
|
1748 pool2.Close(); |
|
1749 #endif |
|
1750 } |
|
1751 |
|
1752 /* |
|
1753 @SYMTestCaseID 7 |
|
1754 @SYMTestCaseDesc Trigger notifications |
|
1755 @SYMREQ REQ11423 |
|
1756 @SYMTestActions |
|
1757 Set Low Space Notifications on various thresholds. |
|
1758 In a separate thread, keep allocating buffers. |
|
1759 @SYMTestExpectedResults |
|
1760 Notifications are completed when their respective levels are reached. |
|
1761 @SYMTestPriority Medium |
|
1762 */ |
|
1763 |
|
1764 TInt ThreadNotifications(TAny* aArg) |
|
1765 { |
|
1766 if (!aArg) |
|
1767 { |
|
1768 return KErrArgument; |
|
1769 } |
|
1770 RShPool* pool = (RShPool*) aArg; |
|
1771 RArray<RShBuf> bufarray; |
|
1772 TInt r; |
|
1773 RSemaphore sem; |
|
1774 r = sem.OpenGlobal(KTestLowSpaceSemaphore); |
|
1775 if (r) |
|
1776 { |
|
1777 RDebug::Printf("Line %d: r=%d", __LINE__, r); |
|
1778 return r; |
|
1779 } |
|
1780 // Start allocating buffers |
|
1781 while (pool->FreeCount() > 1) |
|
1782 { |
|
1783 RShBuf buf; |
|
1784 r = buf.Alloc(*pool); |
|
1785 if (r) |
|
1786 { |
|
1787 RDebug::Printf("Line %d: count=%d r=%d", __LINE__, bufarray.Count(), r); |
|
1788 return r; |
|
1789 } |
|
1790 bufarray.Append(buf); |
|
1791 if ((bufarray.Count() == 1) // wait for low3 |
|
1792 || (bufarray.Count() == KTestPoolSizeInBufs - 2) // wait for low2 |
|
1793 || (bufarray.Count() == KTestPoolSizeInBufs - 1)) // wait for low1/low4 |
|
1794 { |
|
1795 r = sem.Wait(5000000); // 5 second timeout |
|
1796 if (r) |
|
1797 { |
|
1798 RDebug::Printf("Line %d: count=%d r=%d", __LINE__, bufarray.Count(), r); |
|
1799 return r; |
|
1800 } |
|
1801 } |
|
1802 } |
|
1803 |
|
1804 // Free all buffers |
|
1805 while (bufarray.Count()) |
|
1806 { |
|
1807 bufarray[0].Close(); |
|
1808 bufarray.Remove(0); |
|
1809 if ((bufarray.Count() == KTestPoolSizeInBufs - 2) // wait for free3 |
|
1810 || (bufarray.Count() == 1) // wait for free2 |
|
1811 || (bufarray.Count() == 0)) // wait for free1/free4 |
|
1812 { |
|
1813 r = sem.Wait(5000000); // 5 second timeout |
|
1814 if (r) |
|
1815 { |
|
1816 RDebug::Printf("Line %d: count=%d r=%d", __LINE__, bufarray.Count(), r); |
|
1817 return r; |
|
1818 } |
|
1819 } |
|
1820 } |
|
1821 bufarray.Close(); |
|
1822 sem.Close(); |
|
1823 return KErrNone; |
|
1824 } |
|
1825 |
|
// Scenarios driven through ThreadLowSpacePanic()/RequestLowSpacePanic();
// each is expected to panic the worker thread.
enum TTestLowSpaceType
	{
	ETestCancelNonExistent,	// cancel a notification that was never requested
	ETestCancelTwice		// request once, then cancel the same notification twice
	};
|
1831 |
|
// Argument bundle passed (by pointer) to ThreadLowSpacePanic().
struct TTestThreadLowSpacePanicArgs
	{
	RShPool* iPool;				// pool under test (not owned by the thread)
	TUint iThreshold1;			// threshold for the first notification request
	TUint iThreshold2;			// second threshold (not used by the current scenarios)
	TTestLowSpaceType iType;	// which panic scenario to execute
	};
|
1839 |
|
1840 TInt ThreadLowSpacePanic(TAny* aArg) |
|
1841 { |
|
1842 if (!aArg) |
|
1843 { |
|
1844 return KErrArgument; |
|
1845 } |
|
1846 TTestThreadLowSpacePanicArgs& targs = *(TTestThreadLowSpacePanicArgs*) aArg; |
|
1847 TRequestStatus rs; |
|
1848 if (targs.iType == ETestCancelNonExistent) |
|
1849 { |
|
1850 targs.iPool->CancelLowSpaceNotification(rs); // should panic |
|
1851 } |
|
1852 else if (targs.iType == ETestCancelTwice) |
|
1853 { |
|
1854 targs.iPool->RequestLowSpaceNotification(targs.iThreshold1, rs); |
|
1855 targs.iPool->CancelLowSpaceNotification(rs); |
|
1856 targs.iPool->CancelLowSpaceNotification(rs); // should panic |
|
1857 } |
|
1858 else |
|
1859 { |
|
1860 return KErrArgument; |
|
1861 } |
|
1862 return KErrNone; |
|
1863 } |
|
1864 |
|
1865 /* |
|
1866 * CancelLowSpaceNotification() no longer panic()s if it can't find the |
|
1867 * notification, so this routine not currently called. |
|
1868 */ |
|
1869 void RequestLowSpacePanic(RShPool& aPool, TUint aThreshold1, TUint aThreshold2, TTestLowSpaceType aType, TInt aLine) |
|
1870 { |
|
1871 static TInt count = 0; |
|
1872 count++; |
|
1873 test.Printf(_L("RequestLowSpacePanic@%d(%d)\n"), aLine, count); |
|
1874 TBool jit = User::JustInTime(); |
|
1875 User::SetJustInTime(EFalse); |
|
1876 TInt expectedpaniccode = KErrNone; // Initialised to silence compiler warnings |
|
1877 switch (aType) |
|
1878 { |
|
1879 case ETestCancelNonExistent: |
|
1880 case ETestCancelTwice: |
|
1881 expectedpaniccode = KErrNotFound; |
|
1882 break; |
|
1883 default: |
|
1884 test(EFalse); |
|
1885 } |
|
1886 // |
|
1887 TTestThreadLowSpacePanicArgs targs; |
|
1888 targs.iPool = &aPool; |
|
1889 targs.iThreshold1 = aThreshold1; |
|
1890 targs.iThreshold2 = aThreshold2; |
|
1891 targs.iType = aType; |
|
1892 // |
|
1893 RThread threadpanic; |
|
1894 TRequestStatus threadpanicrs; |
|
1895 TInt r; |
|
1896 TBuf<30> threadname; |
|
1897 threadname.Format(_L("ThreadLowSpacePanic%d"), count); |
|
1898 r = threadpanic.Create(threadname, ThreadLowSpacePanic, KDefaultStackSize, KMinHeapSize, 1 << 20, (TAny*) &targs); |
|
1899 test_KErrNone(r); |
|
1900 threadpanic.Logon(threadpanicrs); |
|
1901 threadpanic.Resume(); |
|
1902 User::WaitForRequest(threadpanicrs); |
|
1903 // |
|
1904 test_Equal(expectedpaniccode, threadpanicrs.Int()); |
|
1905 test_Equal(EExitPanic, threadpanic.ExitType()); |
|
1906 test_Equal(expectedpaniccode, threadpanic.ExitReason()); |
|
1907 threadpanic.Close(); |
|
1908 User::SetJustInTime(jit); |
|
1909 } |
|
1910 |
|
1911 void NotificationRequests(RShPool& aPool) |
|
1912 { |
|
1913 test.Next(_L("Notifications")); |
|
1914 TInt r; |
|
1915 |
|
1916 RSemaphore sem; |
|
1917 r = sem.CreateGlobal(KTestLowSpaceSemaphore, 0); |
|
1918 test_KErrNone(r); |
|
1919 RTimer timer; |
|
1920 r = timer.CreateLocal(); |
|
1921 test_KErrNone(r); |
|
1922 RThread thread; |
|
1923 TRequestStatus threadrs; |
|
1924 r = thread.Create(_L("ThreadNotifications"), ThreadNotifications, KDefaultStackSize, KMinHeapSize, 1 << 20, (TAny*) &aPool); |
|
1925 test_KErrNone(r); |
|
1926 thread.SetPriority(EPriorityMore); |
|
1927 thread.Logon(threadrs); |
|
1928 |
|
1929 test.Printf(_L("Low space notification\n")); |
|
1930 TRequestStatus low1; |
|
1931 TRequestStatus low2; |
|
1932 TRequestStatus low3; |
|
1933 TRequestStatus low4; |
|
1934 TRequestStatus low5; |
|
1935 TRequestStatus low6; |
|
1936 aPool.RequestLowSpaceNotification(1, low1); |
|
1937 test_Equal(KRequestPending, low1.Int()); |
|
1938 aPool.RequestLowSpaceNotification(2, low2); |
|
1939 test_Equal(KRequestPending, low2.Int()); |
|
1940 aPool.RequestLowSpaceNotification(aPool.FreeCount() - 1, low3); |
|
1941 test_Equal(KRequestPending, low3.Int()); |
|
1942 aPool.RequestLowSpaceNotification(1, low4); |
|
1943 test_Equal(KRequestPending, low4.Int()); |
|
1944 aPool.RequestLowSpaceNotification(0, low5); // Never completes |
|
1945 test_Equal(KRequestPending, low5.Int()); |
|
1946 aPool.RequestLowSpaceNotification(KMaxTUint, low6); // Completes instantly |
|
1947 TRequestStatus timeoutlow; |
|
1948 timer.After(timeoutlow, 5000000); // 5 seconds time out |
|
1949 User::WaitForRequest(low6, timeoutlow); |
|
1950 test_KErrNone(low6.Int()); |
|
1951 test_Equal(KRequestPending, low1.Int()); |
|
1952 test_Equal(KRequestPending, low2.Int()); |
|
1953 test_Equal(KRequestPending, low3.Int()); |
|
1954 test_Equal(KRequestPending, low4.Int()); |
|
1955 test_Equal(KRequestPending, low5.Int()); |
|
1956 timer.Cancel(); |
|
1957 User::WaitForRequest(timeoutlow); |
|
1958 thread.Resume(); |
|
1959 User::WaitForRequest(low3, threadrs); |
|
1960 test_KErrNone(low3.Int()); |
|
1961 test_Equal(KRequestPending, low1.Int()); |
|
1962 test_Equal(KRequestPending, low2.Int()); |
|
1963 test_Equal(KRequestPending, low4.Int()); |
|
1964 test_Equal(KRequestPending, low5.Int()); |
|
1965 sem.Signal(); |
|
1966 User::WaitForRequest(low2, threadrs); |
|
1967 test_KErrNone(low2.Int()) |
|
1968 test_Equal(KRequestPending, low1.Int()); |
|
1969 test_Equal(KRequestPending, low4.Int()); |
|
1970 test_Equal(KRequestPending, low5.Int()); |
|
1971 sem.Signal(); |
|
1972 User::WaitForRequest(low1, threadrs); |
|
1973 test_KErrNone(low1.Int()); |
|
1974 User::WaitForRequest(low4, threadrs); |
|
1975 test_KErrNone(low4.Int()); |
|
1976 test_Equal(KRequestPending, low5.Int()); |
|
1977 test_Equal(EExitPending, thread.ExitType()); // Thread is still running |
|
1978 test_Compare(aPool.FreeCount(), <=, 1); |
|
1979 |
|
1980 test.Printf(_L("Free space notification\n")); |
|
1981 TRequestStatus free1; |
|
1982 TRequestStatus free2; |
|
1983 TRequestStatus free3; |
|
1984 TRequestStatus free4; |
|
1985 TRequestStatus free5; |
|
1986 TRequestStatus free6; |
|
1987 aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs, free1); |
|
1988 test_Equal(KRequestPending, free1.Int()); |
|
1989 aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs - 1, free2); |
|
1990 test_Equal(KRequestPending, free2.Int()); |
|
1991 aPool.RequestFreeSpaceNotification(aPool.FreeCount() + 1, free3); |
|
1992 test_Equal(KRequestPending, free3.Int()); |
|
1993 aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs, free4); |
|
1994 test_Equal(KRequestPending, free4.Int()); |
|
1995 aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs + 1, free5); // Never completes |
|
1996 test_Equal(KRequestPending, free5.Int()); |
|
1997 aPool.RequestFreeSpaceNotification(0, free6); // Completes instantly |
|
1998 |
|
1999 TRequestStatus timeoutfree; |
|
2000 timer.After(timeoutfree, 5000000); // 5 seconds time out |
|
2001 User::WaitForRequest(free6, timeoutfree); |
|
2002 test_KErrNone(free6.Int()); |
|
2003 |
|
2004 test_Equal(KRequestPending, free1.Int()); |
|
2005 test_Equal(KRequestPending, free2.Int()); |
|
2006 test_Equal(KRequestPending, free3.Int()); |
|
2007 test_Equal(KRequestPending, free4.Int()); |
|
2008 test_Equal(KRequestPending, free5.Int()); |
|
2009 |
|
2010 timer.Cancel(); |
|
2011 User::WaitForRequest(timeoutfree); |
|
2012 |
|
2013 sem.Signal(); // resume thread execution |
|
2014 User::WaitForRequest(free3, threadrs); |
|
2015 test_KErrNone(free3.Int()); |
|
2016 test_Equal(KRequestPending, free1.Int()); |
|
2017 test_Equal(KRequestPending, free2.Int()); |
|
2018 test_Equal(KRequestPending, free4.Int()); |
|
2019 test_Equal(KRequestPending, free5.Int()); |
|
2020 |
|
2021 sem.Signal(); |
|
2022 User::WaitForRequest(free2, threadrs); |
|
2023 test_KErrNone(free2.Int()) |
|
2024 |
|
2025 test_Equal(KRequestPending, free1.Int()); |
|
2026 test_Equal(KRequestPending, free4.Int()); |
|
2027 test_Equal(KRequestPending, free5.Int()); |
|
2028 sem.Signal(); |
|
2029 |
|
2030 User::WaitForRequest(free1, threadrs); |
|
2031 test_KErrNone(free1.Int()); |
|
2032 test_KErrNone(free4.Int()); |
|
2033 |
|
2034 test_Equal(KRequestPending, free5.Int()); |
|
2035 test_Equal(EExitPending, thread.ExitType()); // Thread is still running |
|
2036 |
|
2037 test_Compare(aPool.FreeCount(), >=, KTestPoolSizeInBufs); |
|
2038 |
|
2039 // Complete the requests still pending... |
|
2040 aPool.CancelLowSpaceNotification(low5); |
|
2041 User::WaitForRequest(low5); |
|
2042 |
|
2043 aPool.CancelFreeSpaceNotification(free5); |
|
2044 User::WaitForRequest(free5); |
|
2045 |
|
2046 // Let thread complete |
|
2047 sem.Signal(); |
|
2048 User::WaitForRequest(threadrs); |
|
2049 test_Equal(EExitKill, thread.ExitType()); |
|
2050 test_KErrNone(thread.ExitReason()); |
|
2051 thread.Close(); |
|
2052 sem.Close(); |
|
2053 timer.Close(); |
|
2054 } |
|
2055 |
|
2056 /* |
|
2057 @SYMTestCaseID 9 |
|
2058 @SYMTestCaseDesc Cancel low- and free-space notifications |
|
2059 @SYMREQ REQ11423 |
|
2060 @SYMTestActions |
|
2061 Set Low/High LowSpace Notifications. |
|
2062 Cancel them. |
|
2063 @SYMTestExpectedResults |
|
2064 All OK. |
|
2065 @SYMTestPriority Medium |
|
2066 */ |
|
2067 |
|
// Verify that low- and free-space notifications can be cancelled safely:
// once, twice in a row, with the wrong cancel method, and when no
// notification is outstanding.  A helper thread (ThreadNotifications)
// allocates/frees buffers in aPool, paced by a global semaphore.
void CancelNotificationRequests(RShPool& aPool)
	{
	test.Next(_L("Cancel notifications"));
	TInt r;

	// Semaphore used to pace the helper thread's alloc/free steps.
	RSemaphore sem;
	r = sem.CreateGlobal(KTestLowSpaceSemaphore, 0);
	test_KErrNone(r);
	RThread thread;
	TRequestStatus threadrs;
	r = thread.Create(_L("ThreadCancelNotifications"), ThreadNotifications, KDefaultStackSize, KMinHeapSize, 1 << 20, (TAny*) &aPool);
	test_KErrNone(r);
	thread.SetPriority(EPriorityLess);
	thread.Logon(threadrs);

	test.Printf(_L("Cancel low space notifications\n"));
	// Low space notification cancel
	TRequestStatus low;
	aPool.RequestLowSpaceNotification(1, low);
	aPool.CancelLowSpaceNotification(low);
	test_Equal(KErrCancel, low.Int());
	// We should be able to cancel again without panic()ing
	// (no guarantees on return code; maybe Cancel() should have void return type?)
	aPool.CancelLowSpaceNotification(low);
	test.Printf(_L("Second cancel returned %d\n"), low.Int());
	TRequestStatus low2;
	aPool.RequestLowSpaceNotification(1, low2); // For thread sync
	thread.Resume();
	sem.Signal(2);
	User::WaitForRequest(low2, threadrs);
	test_KErrNone(low2.Int());
	test_Equal(EExitPending, thread.ExitType()); // Thread is still running
	test_Compare(aPool.FreeCount(), <=, 1);

	test.Printf(_L("Cancel free space notifications\n"));
	TRequestStatus free;
	aPool.CancelFreeSpaceNotification(free); // Cancel non-existent notification
	aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs, free);
	aPool.CancelLowSpaceNotification(free); // Use wrong method (must be harmless)
	aPool.CancelFreeSpaceNotification(free); // Use correct method - this cancels it
	test_Equal(KErrCancel, free.Int());
	aPool.CancelFreeSpaceNotification(free); // Already cancelled

	// Complete the requests still pending...
	User::WaitForRequest(low);

	sem.Signal(4); // Resume thread execution and let it complete
	User::WaitForRequest(threadrs);
	test_KErrNone(threadrs.Int());
	test_Equal(EExitKill, thread.ExitType());
	test_KErrNone(thread.ExitReason());
	test_Compare(aPool.FreeCount(), >=, KTestPoolSizeInBufs);
	thread.Close();
	sem.Close();
	}
|
2123 |
|
2124 |
|
2125 /* |
|
2126 @SYMTestCaseID 10 |
|
2127 @SYMTestCaseDesc Grow and shrink pool |
|
2128 @SYMREQ REQ11423 |
|
2129 @SYMTestActions |
|
2130 1. Test Thread creates pools with various size attributes |
|
2131 2. Test Thread keeps allocating buffers on pool. |
|
2132 3. Test Thread keeps freeing buffers on pool |
|
2133 4. Test Thread frees all buffers and close pool. |
|
2134 @SYMTestExpectedResults |
|
Pool grows and shrinks as expected.
|
2136 @SYMTestPriority High |
|
2137 */ |
|
2138 |
|
// Total inactivity budget when waiting for a pool's free count to catch
// up with the model, and the polling interval used while waiting.
const TInt KTestFreeCountTimeOut = 20000000; // 20 seconds (of thread inactivity)
const TInt KTestWaitBeforeRetry = 2000; // 0.002 second
|
2141 |
|
2142 TUint MultFx248(TUint n, TUint f) |
|
2143 { |
|
2144 TUint64 r = (TUint64) n * f; |
|
2145 I64LSR(r, 8); |
|
2146 return r > KMaxTUint32 ? KMaxTUint32 : I64LOW(r); |
|
2147 } |
|
2148 |
|
// User-side model of the kernel's pool grow/shrink algorithm.  The test
// mirrors every real buffer allocation/release via Alloc()/Free() and then
// checks that the real pool's free count converges on FreeCount().
class TTestPoolModel
	{
public:
	TTestPoolModel(TShPoolInfo& aInfo);
	void Alloc();			// record one buffer allocation
	void Free();			// record one buffer release
	TUint FreeCount();		// predicted number of free buffers
	void DisplayCounters();	// trace current model state
private:
	void CalcGSP();			// recompute grow/shrink trigger points
	void CheckGrowShrink();	// apply grow/shrink until stable
	void Grow();
	void Shrink();
private:
	TUint iAllocated;	// buffers currently in use
	TUint iFree;		// buffers currently free (modelled)
	//
	// Pool sizing attributes captured from TShPoolInfo:
	TUint iInitial;				// initial number of buffers
	TUint iMax;					// maximum number of buffers
	TUint iGrowTriggerRatio;	// fixed-point ratios (8 fractional bits, see MultFx248)
	TUint iGrowByRatio;
	TUint iShrinkByRatio;		// derived from iGrowByRatio in the ctor
	TUint iShrinkHysteresisRatio;
	TUint iPoolFlags;
	//
	// Current trigger points, recomputed by CalcGSP():
	TUint iGrowTrigger;
	TUint iShrinkTrigger;
	//
	TBool iDebug;		// ETrue => print a state line on every change
	};
|
2179 |
|
2180 TTestPoolModel::TTestPoolModel(TShPoolInfo& aInfo) |
|
2181 { |
|
2182 iInitial = aInfo.iInitialBufs; |
|
2183 iMax = aInfo.iMaxBufs; |
|
2184 iGrowTriggerRatio = aInfo.iGrowTriggerRatio; |
|
2185 iGrowByRatio = aInfo.iGrowByRatio; |
|
2186 iShrinkByRatio = 256 - 65536 / (256 + iGrowByRatio); |
|
2187 iShrinkHysteresisRatio = aInfo.iShrinkHysteresisRatio; |
|
2188 iPoolFlags = aInfo.iFlags; |
|
2189 iAllocated = 0; |
|
2190 iFree = iInitial; |
|
2191 iDebug = EFalse; // Set this to ETrue to display detailed information |
|
2192 |
|
2193 CalcGSP(); |
|
2194 if (iDebug) |
|
2195 { |
|
2196 test.Printf(_L("A F A+F GT ST \n")); |
|
2197 test.Printf(_L("==============================\n")); |
|
2198 DisplayCounters(); |
|
2199 } |
|
2200 } |
|
2201 |
|
2202 void TTestPoolModel::Alloc() |
|
2203 { |
|
2204 iAllocated++; |
|
2205 iFree--; |
|
2206 CheckGrowShrink(); |
|
2207 } |
|
2208 |
|
2209 void TTestPoolModel::Free() |
|
2210 { |
|
2211 iAllocated--; |
|
2212 iFree++; |
|
2213 CheckGrowShrink(); |
|
2214 } |
|
2215 |
|
// Return the model's prediction of the pool's current free buffer count.
TUint TTestPoolModel::FreeCount()
	{
	return iFree;
	}
|
2220 |
|
2221 void TTestPoolModel::CalcGSP() |
|
2222 { |
|
2223 TUint n = iAllocated + iFree; |
|
2224 |
|
2225 // If the pool is at its maximum size, we can't grow |
|
2226 if (n >= iMax || iGrowTriggerRatio == 0 /*|| iCommittedPages >= iMaxPages*/) |
|
2227 { |
|
2228 iGrowTrigger = 0; |
|
2229 } |
|
2230 else |
|
2231 { |
|
2232 iGrowTrigger = MultFx248(n, iGrowTriggerRatio); |
|
2233 |
|
2234 // Deal with rounding towards zero |
|
2235 if (iGrowTrigger == 0) |
|
2236 iGrowTrigger = 1; |
|
2237 } |
|
2238 |
|
2239 // If no growing has happened, we can't shrink |
|
2240 if (n <= iInitial || iGrowTriggerRatio == 0 || (iPoolFlags & EShPoolSuppressShrink) != 0) |
|
2241 { |
|
2242 iShrinkTrigger = iMax; |
|
2243 } |
|
2244 else |
|
2245 { |
|
2246 // To ensure that shrinking doesn't immediately happen after growing, the trigger |
|
2247 // amount is the grow trigger + the grow amount (which is the number of free buffers |
|
2248 // just after a grow) times the shrink hysteresis value. |
|
2249 iShrinkTrigger = MultFx248(n, iGrowTriggerRatio + iGrowByRatio); |
|
2250 iShrinkTrigger = MultFx248(iShrinkTrigger, iShrinkHysteresisRatio); |
|
2251 |
|
2252 // Deal with rounding towards zero |
|
2253 if (iShrinkTrigger == 0) |
|
2254 iShrinkTrigger = 1; |
|
2255 |
|
2256 // If the shrink trigger ends up > the number of buffers currently in |
|
2257 // the pool, set it to that number (less 1, since the test is "> trigger"). |
|
2258 // This means the pool will only shrink when all the buffers have been freed. |
|
2259 if (iShrinkTrigger >= n) |
|
2260 iShrinkTrigger = n - 1; |
|
2261 } |
|
2262 if (iDebug) |
|
2263 { |
|
2264 DisplayCounters(); |
|
2265 } |
|
2266 } |
|
2267 |
|
2268 void TTestPoolModel::CheckGrowShrink() |
|
2269 { |
|
2270 if (iFree < iGrowTrigger) |
|
2271 { |
|
2272 Grow(); |
|
2273 CheckGrowShrink(); |
|
2274 } |
|
2275 if (iFree > iShrinkTrigger) |
|
2276 { |
|
2277 Shrink(); |
|
2278 CheckGrowShrink(); |
|
2279 } |
|
2280 } |
|
2281 |
|
2282 void TTestPoolModel::Grow() |
|
2283 { |
|
2284 TUint headroom = iMax - (iAllocated + iFree); |
|
2285 TUint growby = MultFx248(iAllocated + iFree, iGrowByRatio); |
|
2286 if (growby == 0) // Handle round-to-zero |
|
2287 growby = 1; |
|
2288 if (growby > headroom) |
|
2289 growby = headroom; |
|
2290 iFree += growby; |
|
2291 if (iDebug) |
|
2292 { |
|
2293 test.Printf(_L("GROW by %d!\n"), growby); |
|
2294 } |
|
2295 CalcGSP(); |
|
2296 } |
|
2297 |
|
2298 void TTestPoolModel::Shrink() |
|
2299 { |
|
2300 TUint grownBy = iAllocated + iFree - iInitial; |
|
2301 TUint shrinkby = MultFx248(iAllocated + iFree, iShrinkByRatio); |
|
2302 if (shrinkby == 0) // Handle round-to-zero |
|
2303 shrinkby = 1; |
|
2304 if (shrinkby > grownBy) |
|
2305 shrinkby = grownBy; |
|
2306 if (shrinkby > iFree) |
|
2307 shrinkby = iFree; |
|
2308 iFree -= shrinkby; |
|
2309 if (iDebug) |
|
2310 { |
|
2311 test.Printf(_L("SHRINK by %d!\n"), shrinkby); |
|
2312 } |
|
2313 CalcGSP(); |
|
2314 } |
|
2315 |
|
// Print a one-line snapshot of the model state: allocated, free, total,
// grow trigger, shrink trigger (matches the header printed by the ctor).
void TTestPoolModel::DisplayCounters()
	{
	test.Printf(_L("%-6u%-6u%-6u%-6u%-6u\n"), iAllocated, iFree, iAllocated + iFree, iGrowTrigger, iShrinkTrigger);
	}
|
2320 |
|
2321 void PoolGrowingTestRoutine(const TShPoolCreateInfo& aInfo, TUint aBufferFlags = 0) |
|
2322 { |
|
2323 TInt r; |
|
2324 TInt timeout; |
|
2325 RShPool pool; |
|
2326 r = pool.Create(aInfo, KDefaultPoolHandleFlags); |
|
2327 test_KErrNone(r); |
|
2328 |
|
2329 TShPoolInfo info; |
|
2330 pool.GetInfo(info); |
|
2331 |
|
2332 // Only set the buffer window if we're going to map the buffers |
|
2333 if (!(aBufferFlags & EShPoolAllocNoMap) && (info.iFlags & EShPoolPageAlignedBuffer)) |
|
2334 { |
|
2335 r = pool.SetBufferWindow(-1, ETrue); |
|
2336 test_KErrNone(r) |
|
2337 } |
|
2338 |
|
2339 TTestPoolModel model(info); |
|
2340 RArray<RShBuf> bufarray; |
|
2341 test_Equal(info.iInitialBufs, pool.FreeCount()); |
|
2342 |
|
2343 // Buffer allocation |
|
2344 do |
|
2345 { |
|
2346 timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry; |
|
2347 while (model.FreeCount() != pool.FreeCount()) |
|
2348 { |
|
2349 User::After(KTestWaitBeforeRetry); |
|
2350 test_Assert(--timeout, |
|
2351 test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount()); |
|
2352 model.DisplayCounters(); |
|
2353 ); |
|
2354 if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0) |
|
2355 { |
|
2356 test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__); |
|
2357 } |
|
2358 } |
|
2359 RShBuf buf; |
|
2360 r = buf.Alloc(pool, aBufferFlags); |
|
2361 if (r == KErrNoMemory) |
|
2362 { |
|
2363 // We expect to get a failure when all buffers are allocated |
|
2364 if ((TUint) bufarray.Count() == info.iMaxBufs) |
|
2365 break; |
|
2366 if (!(aBufferFlags & EShPoolAllocCanWait)) |
|
2367 { |
|
2368 // Give the Management DFC some time to run, then try allocating again |
|
2369 User::After(1000000); // 1 second |
|
2370 r = buf.Alloc(pool); |
|
2371 if (r) |
|
2372 { |
|
2373 test.Printf(_L("Alloc fail after %d of %d; Free==%u (expected %u)\n"), |
|
2374 bufarray.Count(), info.iMaxBufs, pool.FreeCount(), model.FreeCount()); |
|
2375 break; |
|
2376 } |
|
2377 } |
|
2378 } |
|
2379 |
|
2380 if (r == KErrNone) |
|
2381 { |
|
2382 model.Alloc(); |
|
2383 if (!(aBufferFlags & EShPoolAllocNoMap)) |
|
2384 { |
|
2385 TPtr8 ptr(buf.Ptr(), buf.Size(),buf.Size()); |
|
2386 ptr.Fill(bufarray.Count() % 256); |
|
2387 } |
|
2388 bufarray.Append(buf); |
|
2389 } |
|
2390 } |
|
2391 while (r == KErrNone); |
|
2392 |
|
2393 test_Equal(KErrNoMemory, r); |
|
2394 test_Equal(info.iMaxBufs, bufarray.Count()); |
|
2395 test_Equal(0, pool.FreeCount()); |
|
2396 |
|
2397 // Now free no more than 1/3 of these buffers... |
|
2398 while ((TUint) bufarray.Count() > 2 * info.iMaxBufs / 3) |
|
2399 { |
|
2400 // remove buffers from the back of the array |
|
2401 if (!(aBufferFlags & EShPoolAllocNoMap)) |
|
2402 { |
|
2403 TPtr8 ptr(bufarray[bufarray.Count() - 1].Ptr(), bufarray[bufarray.Count() - 1].Size(),bufarray[bufarray.Count() - 1].Size()); |
|
2404 ptr.Fill((bufarray.Count() + 1) % 256); |
|
2405 } |
|
2406 bufarray[bufarray.Count() - 1].Close(); |
|
2407 bufarray.Remove(bufarray.Count() - 1); |
|
2408 model.Free(); |
|
2409 |
|
2410 timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry; |
|
2411 while (model.FreeCount() != pool.FreeCount()) |
|
2412 { |
|
2413 User::After(KTestWaitBeforeRetry); |
|
2414 test_Assert(--timeout, |
|
2415 test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount()); |
|
2416 model.DisplayCounters(); |
|
2417 ); |
|
2418 if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0) |
|
2419 { |
|
2420 test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__); |
|
2421 } |
|
2422 } |
|
2423 } |
|
2424 |
|
2425 // ... and re-allocate them |
|
2426 do |
|
2427 { |
|
2428 timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry; |
|
2429 while (model.FreeCount() != pool.FreeCount()) |
|
2430 { |
|
2431 User::After(KTestWaitBeforeRetry); |
|
2432 test_Assert(--timeout, |
|
2433 test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount()); |
|
2434 model.DisplayCounters(); |
|
2435 ); |
|
2436 if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0) |
|
2437 { |
|
2438 test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__); |
|
2439 } |
|
2440 } |
|
2441 RShBuf buf; |
|
2442 r = buf.Alloc(pool, aBufferFlags); |
|
2443 if (r == KErrNoMemory) |
|
2444 { |
|
2445 // We expect to get a failure when all buffers are allocated |
|
2446 if ((TUint) bufarray.Count() == info.iMaxBufs) |
|
2447 break; |
|
2448 if (!(aBufferFlags & EShPoolAllocCanWait)) |
|
2449 { |
|
2450 // Give the Management DFC some time to run, then try allocating again |
|
2451 User::After(1000000); // 1 second |
|
2452 r = buf.Alloc(pool); |
|
2453 if (r) |
|
2454 { |
|
2455 test.Printf(_L("Alloc fail after %d of %d; Free==%u (expected %u)\n"), |
|
2456 bufarray.Count(), info.iMaxBufs, pool.FreeCount(), model.FreeCount()); |
|
2457 break; |
|
2458 } |
|
2459 } |
|
2460 } |
|
2461 |
|
2462 if (r == KErrNone) |
|
2463 { |
|
2464 model.Alloc(); |
|
2465 if (!(aBufferFlags & EShPoolAllocNoMap)) |
|
2466 { |
|
2467 TPtr8 ptr(buf.Ptr(), buf.Size(),buf.Size()); |
|
2468 ptr.Fill(bufarray.Count() % 256); |
|
2469 } |
|
2470 bufarray.Append(buf); |
|
2471 } |
|
2472 } |
|
2473 while (r == KErrNone); |
|
2474 |
|
2475 test_Equal(KErrNoMemory, r); |
|
2476 test_Equal(info.iMaxBufs, bufarray.Count()); |
|
2477 test_Equal(0, pool.FreeCount()); |
|
2478 |
|
2479 // Free all buffers |
|
2480 while (bufarray.Count()) |
|
2481 { |
|
2482 // remove buffers from the back of the array |
|
2483 if (!(aBufferFlags & EShPoolAllocNoMap)) |
|
2484 { |
|
2485 TPtr8 ptr(bufarray[bufarray.Count() - 1].Ptr(), bufarray[bufarray.Count() - 1].Size(),bufarray[bufarray.Count() - 1].Size()); |
|
2486 ptr.Fill((bufarray.Count() + 1) % 256); |
|
2487 } |
|
2488 bufarray[bufarray.Count() - 1].Close(); |
|
2489 bufarray.Remove(bufarray.Count() - 1); |
|
2490 model.Free(); |
|
2491 |
|
2492 timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry; |
|
2493 while (model.FreeCount() != pool.FreeCount()) |
|
2494 { |
|
2495 User::After(KTestWaitBeforeRetry); |
|
2496 test_Assert(--timeout, |
|
2497 test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount()); |
|
2498 model.DisplayCounters(); |
|
2499 ); |
|
2500 if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0) |
|
2501 { |
|
2502 test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__); |
|
2503 } |
|
2504 } |
|
2505 } |
|
2506 |
|
2507 // Pool should have shrunk back to its initial size |
|
2508 test_Equal(info.iInitialBufs, pool.FreeCount()); |
|
2509 bufarray.Close(); |
|
2510 pool.Close(); |
|
2511 } |
|
2512 |
|
// Run the grow/shrink exercise (PoolGrowingTestRoutine) over a spread of
// pool configurations: different alignments, initial/max buffer counts,
// grow/shrink ratios, guard pages and buffer allocation flags.
void PoolGrowingUser()
	{
	test.Next(_L("Pool Growing/Shrinking (User)"));
	TInt r;
	TInt pagesize;
	r = HAL::Get(HAL::EMemoryPageSize, pagesize);
	test_KErrNone(r);
	// Pool A: Non-page aligned pool (64-byte alignment)
		{
		TInt alignment = 6;	// log2: 2^6 = 64 bytes
		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
		if (maxbufs > 32000)
			{
			maxbufs = 32000;
			}
		TInt initialbufs = maxbufs / 2;
		TInt growtrigger = 32;
		TInt growby = 32;
		TInt shrinkhys = 288;
		test.Printf(_L("POOL A: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
		TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
		test_KErrNone(r);
		PoolGrowingTestRoutine(inf);
		}

	// Pool B: Non-page aligned pool (maximum alignment)
		{
		TInt alignment = Log2(pagesize);
		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
		if (maxbufs > 32000)
			{
			maxbufs = 32000;
			}
		TInt initialbufs = maxbufs / 4;
		TInt growtrigger = 32;
		TInt growby = 32;
		TInt shrinkhys = 288;
		test.Printf(_L("POOL B: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
		TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
		test_KErrNone(r);
		PoolGrowingTestRoutine(inf);
		}

	// Pool C: Page aligned pool without guard pages
		{
		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
		if (maxbufs > 32000)
			{
			maxbufs = 32000;
			}
		TInt initialbufs = maxbufs * 3 / 8;
		TInt growtrigger = 32;
		TInt growby = 32;
		TInt shrinkhys = 288;
		test.Printf(_L("POOL C: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned\n"),
			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
		TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
		test_KErrNone(r);
		PoolGrowingTestRoutine(inf);
		}

	// Pool D: Page aligned pool with guard pages
		{
		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
		if (maxbufs > 32000)
			{
			maxbufs = 32000;
			}
		TInt initialbufs = maxbufs / 2;
		TInt growtrigger = 32;
		TInt growby = 32;
		TInt shrinkhys = 288;
		test.Printf(_L("POOL D: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned+Guard\n"),
			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
		TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
		test_KErrNone(r);
		r = inf.SetGuardPages();
		test_KErrNone(r);
		PoolGrowingTestRoutine(inf);
		}

	// Pool A': Non-page aligned pool (64-byte alignment)
		{
		TInt alignment = 6;
		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
		if (maxbufs > 32000)
			{
			maxbufs = 32000;
			}
		TInt initialbufs = 1;
		TInt growtrigger = 32;
		TInt growby = 256;
		TInt shrinkhys = 512;
		test.Printf(_L("POOL A': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
		TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
		test_KErrNone(r);
		PoolGrowingTestRoutine(inf);
		}

	// Pool A'': Non-page aligned pool (64-byte alignment) - AllocCanWait
		{
		TInt alignment = 6;
		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
		if (maxbufs > 32000)
			{
			maxbufs = 32000;
			}
		TInt initialbufs = 1;
		TInt growtrigger = 1;
		TInt growby = 1;
		TInt shrinkhys = 257;
		test.Printf(_L("POOL A'': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
		TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
		test_KErrNone(r);
		PoolGrowingTestRoutine(inf, EShPoolAllocCanWait);
		}

	// Pool D': Page aligned pool with guard pages
		{
		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
		if (maxbufs > 32000)
			{
			maxbufs = 32000;
			}
		TInt initialbufs = 1;
		TInt growtrigger = 1;
		TInt growby = 1024;
		TInt shrinkhys = 2048;
		test.Printf(_L("POOL D': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned+Guard\n"),
			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
		TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
		test_KErrNone(r);
		r = inf.SetGuardPages();
		test_KErrNone(r);
		PoolGrowingTestRoutine(inf);
		}
	// Pool D'': Page aligned pool with guard pages - NoBufferMap
		{
		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
		if (maxbufs > 32000)
			{
			maxbufs = 32000;
			}
		TInt initialbufs = maxbufs / 2;
		TInt growtrigger = 32;
		TInt growby = 32;
		TInt shrinkhys = 288;
		test.Printf(_L("POOL D'': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned+Guard\n"),
			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
		TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
		test_KErrNone(r);
		r = inf.SetGuardPages();
		test_KErrNone(r);
		PoolGrowingTestRoutine(inf, EShPoolAllocNoMap);
		}
	}
|
2681 |
|
2682 /* |
|
2683 @SYMTestCaseID X3 |
|
2684 @SYMTestCaseDesc Contiguous buffer allocation |
|
2685 @SYMREQ REQ11423 |
|
2686 @SYMTestActions |
|
2687 Create a pool with the Contiguous attribute and allocate buffers. |
|
2688 @SYMTestExpectedResults |
|
Buffer memory is physically contiguous.
|
2690 @SYMTestPriority High |
|
2691 */ |
|
2692 |
|
2693 void ContiguousPoolKernel() |
|
2694 { |
|
2695 test.Next(_L("Contiguous Pool (Kernel)")); |
|
2696 #ifdef __WINS__ |
|
2697 test.Printf(_L("Does not run on the emulator. Skipped\n")); |
|
2698 #else |
|
2699 TInt r; |
|
2700 TInt pagesize; |
|
2701 r = HAL::Get(HAL::EMemoryPageSize, pagesize); |
|
2702 test_KErrNone(r); |
|
2703 if (*PtrBufSize <= pagesize) |
|
2704 { |
|
2705 test.Printf(_L("Buffer size <= page size. Skipped.\n")); |
|
2706 return; |
|
2707 } |
|
2708 |
|
2709 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs); |
|
2710 // r = inf.SetSizingAttributes(KTestPoolSizeInBufs, 25, 25, 25600); |
|
2711 // test_KErrNone(r); |
|
2712 |
|
2713 r = Ldd.ContiguousPoolKernel(inf); |
|
2714 test_KErrNone(r); |
|
2715 |
|
2716 #endif // __WINS__ |
|
2717 } |
|
2718 |
|
2719 void ShBufPin() |
|
2720 { |
|
2721 test.Next(_L("Buffer pinning")); |
|
2722 #ifdef __WINS__ |
|
2723 test.Printf(_L("Does not run on the emulator. Skipped\n")); |
|
2724 #else |
|
2725 TInt r; |
|
2726 RShPool pool1; |
|
2727 RShBuf buf1; |
|
2728 TShPoolCreateInfo inf1(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1, KTestMinimumAlignmentLog2); |
|
2729 r = pool1.Create(inf1, KDefaultPoolHandleFlags); |
|
2730 test_KErrNone(r); |
|
2731 r = buf1.Alloc(pool1); |
|
2732 test_KErrNone(r); |
|
2733 r = Ldd.PinBuffer(pool1.Handle(), buf1.Handle()); |
|
2734 test_KErrNone(r); |
|
2735 buf1.Close(); |
|
2736 pool1.Close(); |
|
2737 |
|
2738 RShPool pool2; |
|
2739 RShBuf buf2; |
|
2740 TShPoolCreateInfo inf2(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1, KTestMinimumAlignmentLog2); |
|
2741 r = pool2.Create(inf2, KDefaultPoolHandleFlags); |
|
2742 test_KErrNone(r); |
|
2743 r = buf2.Alloc(pool2); |
|
2744 test_KErrNone(r); |
|
2745 r = Ldd.PinBuffer(pool2.Handle(), buf2.Handle()); |
|
2746 test_KErrNone(r); |
|
2747 buf2.Close(); |
|
2748 pool2.Close(); |
|
2749 #endif // _WINS_ |
|
2750 } |
|
2751 |
|
2752 /* |
|
2753 @SYMTestCaseID |
|
2754 @SYMTestCaseDesc |
|
2755 @SYMREQ |
|
2756 @SYMTestActions |
|
2757 @SYMTestExpectedResults |
|
2758 @SYMTestPriority |
|
2759 */ |
|
2760 |
|
2761 void SingleBufferPool() |
|
2762 { |
|
2763 test.Next(_L("Single Buffer Pool")); |
|
2764 TInt r; |
|
2765 |
|
2766 RShPool pool; |
|
2767 RShBuf buf; |
|
2768 RShBuf buf2; |
|
2769 |
|
2770 TShPoolCreateInfo infpa(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1); |
|
2771 r = infpa.SetGuardPages(); |
|
2772 test_KErrNone(r); |
|
2773 r = pool.Create(infpa, KDefaultPoolHandleFlags); |
|
2774 test_KErrNone(r); |
|
2775 r = pool.SetBufferWindow(-1, ETrue); |
|
2776 test_KErrNone(r); |
|
2777 r = buf.Alloc(pool); |
|
2778 test_KErrNone(r); |
|
2779 r = buf2.Alloc(pool); |
|
2780 test_Equal(KErrNoMemory, r); |
|
2781 TPtr8(buf.Ptr(), buf.Size(), buf.Size()).Fill('!'); |
|
2782 buf.Close(); |
|
2783 pool.Close(); |
|
2784 |
|
2785 TShPoolCreateInfo infnpa(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1, KTestMinimumAlignmentLog2); |
|
2786 r = pool.Create(infnpa, KDefaultPoolHandleFlags); |
|
2787 test_KErrNone(r); |
|
2788 r = buf.Alloc(pool); |
|
2789 test_KErrNone(r); |
|
2790 r = buf2.Alloc(pool); |
|
2791 test_Equal(KErrNoMemory, r); |
|
2792 TPtr8(buf.Ptr(), buf.Size(),buf.Size()).Fill('?'); |
|
2793 buf.Close(); |
|
2794 pool.Close(); |
|
2795 } |
|
2796 |
|
2797 /* |
|
2798 @SYMTestCaseID X4 |
|
2799 @SYMTestCaseDesc Negative tests (user/kernel) |
|
2800 @SYMREQ REQ11423 |
|
2801 @SYMTestActions |
|
2802 API calls with invalid arguments. |
|
2803 @SYMTestExpectedResults |
|
2804 Appropriate error code returned. |
|
2805 @SYMTestPriority High |
|
2806 */ |
|
2807 |
|
2808 void NegativeTestsUser() |
|
2809 { |
|
2810 test.Next(_L("Negative tests (User)")); |
|
2811 TInt r; |
|
2812 TInt pagesize; |
|
2813 TInt ram; |
|
2814 r = HAL::Get(HAL::EMemoryPageSize, pagesize); |
|
2815 test_KErrNone(r); |
|
2816 r = HAL::Get(HAL::EMemoryRAM, ram); |
|
2817 test_KErrNone(r); |
|
2818 |
|
2819 RShPool pool; |
|
2820 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 0, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2821 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 100, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2822 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 0, 100); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2823 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, KMaxTUint, 10); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2824 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 10, KMaxTUint); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2825 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, KMaxTUint, KMaxTUint); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2826 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 65537, 65536); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2827 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 10, 1 + (1 << (32 - Log2(pagesize)))); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2828 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 4096, 10); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrNone, r); pool.Close(); } |
|
2829 // XXX The following test will need updating in Phase 2, when exclusive access will be supported |
|
2830 // (page-aligned-buffer pools only) |
|
2831 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 4096, 10); inf.SetExclusive(); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrNotSupported, r); pool.Close(); } |
|
2832 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 4096, 10, 12); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrNone, r); pool.Close(); } |
|
2833 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 4096, 10, 12); inf.SetExclusive(); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); pool.Close(); } |
|
2834 #ifndef __WINS__ |
|
2835 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 128 * pagesize, (ram / (128 * pagesize)) + 1); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrNoMemory, r); } |
|
2836 #endif |
|
2837 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 0, 0, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2838 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 100, 0, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2839 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 0, 100, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2840 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, KMaxTUint, 10, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2841 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, KMaxTUint, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2842 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, KMaxTUint, KMaxTUint, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2843 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 65537, 65536, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2844 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 10, KMaxTUint); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2845 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 10, 33); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2846 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 300, 24); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2847 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 65537, 16); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2848 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 10, Log2(pagesize) + 1); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); } |
|
2849 |
|
2850 { |
|
2851 TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *BufferSize, KTestPoolSizeInBufs, 0); |
|
2852 inf.SetGuardPages(); |
|
2853 r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); |
|
2854 r = inf.SetSizingAttributes(KTestPoolSizeInBufs - 1, 25, 25, 280); test_KErrNone(r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); |
|
2855 // Either grow trigger ratio or grow by ratio == 0 => non-growable pool |
|
2856 // Such pools must have initial buffers == max buffers |
|
2857 r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 1, 0, 1); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); |
|
2858 r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 1, 0, 0); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); |
|
2859 // shrink hysteresis ratio must be > 256 |
|
2860 r = inf.SetSizingAttributes(KTestPoolSizeInBufs - 1, 25, 25, 256); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); |
|
2861 // grow ratio must be < 256 |
|
2862 r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 256, 25, 260); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); |
|
2863 } |
|
2864 |
|
2865 // Can't have a non-aligned, contiguous pool that grows |
|
2866 TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 200, 10, 0); |
|
2867 r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 25, 25, 280); |
|
2868 test_KErrNone(r); |
|
2869 } |
|
2870 |
|
2871 void NegativeTestsKernel() |
|
2872 { |
|
2873 test.Next(_L("Negative tests (Kernel)")); |
|
2874 TInt r; |
|
2875 r = Ldd.NegativeTestsKernel(); |
|
2876 test_KErrNone(r); |
|
2877 } |
|
2878 |
|
2879 /* |
|
2880 @SYMTestCaseID 23 |
|
2881 @SYMTestCaseDesc Out of memory testing |
|
2882 @SYMREQ |
|
2883 @SYMTestActions |
|
2884 TBD |
|
2885 @SYMTestExpectedResults |
|
2886 @SYMTestPriority High |
|
2887 */ |
|
2888 |
|
// Out-of-memory testing (test case 23).
//
// For both a page-aligned and a non-page-aligned growable pool, every
// kernel-heap allocation point is probed with __KHEAP_FAILNEXT(i) for
// increasing i until the operation finally succeeds (r != KErrNoMemory),
// proving that each API fails cleanly under OOM and eventually recovers.
// Only meaningful in debug builds, where the kernel heap fail macros exist.
void OutOfMemory()
	{
	test.Next(_L("Out of memory"));
#ifdef _DEBUG

	// Upper bound on fail-next probes per operation; reaching it means the
	// operation never recovered from simulated OOM and the test fails.
	const TInt KMaxKernelAllocations = 1024;
	TInt i, r;
	RShPool pool;
	// inf0: page-aligned pool, inf1: non-page-aligned; both sized from the
	// current test buffer size, starting with a single buffer.
	TShPoolCreateInfo inf0(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, 1);
	TShPoolCreateInfo inf1(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, 1, 0);
	// Make both pools growable (max 4 buffers) so buffer allocation below
	// exercises the automatic pool-growing path under OOM.
	r = inf0.SetSizingAttributes(4, 100, 1024, 300);
	test_KErrNone(r);
	r = inf1.SetSizingAttributes(4, 100, 1024, 300);
	test_KErrNone(r);

	// j == 0: page-aligned pool (inf0); j == 1: non-page-aligned pool (inf1).
	for(TInt j = 0; j <= 1; j++)
		{
		if(j == 0)
			test.Printf(_L("OOM testing for page-aligned pool\n"));
		else
			test.Printf(_L("OOM testing for non-page-aligned pool\n"));

		r = KErrNoMemory;

		__KHEAP_RESET;

		//Create the pool
		for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
			{
			__KHEAP_FAILNEXT(i);
			if(j == 0)
				r = pool.Create(inf0,KDefaultPoolHandleFlags);
			else
				r = pool.Create(inf1,KDefaultPoolHandleFlags);
			__KHEAP_RESET;
			}
		test.Printf(_L("Create pool took %d tries\n"),i);
		test_KErrNone(r);

		//Allocate buffers with automatic pool growing enabled
		// Page-aligned case allocates unmapped (EShPoolAllocNoMap) since no
		// buffer window has been set up yet; mapping is probed separately below.
		r = KErrNoMemory;
		RShBuf buf1;
		for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
			{
			__KHEAP_FAILNEXT(i);
			if(j == 0)
				r = buf1.Alloc(pool, EShPoolAllocNoMap);
			else
				r = buf1.Alloc(pool);
			__KHEAP_RESET;
			}
		test.Printf(_L("Allocate shared buffer 1 took %d tries\n"),i);
		test_KErrNone(r);

		// delay to allow the pool to grow
		User::After(20000);

		r = KErrNoMemory;
		RShBuf buf2;
		for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
			{
			__KHEAP_FAILNEXT(i);
			if(j == 0)
				r = buf2.Alloc(pool, EShPoolAllocNoMap);
			else
				r = buf2.Alloc(pool);
			__KHEAP_RESET;
			// Pause each attempt so the background grow has a chance to run.
			User::After(20000);
			}
		test.Printf(_L("Allocate shared buffer 2 took %d tries\n"),i);
		test_KErrNone(r);

		// delay to allow the pool to grow again
		User::After(20000);

		r = KErrNoMemory;
		RShBuf buf3;
		for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
			{
			__KHEAP_FAILNEXT(i);
			if(j == 0)
				r = buf3.Alloc(pool, EShPoolAllocNoMap);
			else
				r = buf3.Alloc(pool);
			__KHEAP_RESET;
			}
		test.Printf(_L("Allocate shared buffer 3 took %d tries\n"),i);
		test_KErrNone(r);

		//Map a buffer in page-aligned-pool case
		if(j == 0)
			{
			//Open a one-buffer window
			r = pool.SetBufferWindow(1, ETrue);
			test_KErrNone(r);

			//Map a buffer
			r = KErrNoMemory;
			for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
				{
				// Undo any partial mapping from the previous failed attempt
				// before re-probing (return value intentionally ignored).
				buf1.UnMap();
				__KHEAP_FAILNEXT(i);
				r = buf1.Map();
				__KHEAP_RESET;
				}
			test.Printf(_L("Mapping buffer 1 took %d tries\n"),i);
			test_KErrNone(r);
			}

		//Setup low-space notification
		// Async request: success is indicated by the status going to
		// KRequestPending rather than completing immediately with an error.
		TRequestStatus low;
		low = KErrNoMemory;
		for (i = 0; i < KMaxKernelAllocations && low != KRequestPending; i++)
			{
			__KHEAP_FAILNEXT(i);
			pool.RequestLowSpaceNotification(1, low);
			__KHEAP_RESET;
			}
		test.Printf(_L("Setting up low-space notification took %d tries\n"),i);
		test_Equal(low.Int(), KRequestPending);

		//Setup free-space notification
		TRequestStatus free;
		free = KErrNoMemory;
		for (i = 0; i < KMaxKernelAllocations && free != KRequestPending; i++)
			{
			__KHEAP_FAILNEXT(i);
			pool.RequestFreeSpaceNotification(4, free);
			__KHEAP_RESET;
			}
		test.Printf(_L("Setting up free-space notification took %d tries\n"),i);
		test_Equal(free.Int(), KRequestPending);

		//No allocations should occur here
		// FAILNEXT(1) stays armed across the cleanup calls below to prove
		// that unmap/cancel/close paths allocate nothing from the kernel heap.
		__KHEAP_FAILNEXT(1);
		if(j == 0)
			{
			//Unmap the buffer
			r = buf1.UnMap();
			}

		//Cancel the notifications
		pool.CancelLowSpaceNotification(low);
		pool.CancelFreeSpaceNotification(free);

		//Close the buffers and the pool
		buf1.Close();
		buf2.Close();
		buf3.Close();
		pool.Close();
		__KHEAP_RESET;

		}

	// Allocate kernel-side buffer on Pool 2
	TInt handle = 0;
	RShBuf kbuf;
	r = KErrNoMemory;
	for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
		{
		__KHEAP_FAILNEXT(i);
		r = Ldd.AllocateKernelBuffer(1, handle);
		__KHEAP_RESET;
		}
	test.Printf(_L("Allocate kernel buffer took %d tries\n"),i);
	test_KErrNone(r);

	// Adopting an existing handle must not allocate.
	__KHEAP_FAILNEXT(1);
	kbuf.SetHandle(handle);
	__KHEAP_RESET;

	r = KErrNoMemory;
	for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
		{
		// NOTE(review): r from UnMap() is immediately overwritten by Map();
		// presumably the unmap is best-effort cleanup between probes — confirm.
		r = kbuf.UnMap();
		__KHEAP_FAILNEXT(i);
		r = kbuf.Map();
		__KHEAP_RESET;
		}
	test.Printf(_L("Mapping kernel buffer took %d tries\n"),i);
	test_KErrNone(r);

	// Final teardown must also be allocation-free.
	__KHEAP_FAILNEXT(1);
	r = kbuf.UnMap();
	kbuf.Close();
	__KHEAP_RESET;


#else // _DEBUG
	test.Printf(_L("Debug builds only. Test skipped."));
#endif // _DEBUG
	}
|
3083 |
|
3084 /* |
|
3085 @SYMTestCaseID 22 |
|
3086 @SYMTestCaseDesc Stress testing |
|
3087 @SYMREQ |
|
3088 @SYMTestActions |
|
3089 TBD |
|
3090 @SYMTestExpectedResults |
|
3091 @SYMTestPriority Medium |
|
3092 */ |
|
3093 |
|
3094 TInt StressThread1(TAny*) |
|
3095 { |
|
3096 TInt r; |
|
3097 TInt pagesize; |
|
3098 r = HAL::Get(HAL::EMemoryPageSize, pagesize); |
|
3099 test_KErrNone(r); |
|
3100 |
|
3101 TInt i = 0; |
|
3102 FOREVER |
|
3103 { |
|
3104 RShPool pool; |
|
3105 if (i % 2) |
|
3106 { |
|
3107 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 1000, 512); |
|
3108 r = pool.Create(inf,KDefaultPoolHandleFlags); |
|
3109 if (r) |
|
3110 { |
|
3111 RDebug::Printf("Error %d line %d", r, __LINE__); |
|
3112 break; |
|
3113 } |
|
3114 |
|
3115 r = pool.SetBufferWindow(-1, ETrue); |
|
3116 test_KErrNone(r); |
|
3117 |
|
3118 } |
|
3119 else |
|
3120 { |
|
3121 TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10000, 200, 0); |
|
3122 r = pool.Create(inf,KDefaultPoolHandleFlags); |
|
3123 if (r) |
|
3124 { |
|
3125 RDebug::Printf("Error %d line %d", r, __LINE__); |
|
3126 break; |
|
3127 } |
|
3128 } |
|
3129 pool.Close(); |
|
3130 i++; |
|
3131 if (i % 100 == 0) |
|
3132 { |
|
3133 RDebug::Printf("ST1 %d iterations", i); |
|
3134 } |
|
3135 } |
|
3136 return r; |
|
3137 } |
|
3138 |
|
// Stress thread 2: loops forever filling the shared pools P1 (user-side)
// and P2 (kernel-created) to their maximum buffer counts, writing a marker
// byte into every buffer, then releasing everything. Runs until killed by
// StressTesting(); returns the first allocation/append error encountered.
TInt StressThread2(TAny*)
	{
	TInt r = KErrUnknown;
	TShPoolInfo inf1;
	TShPoolInfo inf2;
	P1.GetInfo(inf1);
	P2.GetInfo(inf2);
	TInt j = 0;	// completed iteration count, for progress tracing
	FOREVER
		{
		TUint i;
		RArray<RShBuf> bufarray1;
		RArray<RShBuf> bufarray2;
		// Exhaust pool P1: allocate up to iMaxBufs buffers, fill each with
		// '1', and keep the handles so they can all be closed afterwards.
		for (i = 0; i < inf1.iMaxBufs; i++)
			{
			RShBuf buf;
			r = buf.Alloc(P1);
			if (r)
				{
				RDebug::Printf("Error %d line %d i=%d", r, __LINE__, i);
				break;
				}
			TPtr8(buf.Ptr(), buf.Size(),buf.Size()).Fill('1');
			r = bufarray1.Append(buf);
			if (r)
				{
				// Append failed, so the array does not own this buffer;
				// close it here to avoid leaking it.
				buf.Close();
				RDebug::Printf("Error %d line %d i=%d", r, __LINE__, i);
				break;
				}
			}
		// Exhaust pool P2 the same way, filling with '2'.
		for (i = 0; i < inf2.iMaxBufs; i++)
			{
			RShBuf buf;
			r = buf.Alloc(P2);
			if (r)
				{
				RDebug::Printf("Error %d line %d i=%d", r, __LINE__, i);
				break;
				}
			TPtr8(buf.Ptr(), buf.Size(),buf.Size()).Fill('2');
			// NOTE(review): Append() return value is ignored here, unlike the
			// P1 loop above; on failure the buffer would leak — confirm intent.
			bufarray2.Append(buf);
			}
		// Release every buffer and the arrays themselves.
		i = 0;
		while (bufarray1.Count())
			{
			bufarray1[0].Close();
			bufarray1.Remove(0);
			i++;
			}

		while (bufarray2.Count())
			{
			bufarray2[0].Close();
			bufarray2.Remove(0);
			}
		bufarray1.Close();
		bufarray2.Close();
		// Any error recorded during the fill loops ends the thread (cleanup
		// above has already run by this point).
		if (r)
			{
			break;
			}
		j++;
		if (j % 10 == 0)
			{
			RDebug::Printf("ST2 %d iterations", j);
			}
		}
	return r;
	}
|
3209 |
|
// Stress testing (test case 22): creates the two global pools (P1 user-side,
// P2 kernel-side via the LDD), runs StressThread1 and StressThread2
// concurrently for aSecs seconds, then kills the threads and tears the
// pools down.
//
// @param aSecs  How long (in seconds) to let the stress threads run.
void StressTesting(TInt aSecs)
	{
	test.Next(_L("Stress testing"));
	TInt r;

	test.Start(_L("Create pools"));
	// P1: user-created, non-page-aligned, 500 x 2000-byte buffers, 2KB alignment.
	TShPoolCreateInfo inf1(TShPoolCreateInfo::ENonPageAlignedBuffer, 2000, 500, 11);
	r = P1.Create(inf1,KDefaultPoolHandleFlags);
	test_KErrNone(r);
	TInt handle;
	// P2: created kernel-side by the LDD; we adopt the returned handle.
	TShPoolCreateInfo inf2(TShPoolCreateInfo::EPageAlignedBuffer, 5000, 150);
	r = Ldd.OpenKernelPool(inf2, handle);
	test_KErrNone(r);
	P2.SetHandle(handle);

	// Map the whole of P2 into this process so StressThread2 can write to
	// its buffers.
	r = P2.SetBufferWindow(-1, ETrue);
	test_KErrNone(r);

	test.Next(_L("Create threads"));
	RThread t1;
	r = t1.Create(_L("THREAD1"), StressThread1, KDefaultStackSize, KMinHeapSize, KMinHeapSize, NULL);
	test_KErrNone(r);
	RThread t2;
	// Thread 2 keeps two RArrays of buffer handles, so give it a larger heap.
	r = t2.Create(_L("THREAD2"), StressThread2, KDefaultStackSize*2, KMinHeapSize, 1 << 20, NULL);
	test_KErrNone(r);
	test.Next(_L("Start threads"));
	test.Printf(_L("Wait for %d seconds\n"), aSecs);
	// Raise our priority so the timed wait below is not starved by the
	// stress threads.
	RThread().SetPriority(EPriorityMore);
	TRequestStatus t1rs;
	TRequestStatus t2rs;
	// Logon before Resume so thread death can never be missed.
	t1.Logon(t1rs);
	t2.Logon(t2rs);
	t1.Resume();
	t2.Resume();
	User::After(aSecs * 1000000);

	test.Next(_L("Kill threads"));
	t1.Kill(KErrNone);
	t2.Kill(KErrNone);

	// wait for threads to actually die
	User::WaitForRequest(t1rs);
	User::WaitForRequest(t2rs);

	t1.Close();
	t2.Close();
	RThread().SetPriority(EPriorityNormal);

	test.Next(_L("Close pools"));
	P1.Close();
	r = Ldd.CloseKernelPool();
	test_KErrNone(r);
	P2.Close();
	test.End();
	}
|
3265 |
|
3266 /* |
|
3267 @SYMTestCaseID |
|
3268 @SYMTestCaseDesc |
|
3269 @SYMREQ |
|
3270 @SYMTestActions |
|
3271 @SYMTestExpectedResults |
|
3272 @SYMTestPriority |
|
3273 */ |
|
3274 |
|
// Master side of the no-deallocation test: spawns this same executable as a
// slave process ("SLAVE <ETestSlaveNoDeallocation>"), which allocates a pool
// and a buffer and exits WITHOUT freeing them (see SlaveNoDeallocation()).
// After the slave dies, verifies that the kernel reclaimed the memory.
void NoDeallocation()
	{
	test.Next(_L("No deallocation"));
	TInt r;
	// "SLAVE 1" is 7 characters, so TBuf<10> is sufficient.
	TBuf<10> command;
	command.Format(_L("%S %d"), &KTestSlave, ETestSlaveNoDeallocation);
	RProcess p;
	// Re-launch our own binary with the slave command line.
	r = p.Create(RProcess().FileName(), command);
	test_KErrNone(r);
	TRequestStatus rs;
	p.Logon(rs);
	p.Resume();
	User::WaitForRequest(rs);

	// wait for memory to be freed
	// Supervisor barrier (5s timeout) lets deferred kernel-side cleanup of
	// the dead process complete before we check the heap.
	r = UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, (TAny*)5000, 0);
	test_KErrNone(r);

	// NOTE(review): no matching __KHEAP_MARK appears in this function — the
	// kernel heap is global, so this presumably pairs with the __KHEAP_MARK
	// executed in the slave (SlaveNoDeallocation); confirm.
	__KHEAP_MARKEND;
	// The slave must have exited normally with KErrNone.
	test_KErrNone(rs.Int());
	test_Equal(EExitKill, p.ExitType());
	test_KErrNone(p.ExitReason());
	p.Close();
	}
|
3299 |
|
3300 TInt SlaveNoDeallocation() |
|
3301 { |
|
3302 __KHEAP_MARK; |
|
3303 TInt r; |
|
3304 RShPool pool; |
|
3305 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *BufferSize, KTestPoolSizeInBufs); |
|
3306 r = pool.Create(inf,KDefaultPoolHandleFlags); |
|
3307 test_KErrNone(r); |
|
3308 |
|
3309 pool.SetBufferWindow(-1, ETrue); |
|
3310 test_KErrNone(r); |
|
3311 |
|
3312 if (!r) |
|
3313 { |
|
3314 RShBuf buf; |
|
3315 r = buf.Alloc(pool); |
|
3316 } |
|
3317 return r; |
|
3318 } |
|
3319 |
|
// Test entry point.
//
// Dual-role: if the command line starts with "SLAVE <n>", runs the requested
// slave function and exits (used by NoDeallocation()). Otherwise runs the
// full shared-buffer test suite: first the driver-less user-side tests for
// each entry in BufferSize[], then (with the d_shbuf LDD loaded) the
// user/kernel pool tests, negative tests, stress tests and cleanup checks.
TInt E32Main()
	{
	__UHEAP_MARK;

	// Parse command line for slave processes
	TInt r = KErrArgument;
	TBuf<KMaxFullName> cmd;
	User::CommandLine(cmd);
	TLex lex(cmd);
	if (lex.NextToken() == KTestSlave)
		{
		// Slave mode: dispatch on the numeric function id after "SLAVE".
		TInt function;
		TLex functionlex(lex.NextToken());
		functionlex.Val(function);
		switch (function)
			{
			case ETestSlaveNoDeallocation:
				r = SlaveNoDeallocation();
				break;
			}
		__UHEAP_MARKEND;
		// Slave exit code is checked by the master process.
		return r;
		}
	// Test starts here
	test.Title();

	// Probe whether this memory model supports shared buffers at all;
	// if not, skip the entire suite rather than fail.
	test.Start(_L("Check for Shared Buffers availability"));
	RShPool pool;
	TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *BufferSize, KTestPoolSizeInBufs);
	r = pool.Create(inf,KDefaultPoolHandleFlags);
	if (r == KErrNotSupported)
		{
		test.Printf(_L("Not supported by this memory model.\n"));
		}
	else
		{
		test_KErrNone(r);
		pool.Close();

		// Phase 1: user-side-only tests, no device driver needed. PtrBufSize
		// walks BufferSize[] (zero-terminated) so every test sees each size.
		test.Next(_L("No device driver"));
		test.Start(_L("Start test loop"));
		for (PtrBufSize = BufferSize; *PtrBufSize != 0; PtrBufSize++)
			{
			TBuf<30> title;
			title.Format(_L("Buffer size = %d bytes"), *PtrBufSize);
			test.Next(title);
			test.Start(_L("New test iteration"));
			BufferAlignmentUser();
			BufferMapping();
			BufferWindow();
			GuardPages();
			PoolGrowingUser();
			SingleBufferPool();
			test.End();
			}
		test.End();
		test.Next(_L("Load Device Driver"));
		LoadDeviceDrivers();

#ifdef TEST_CLIENT_THREAD
		test.Next(_L("Device driver in client thread"));
		r = Ldd.Open(0);
#else
		test.Next(_L("Device driver in own thread"));
		r = Ldd.Open(1);
#endif

		test_KErrNone(r);

		// Phase 2: user/kernel cross tests per buffer size, covering
		// non-page-aligned, page-aligned, and growing page-aligned pools.
		test.Start(_L("Start test loop"));
		for (PtrBufSize = BufferSize; *PtrBufSize != 0; PtrBufSize++)
			{
			TBuf<30> title;
			title.Format(_L("Buffer size = %d bytes"), *PtrBufSize);
			test.Next(title);
			test.Start(_L("New test iteration"));
			CreateUserPool(ETestNonPageAligned);
			CreateKernelPool(ETestNonPageAligned);
			AllocateUserBuffer();
			AllocateKernelBuffer();
			AllocateUserMax(P1);
			AllocateUserMax(P2);
			AllocateKernelMax();
			BufferAlignmentKernel();
			CreateKernelPoolPhysAddr();
			NotificationRequests(P1);
			NotificationRequests(P2);
			CancelNotificationRequests(P1);
			CancelNotificationRequests(P2);
			ShBufPin();
			CloseKernelPool();
			CloseUserPool();
			ContiguousPoolKernel();
			CreateUserPool(ETestPageAligned);
			CreateKernelPool(ETestPageAligned);
			OutOfMemory();
			AllocateUserBuffer();
			AllocateKernelBuffer();
			AllocateUserMax(P1);
			AllocateUserMax(P2);
			AllocateKernelMax();
			NotificationRequests(P1);
			NotificationRequests(P2);
			CloseUserPool();
			CloseKernelPool();
			CreateUserPool(ETestPageAlignedGrowing);
			CreateKernelPool(ETestPageAlignedGrowing);
			OutOfMemory();
			AllocateKernelMax();
			AllocateUserMax(P1);
			AllocateUserMax(P2);
			CloseUserPool();
			CloseKernelPool();
			test.End();
			}
		NegativeTestsKernel();
		StressTesting(5);
		test.End();
		Ldd.Close();

		// These run after the driver is closed: user-side negative tests and
		// the slave-process cleanup-on-exit check.
		NegativeTestsUser();
		NoDeallocation();

		test.Next(_L("Unload Device Drivers"));
		FreeDeviceDrivers();
		}
	test.End();
	test.Close();

	__UHEAP_MARKEND;
	return KErrNone;
	}