kerneltest/e32test/mmu/t_shbuf.cpp
changeset 43 96e5fb8b040d
       
     1 // Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
       
     2 // All rights reserved.
       
     3 // This component and the accompanying materials are made available
       
     4 // under the terms of the License "Eclipse Public License v1.0"
       
     5 // which accompanies this distribution, and is available
       
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
       
     7 //
       
     8 // Initial Contributors:
       
     9 // Nokia Corporation - initial contribution.
       
    10 //
       
    11 // Contributors:
       
    12 //
       
    13 // Description:
       
    14 // e32test/mmu/t_shbuf.cpp
       
    15 //
       
    16 
       
    17 #define __E32TEST_EXTENSION__
       
    18 
       
    19 #include <e32test.h>
       
    20 #include <hal.h>
       
    21 #include <e32svr.h>
       
    22 #include <u32hal.h>
       
    23 #include "d_shbuf.h"
       
    24 #include <e32shbuf.h>
       
    25 #include <e32def.h>
       
    26 #include <e32def_private.h>
       
    27 
       
    28 #ifdef TEST_CLIENT_THREAD
       
    29 RTest test(_L("T_SHBUF_CLIENT"));
       
    30 #else
       
    31 RTest test(_L("T_SHBUF_OWN"));
       
    32 #endif
       
    33 
       
    34 RShPool P1; // User-side pool
       
    35 RShPool P2; // Kernel-side pool
       
    36 
       
    37 const TInt KTestPoolSizeInBytes = 1 << 20; // 1MB
       
    38 const TInt BufferSize[] = {128, 853, 4096, 5051, 131072, 1, 0}; // Last element must be 0
       
    39 
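// Points to the BufferSize[] entry currently under test; the main test sequence
// (outside this extract) is assumed to step it through the array until the terminating 0.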
       
    40 const TInt* PtrBufSize;
       
    41 
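// Channel to the shared-buffer test LDD; the RShBufTestChannel class is expected to come from d_shbuf.h.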
       
    42 RShBufTestChannel Ldd;
       
    43 
       
    44 _LIT(KTestSlave, "SLAVE");
       
    45 _LIT(KTestLowSpaceSemaphore, "LowSpaceSemaphore");
       
    46 
       
    47 enum TTestSlave
       
    48 	{
       
    49 	ETestSlaveError,
       
    50 	ETestSlaveNoDeallocation,
       
    51 	};
       
    52 
       
    53 enum TTestPoolType
       
    54 	{
       
    55 	ETestNonPageAligned,
       
    56 	ETestPageAligned,
       
    57 	ETestPageAlignedGrowing,
       
    58 	};
       
    59 
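// Floor of log2(aNum); returns -1 when aNum is 0. For example, Log2(4096) == 12.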
       
    60 TInt Log2(TInt aNum)
       
    61 	{
       
    62 	TInt res = -1;
       
    63 	while(aNum)
       
    64 		{
       
    65 		res++;
       
    66 		aNum >>= 1;
       
    67 		}
       
    68 	return res;
       
    69 	}
       
    70 
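// Rounds aNum up to the next multiple of 2^aAlignmentLog2, leaving already-aligned
// values unchanged. For example, RoundUp(5051, 12) == 8192.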
       
    71 TInt RoundUp(TInt aNum, TInt aAlignmentLog2)
       
    72 	{
       
    73 	if (aNum % (1 << aAlignmentLog2) == 0)
       
    74 		{
       
    75 		return aNum;
       
    76 		}
       
    77 	return (aNum & ~((1 << aAlignmentLog2) - 1)) + (1 << aAlignmentLog2);
       
    78 	}
       
    79 
       
    80 void LoadDeviceDrivers()
       
    81 	{
       
    82 	TInt r;
       
    83 	#ifdef TEST_CLIENT_THREAD
       
     84 	r = User::LoadLogicalDevice(_L("D_SHBUF_CLIENT.LDD"));
       
    85 	if (r != KErrAlreadyExists)
       
    86 		{
       
    87 		test_KErrNone(r);
       
    88 		}
       
    89 	#else
       
    90 	r = User::LoadLogicalDevice(_L("D_SHBUF_OWN.LDD"));
       
    91 	if (r != KErrAlreadyExists)
       
    92 		{
       
    93 		test_KErrNone(r);
       
    94 		}
       
    95 	#endif
       
    96 	}
       
    97 
       
    98 void FreeDeviceDrivers()
       
    99 	{
       
   100 	TInt r = User::FreeLogicalDevice(KTestShBufClient);
       
   101 	test_KErrNone(r);
       
   102 	r = User::FreeLogicalDevice(KTestShBufOwn);
       
   103 	test_KErrNone(r);
       
   104 	}
       
   105 
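// Fills the whole buffer with aValue; fails the test if the buffer has a zero size or a null base pointer.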
       
   106 void FillShBuf(RShBuf& aBuffer, TUint8 aValue)
       
   107 	{
       
   108 	TUint size = aBuffer.Size();
       
   109 	TUint8* base = aBuffer.Ptr();
       
   110 	test(size!=0);
       
   111 	test(base!=0);
       
   112 	memset(base,aValue,size);
       
   113 	}
       
   114 
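// Returns ETrue if every byte of the buffer equals aValue; otherwise logs the first mismatching offset and returns EFalse.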
       
   115 TBool CheckFillShBuf(RShBuf& aBuffer, TUint8 aValue)
       
   116 	{
       
   117 	TUint size = aBuffer.Size();
       
   118 	TUint8* base = aBuffer.Ptr();
       
   119 	test(size!=0);
       
   120 	test(base!=0);
       
   121 	TUint8* ptr = base;
       
   122 	TUint8* end = ptr+size;
       
   123 	while(ptr<end)
       
   124 		{
       
   125 		TUint8 b = *ptr++;
       
   126 		if(b!=aValue)
       
   127 			{
       
   128 			RDebug::Printf("CheckFillShBuf failed at offset 0x%x, expected 0x%02x but got 0x%02x ",ptr-base-1,aValue,b);
       
   129 			return EFalse;
       
   130 			}
       
   131 		}
       
   132 	return ETrue;
       
   133 	}
       
   134 
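// Returns ETrue if no byte of the buffer equals aValue; otherwise logs the first matching offset and returns EFalse.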
       
   135 TBool CheckNotFillShBuf(RShBuf& aBuffer, TUint8 aValue)
       
   136 	{
       
   137 	TUint size = aBuffer.Size();
       
   138 	TUint8* base = aBuffer.Ptr();
       
   139 	test(size!=0);
       
   140 	test(base!=0);
       
   141 	TUint8* ptr = base;
       
   142 	TUint8* end = ptr+size;
       
   143 	while(ptr<end)
       
   144 		{
       
   145 		TUint8 b = *ptr++;
       
   146 		if(b==aValue)
       
   147 			{
       
   148 			RDebug::Printf("CheckNotFillShBuf failed at offset 0x%x, expected not 0x%02x",ptr-base-1,aValue);
       
   149 			return EFalse;
       
   150 			}
       
   151 		}
       
   152 	return ETrue;
       
   153 	}
       
   154 
       
   155 /*
       
   156 @SYMTestCaseID				1
       
   157 @SYMTestCaseDesc			Create pool from user-side
       
   158 @SYMREQ						REQ11423
       
   159 @SYMTestActions
       
   160 	1. Test Thread creates a pool (P1) and passes handle to device driver.
       
   161 	2. Device driver opens pool and checks its attributes.
       
   162 @SYMTestExpectedResults
       
   163 	All OK.
       
   164 @SYMTestPriority			Critical
       
   165 */
       
   166 
       
   167 void CreateUserPool(TTestPoolType aPoolType)
       
   168 	{
       
   169 	test.Next(_L("Create user-side pool"));
       
   170 	TInt r;
       
   171 	TInt pagesize;
       
   172 	r = HAL::Get(HAL::EMemoryPageSize, pagesize);
       
   173 	test_KErrNone(r);
       
   174 
       
   175 	switch (aPoolType)
       
   176 		{
       
   177 		case ETestNonPageAligned:
       
   178 		// Non-page-aligned pool
       
   179 			{
       
   180 			test.Printf(_L("Non-page-aligned\n"));
       
   181 			test_Equal(0, P1.Handle());
       
   182 			TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs, 8);
       
   183 			r = P1.Create(inf,KDefaultPoolHandleFlags);
       
   184 			test_KErrNone(r);
       
   185 
       
   186 			r = P1.SetBufferWindow(-1, ETrue);
       
   187 			test_Equal(KErrNotSupported, r);
       
   188 
       
   189 			TShPoolInfo poolinfotokernel;
       
   190 			poolinfotokernel.iBufSize = *PtrBufSize;
       
   191 			poolinfotokernel.iInitialBufs = KTestPoolSizeInBufs;
       
   192 			poolinfotokernel.iMaxBufs = KTestPoolSizeInBufs;
       
   193 			poolinfotokernel.iGrowTriggerRatio = 0;
       
   194 			poolinfotokernel.iGrowByRatio = 0;
       
   195 			poolinfotokernel.iShrinkHysteresisRatio = 0;
       
   196 			poolinfotokernel.iAlignment = 8;
       
   197 			poolinfotokernel.iFlags = EShPoolNonPageAlignedBuffer;
       
   198 			r = Ldd.OpenUserPool(P1.Handle(), poolinfotokernel);
       
   199 			test_KErrNone(r);
       
   200 
       
   201 			TShPoolInfo poolinfo;
       
   202 			P1.GetInfo(poolinfo);
       
   203 			test_Equal(*PtrBufSize, poolinfo.iBufSize);
       
   204 			test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
       
   205 			test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
       
   206 			test_Equal(0, poolinfo.iGrowTriggerRatio);
       
   207 			test_Equal(0, poolinfo.iGrowByRatio);
       
   208 			test_Equal(0, poolinfo.iShrinkHysteresisRatio);
       
   209 			test_Equal(8, poolinfo.iAlignment);
       
   210 			test(poolinfo.iFlags & EShPoolNonPageAlignedBuffer);
       
   211 			test(!(poolinfo.iFlags & EShPoolPageAlignedBuffer));
       
   212 			break;
       
   213 			}
       
   214 		case ETestPageAligned:
       
   215 		// Page-aligned pool
       
   216 			{
       
   217 			test.Printf(_L("Page-aligned\n"));
       
   218 			test_Equal(0, P1.Handle());
       
   219 
       
   220 			TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs);
       
   221 			r = P1.Create(inf,KDefaultPoolHandleFlags);
       
   222 			test_KErrNone(r);
       
   223 
       
   224 			r = P1.SetBufferWindow(-1, ETrue);
       
   225 			test_KErrNone(r);
       
   226 
       
   227 			TShPoolInfo poolinfo;
       
   228 			P1.GetInfo(poolinfo);
       
   229 			test_Equal(*PtrBufSize, poolinfo.iBufSize);
       
   230 			test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
       
   231 			test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
       
   232 			test_Equal(0, poolinfo.iGrowTriggerRatio);
       
   233 			test_Equal(0, poolinfo.iGrowByRatio);
       
   234 			test_Equal(0, poolinfo.iShrinkHysteresisRatio);
       
   235 			test_Equal(Log2(pagesize), poolinfo.iAlignment);
       
   236 			test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
       
   237 			test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));
       
   238 
       
   239 			r = Ldd.OpenUserPool(P1.Handle(), poolinfo);
       
   240 			test_KErrNone(r);
       
   241 			break;
       
   242 			}
       
   243 		case ETestPageAlignedGrowing:
       
   244 		// Page-aligned growing pool
       
   245 			{
       
   246 			test.Printf(_L("Page-aligned growing\n"));
       
   247 			test_Equal(0, P1.Handle());
       
   248 
       
   249 			TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs / 2);
       
   250 			// Set shrink hysteresis high so pool can't shrink
       
   251 			r = inf.SetSizingAttributes(KTestPoolSizeInBufs, 25, 26, 25600);
       
   252 			test_KErrNone(r);
       
   253 			r = P1.Create(inf,KDefaultPoolHandleFlags);
       
   254 			test_KErrNone(r);
       
   255 
       
   256 			r = P1.SetBufferWindow(-1, ETrue);
       
   257 			test_KErrNone(r);
       
   258 
       
   259 			TShPoolInfo poolinfo;
       
   260 			P1.GetInfo(poolinfo);
       
   261 			test_Equal(*PtrBufSize, poolinfo.iBufSize);
       
   262 			test_Equal(KTestPoolSizeInBufs / 2, poolinfo.iInitialBufs);
       
   263 			test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
       
   264 			test_Equal(25, poolinfo.iGrowTriggerRatio);
       
   265 			test_Equal(26, poolinfo.iGrowByRatio);
       
   266 			test_Equal(25600, poolinfo.iShrinkHysteresisRatio);
       
   267 			test_Equal(Log2(pagesize), poolinfo.iAlignment);
       
   268 			test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
       
   269 			test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));
       
   270 
       
   271 			r = Ldd.OpenUserPool(P1.Handle(), poolinfo);
       
   272 			test_KErrNone(r);
       
   273 			break;
       
   274 			}
       
   275 		default:
       
   276 			test(EFalse);
       
   277 		}
       
   278 	}
       
   279 
       
   280 /*
       
   281 @SYMTestCaseID				2
       
   282 @SYMTestCaseDesc			Create pool from kernel-side
       
   283 @SYMREQ						REQ11423
       
   284 @SYMTestActions
       
   285 	1. Device Driver creates a pool (P2) and passes handle to this thread.
       
   286 	2. Test Thread opens pool and checks its attributes.
       
   287 @SYMTestExpectedResults
       
   288 	1. Ok.
       
   289 	2. Ok.
       
   290 @SYMTestPriority			Critical
       
   291 */
       
   292 
       
   293 void CreateKernelPool(TTestPoolType aPoolType)
       
   294 	{
       
   295 	test.Next(_L("Create kernel-side pool"));
       
   296 	TInt r;
       
   297 	TInt pagesize;
       
   298 	r = HAL::Get(HAL::EMemoryPageSize, pagesize);
       
   299 	test_KErrNone(r);
       
   300 	TInt handle;
       
   301 
       
   302 	switch (aPoolType)
       
   303 		{
       
   304 		case ETestNonPageAligned:
       
   305 		// Non-page-aligned pool
       
   306 			{
       
   307 			test.Printf(_L("Non-page-aligned\n"));
       
   308 			test_Equal(0, P2.Handle());
       
   309 
       
   310 			TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs, 8);
       
   311 			r = Ldd.OpenKernelPool(inf, handle);
       
   312 			test_KErrNone(r);
       
   313 			P2.SetHandle(handle);
       
   314 
       
   315 			TShPoolInfo poolinfo;
       
   316 			P2.GetInfo(poolinfo);
       
   317 			test_Equal(*PtrBufSize, poolinfo.iBufSize);
       
   318 			test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
       
   319 			test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
       
   320 			test_Equal(0, poolinfo.iGrowTriggerRatio);
       
   321 			test_Equal(0, poolinfo.iGrowByRatio);
       
   322 			test_Equal(0, poolinfo.iShrinkHysteresisRatio);
       
   323 			test_Equal(8, poolinfo.iAlignment);
       
   324 			test(poolinfo.iFlags & EShPoolNonPageAlignedBuffer);
       
   325 			test(!(poolinfo.iFlags & EShPoolPageAlignedBuffer));
       
   326 			break;
       
   327 			}
       
   328 		case ETestPageAligned:
       
   329 		// Page-aligned pool
       
   330 			{
       
   331 			test.Printf(_L("Page-aligned\n"));
       
   332 			test_Equal(0, P2.Handle());
       
   333 
       
   334 			TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs);
       
   335 			r = Ldd.OpenKernelPool(inf, handle);
       
   336 			test_KErrNone(r);
       
   337 			P2.SetHandle(handle);
       
   338 
       
   339 			r = P2.SetBufferWindow(-1, ETrue);
       
   340 			test_KErrNone(r);
       
   341 
       
   342 			TShPoolInfo poolinfo;
       
   343 			P2.GetInfo(poolinfo);
       
   344 			test_Equal(*PtrBufSize, poolinfo.iBufSize);
       
   345 			test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
       
   346 			test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
       
   347 			test_Equal(0, poolinfo.iGrowTriggerRatio);
       
   348 			test_Equal(0, poolinfo.iGrowByRatio);
       
   349 			test_Equal(0, poolinfo.iShrinkHysteresisRatio);
       
   350 			test_Equal(Log2(pagesize), poolinfo.iAlignment);
       
   351 			test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
       
   352 			test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));
       
   353 			break;
       
   354 			}
       
   355 		case ETestPageAlignedGrowing:
       
    356 		// Page-aligned growing pool
       
   357 			{
       
   358 			test.Printf(_L("Page-aligned growing\n"));
       
   359 			test_Equal(0, P2.Handle());
       
   360 
       
   361 			TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs / 2);
       
   362 			// Set shrink hysteresis high so pool can't shrink
       
   363 			r = inf.SetSizingAttributes(KTestPoolSizeInBufs, 25, 26, 25600);
       
   364 			test_KErrNone(r);
       
   365 			r = Ldd.OpenKernelPool(inf, handle);
       
   366 			test_KErrNone(r);
       
   367 			P2.SetHandle(handle);
       
   368 
       
   369 			r = P2.SetBufferWindow(-1, ETrue);
       
   370 			test_KErrNone(r);
       
   371 
       
   372 			TShPoolInfo poolinfo;
       
   373 			P2.GetInfo(poolinfo);
       
   374 			test_Equal(*PtrBufSize, poolinfo.iBufSize);
       
   375 			test_Equal(KTestPoolSizeInBufs / 2, poolinfo.iInitialBufs);
       
   376 			test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
       
   377 			test_Equal(25, poolinfo.iGrowTriggerRatio);
       
   378 			test_Equal(26, poolinfo.iGrowByRatio);
       
   379 			test_Equal(25600, poolinfo.iShrinkHysteresisRatio);
       
   380 			test_Equal(Log2(pagesize), poolinfo.iAlignment);
       
   381 			test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
       
   382 			test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));
       
   383 			break;
       
   384 			}
       
   385 		default:
       
   386 			test(EFalse);
       
   387 		}
       
   388 	}
       
   389 
       
   390 /*
       
   391 @SYMTestCaseID				20
       
   392 @SYMTestCaseDesc			Close pool from kernel-side
       
   393 @SYMREQ						REQ11423
       
   394 @SYMTestActions
       
   395 	1. Device Driver closes P2.
       
   396 	2. Test Thread closes P2.
       
   397 @SYMTestExpectedResults
       
   398 	1. OK and Access Count is now 1.
       
    399 	2. OK.
       
   400 @SYMTestPriority			Critical
       
   401 */
       
   402 
       
   403 void CloseKernelPool()
       
   404 	{
       
   405 	test.Next(_L("Close kernel-side pool"));
       
   406 	TInt r;
       
   407 
       
   408 	r = Ldd.CloseKernelPool();
       
   409 	test_KErrNone(r);
       
   410 
       
   411 	P2.Close();
       
   412 
       
   413 	// wait for memory to be freed
       
   414 	r = UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, (TAny*)5000, 0);
       
   415 	test_KErrNone(r);
       
   416 
       
   417 	}
       
   418 
       
   419 /*
       
   420 @SYMTestCaseID				21
       
   421 @SYMTestCaseDesc			Close pool from user-side
       
   422 @SYMREQ						REQ11423
       
   423 @SYMTestActions
       
   424 	1. Test Thread closes P1.
       
   425 	2. Device Driver closes P1.
       
   426 @SYMTestExpectedResults
       
   427 	1. OK and Access Count is now 1.
       
   428 	2. OK.
       
   429 @SYMTestPriority			Critical
       
   430 */
       
   431 
       
   432 void CloseUserPool()
       
   433 	{
       
   434 	test.Next(_L("Close user-side pool"));
       
   435 	TInt r;
       
   436 
       
   437 	P1.Close();
       
   438 
       
   439 	r = Ldd.CloseUserPool();
       
   440 	test_KErrNone(r);
       
   441 
       
   442 	// wait for memory to be freed
       
   443 	r = UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, (TAny*)5000, 0);
       
   444 	test_KErrNone(r);
       
   445 	}
       
   446 
       
   447 /*
       
   448 @SYMTestCaseID				3
       
   449 @SYMTestCaseDesc			Buffer allocation from user-side
       
   450 @SYMREQ						REQ11423
       
   451 @SYMTestActions
       
   452 	1. Test Thread creates a shared buffer on P1.
       
   453 	2. Test Thread passes buffer to Device Driver.
       
   454 	3. Device Driver obtains buffer and manipulates its contents.
       
   455 	4. Device Driver releases buffer.
       
   456 	5. Test Thread releases buffer.
       
   457 @SYMTestExpectedResults
       
   458 	1. Ok.
       
   459 	2. Ok.
       
   460 	3. Ok.
       
   461 	4. Ok.
       
   462 	5. Ok. Buffer de-allocated.
       
   463 @SYMTestPriority			Critical
       
   464 */
       
   465 
       
   466 void AllocateUserBuffer()
       
   467 	{
       
   468 	test.Next(_L("Allocate user-side buffer"));
       
   469 	TInt r;
       
   470 	RShBuf buf;
       
   471 
       
   472 	// Allocate buffer on POOL 1
       
   473 	__KHEAP_MARK;
       
   474 	r = buf.Alloc(P1);
       
   475 	test_KErrNone(r);
       
   476 	__KHEAP_CHECK(0);
       
   477 
       
   478 	TInt i;
       
   479 	TShPoolInfo poolinfo1;
       
   480 	P1.GetInfo(poolinfo1);
       
   481 	TInt blocks = poolinfo1.iBufSize / KTestData1().Length();
       
   482 
       
   483 	for (i = 0; i < blocks; i++)
       
   484 		{
       
   485 		TPtr8(buf.Ptr() + (i * KTestData1().Length()), KTestData1().Length(),KTestData1().Length()).Copy(KTestData1());
       
   486 		}
       
   487 	r = Ldd.ManipulateUserBuffer(buf.Handle());
       
   488 
       
   489 	test_KErrNone(r);
       
   490 
       
   491 	TBuf8<64> tmp;
       
   492 
       
   493 	P1.GetInfo(poolinfo1);
       
   494 	blocks = poolinfo1.iBufSize / tmp.MaxSize();
       
   495 
       
   496 	for (i = 0 ; i < blocks; i++)
       
   497 		{
       
   498 		tmp.Fill(i);
       
   499 		TPtrC8 ptrc(buf.Ptr() + (i * tmp.Length()), tmp.Length());
       
   500 		r = tmp.Compare(ptrc);
       
   501 		test_Equal(0, r);
       
   502 		}
       
   503 	buf.Close();
       
   504 	__KHEAP_MARKEND;
       
   505 
       
   506 	// Allocate buffer on POOL 2
       
   507 	__KHEAP_MARK;
       
   508 	r = buf.Alloc(P2);
       
   509 	test_KErrNone(r);
       
   510 	__KHEAP_CHECK(0);
       
   511 
       
   512 	TShPoolInfo poolinfo2;
       
   513 	P2.GetInfo(poolinfo2);
       
   514 	blocks = poolinfo2.iBufSize / KTestData1().Length(); // PC REMOVE
       
   515 
       
   516 	for (i = 0; i < blocks; i++)
       
   517 		{
       
   518 		TPtr8(buf.Ptr() + (i * KTestData1().Length()), KTestData1().Length(),KTestData1().Length()).Copy(KTestData1());
       
   519 		}
       
   520 
       
   521 	r = Ldd.ManipulateUserBuffer(buf.Handle());
       
   522 	test_KErrNone(r);
       
   523 
       
   524 	P2.GetInfo(poolinfo2);
       
   525 	blocks = poolinfo2.iBufSize / tmp.MaxSize(); // PC REMOVE
       
   526 
       
   527 	for (i = 0 ; i < blocks; i++)
       
   528 		{
       
   529 		tmp.Fill(i);
       
   530 		r = tmp.Compare(TPtr8(buf.Ptr() + (i * tmp.Length()), tmp.Length(), tmp.Length()));
       
   531 		test_Equal(0, r);
       
   532 		}
       
   533 	buf.Close();
       
   534 	__KHEAP_MARKEND;
       
   535 	}
       
   536 
       
   537 /*
       
   538 @SYMTestCaseID				4
       
   539 @SYMTestCaseDesc			Buffer allocation from kernel-side
       
   540 @SYMREQ						REQ11423
       
   541 @SYMTestActions
       
   542 	1. Device Driver creates a buffer on P2.
       
   543 	2. Device Driver manipulates buffer and passes it to Test Thread.
       
    544 	3. Test Thread manipulates buffer and sends it back to Device Driver.

    545 	4. Device Driver checks the buffer's contents and releases it.
       
   546 @SYMTestExpectedResults
       
   547 	1. Ok.
       
   548 	2. Ok.
       
   549 	3. Ok.
       
   550 	4. Ok. Buffer de-allocated.
       
   551 @SYMTestPriority			Critical
       
   552 */
       
   553 
       
   554 void AllocateKernelBuffer()
       
   555 	{
       
   556 	test.Next(_L("Allocate kernel-side buffer"));
       
   557 	TInt r;
       
   558 	TInt handle;
       
   559 	RShBuf kbuf0, kbuf1;
       
   560 
       
   561 	// Allocate buffer on POOL 1
       
   562 	r = Ldd.AllocateKernelBuffer(0, handle);
       
   563 	test_KErrNone(r);
       
   564 	kbuf0.SetHandle(handle);
       
   565 
       
   566 	TInt i;
       
   567 	TShPoolInfo poolinfo1;
       
   568 	P1.GetInfo(poolinfo1);
       
   569 	TInt blocks = poolinfo1.iBufSize / KTestData2().Length();
       
   570 	for (i = 0; i < blocks; i++)
       
   571 		{
       
   572 		r = KTestData2().Compare(TPtr8(kbuf0.Ptr() + (i * KTestData2().Length()), KTestData2().Length(), KTestData2().Length()));
       
   573 
       
   574 		test_Equal(0, r);
       
   575 		}
       
   576 	kbuf0.Close();
       
   577 
       
   578 	// Allocate buffer on POOL 2
       
   579 	r = Ldd.AllocateKernelBuffer(1, handle);
       
   580 	test_KErrNone(r);
       
   581 	kbuf1.SetHandle(handle);
       
   582 
       
   583 	TShPoolInfo poolinfo2;
       
   584 	P2.GetInfo(poolinfo2);
       
   585 	blocks = poolinfo2.iBufSize / KTestData2().Length();
       
   586 
       
   587 	for (i = 0; i < blocks; i++)
       
   588 		{
       
   589 		r = KTestData2().Compare(TPtr8(kbuf1.Ptr() + (i * KTestData2().Length()), KTestData2().Length(), KTestData2().Length()));
       
   590 
       
   591 		test_Equal(0, r);
       
   592 		}
       
   593 	kbuf1.Close();
       
   594 	}
       
   595 
       
   596 
       
   597 /*
       
   598 @SYMTestCaseID				X1
       
   599 @SYMTestCaseDesc			Allocate maximum number of buffers in a pool (user/kernel)
       
   600 @SYMREQ						REQ11423
       
   601 @SYMTestActions
       
   602 	Allocate as many buffers on a pool as possible.
       
    603 	Free them all and re-allocate them.
       
   604 	Free them all.
       
   605 @SYMTestExpectedResults
       
   606 	Ok.
       
   607 @SYMTestPriority			High
       
   608 */
       
   609 
       
   610 void AllocateUserMax(RShPool& aPool)
       
   611 	{
       
   612 	test.Next(_L("Exhaust pool memory from user-side"));
       
   613 	TInt r;
       
   614 
       
   615 	TShPoolInfo poolinfo;
       
   616 	aPool.GetInfo(poolinfo);
       
   617 	TBool aligned = (poolinfo.iFlags & EShPoolPageAlignedBuffer);
       
   618 	RDebug::Printf("aligned=%d",aligned);
       
   619 
       
   620 	RArray<RShBuf> bufarray;
       
   621 	do
       
   622 		{
       
   623 		RShBuf buf;
       
   624 		r = buf.Alloc(aPool);
       
   625 		if (r==KErrNoMemory && KTestPoolSizeInBufs>bufarray.Count())
       
   626 			{
       
   627 			// try again after a delay, to allow for background resource allocation
       
   628 			
       
   629 			User::After(1000000);
       
   630 			r = buf.Alloc(aPool);
       
   631 			}
       
   632 		if (!r)
       
   633 			{
       
   634 			r = bufarray.Append(buf);
       
   635 			test_KErrNone(r);
       
   636 			FillShBuf(buf,0x99);
       
   637 			}
       
   638 		}
       
   639 	while (r == KErrNone);
       
   640 	test_Equal(KErrNoMemory, r);
       
   641 	test_Compare(KTestPoolSizeInBufs, <=, bufarray.Count());
       
   642 
       
   643 	TInt n = bufarray.Count();
       
   644 	while (n)
       
   645 		{
       
   646 		bufarray[--n].Close();
       
   647 		}
       
   648 
       
   649 	User::After(500000);
       
   650 
       
   651 	// Do it once more
       
   652 	n = 0;
       
   653 	while (n<bufarray.Count())
       
   654 		{
       
   655 		r = bufarray[n].Alloc(aPool);
       
   656 		if (r==KErrNoMemory)
       
   657 			{
       
   658 			// try again after a delay, to allow for background resource allocation
       
   659 			User::After(1000000);
       
   660 			r = bufarray[n].Alloc(aPool);
       
   661 			}
       
   662 		test_Assert(r == KErrNone, test.Printf(_L("n=%d r=%d\n"), n, r));
       
   663 		if(aligned)
       
   664 			test(CheckNotFillShBuf(bufarray[n],0x99));
       
   665 		++n;
       
   666 		}
       
   667 
       
   668 	RShBuf extrabuf;
       
   669 	r = extrabuf.Alloc(aPool);
       
   670 	test_Equal(KErrNoMemory, r);
       
   671 
       
   672 	while (n)
       
   673 		{
       
   674 		bufarray[--n].Close();
       
   675 		}
       
   676 
       
   677 	bufarray.Close();
       
   678 	}
       
   679 
       
   680 void AllocateKernelMax()
       
   681 	{
       
   682 	test.Next(_L("Exhaust pool memory from kernel-side"));
       
   683 	TInt r;
       
   684 	TInt allocated;
       
   685 	r = Ldd.AllocateMax(0, allocated); // P1
       
   686 	test_KErrNone(r);
       
   687 	test_Equal(KTestPoolSizeInBufs, allocated);
       
   688 	r = Ldd.AllocateMax(1, allocated); // P2
       
   689 	test_KErrNone(r);
       
   690 	test_Equal(KTestPoolSizeInBufs, allocated);
       
   691 	}
       
   692 
       
   693 
       
   694 /*
       
   695 @SYMTestCaseID				11
       
   696 @SYMTestCaseDesc			Buffer alignment (kernel/user)
       
   697 @SYMREQ						REQ11423
       
   698 @SYMTestActions
       
   699 	1. Test Thread creates several pools with different buffer alignment
       
    700 	   requirements.
       
   701 	2. Test Thread allocates buffers on all pools.
       
   702 	3. Test Thread frees all buffers and close pools.
       
   703 @SYMTestExpectedResults
       
   704 	1. Ok.
       
   705 	2. Buffers are aligned to the desired boundary.
       
   706 	3. Ok.
       
   707 @SYMTestPriority			High
       
   708 */
       
   709 
       
   710 void BufferAlignmentUser()
       
   711 	{
       
   712 	test.Next(_L("Buffer alignment (User)"));
       
   713 	TInt pagesize;
       
   714 	TInt r;
       
   715 	r = HAL::Get(HAL::EMemoryPageSize, pagesize);
       
   716 	test_KErrNone(r);
       
   717 
       
   718 	// Non page aligned buffers
       
   719 	TInt i;
       
   720 	for (i = 0; i <= Log2(pagesize); i++)
       
   721 		{
       
   722 		test.Printf(_L("."));
       
   723 		TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, 20, i); // TODO: Change minbufs back to 8 when the pool growing code works
       
   724 		RShPool pool;
       
   725 		r = pool.Create(inf,KDefaultPoolHandleFlags);
       
   726 		test_KErrNone(r);
       
   727 
       
   728 		TInt j;
       
   729 		RShBuf buf[20];
       
   730 		for (j = 0; j < 20; j++)
       
   731 			{
       
   732 			r = buf[j].Alloc(pool);
       
   733 			test_KErrNone(r);
       
   734 			}
       
   735 
       
   736 		TInt alignment = i;
       
   737 		if (alignment < KTestMinimumAlignmentLog2)
       
   738 			{
       
   739 			alignment = KTestMinimumAlignmentLog2;
       
   740 			}
       
   741 		for (j = 0; j < 20; j++)
       
   742 			{
       
   743 			test_Assert(!((TUint32) buf[j].Ptr() & ((1 << alignment) - 1)),
       
   744 				test.Printf(_L("Pool%d buf[%d].Base() == 0x%08x"), i, j, buf[j].Ptr()));
       
   745 			}
       
   746 
       
   747 		for (j = 0; j < 20; j++)
       
   748 			{
       
   749 			buf[j].Close();
       
   750 			}
       
   751 		pool.Close();
       
   752 		// delay to allow the management dfc to run and close pool
       
   753 		User::After(100000);
       
   754 		}
       
   755 	test.Printf(_L("\n"));
       
   756 
       
   757 	// Page aligned buffers
       
   758 	TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, 20); // TODO: Change minbufs back to 8 when the pool growing code works
       
   759 	RShPool pool;
       
   760 	r = pool.Create(inf,KDefaultPoolHandleFlags);
       
   761 	test_KErrNone(r);
       
   762 
       
   763 	r = pool.SetBufferWindow(-1, ETrue);
       
   764 	test_KErrNone(r);
       
   765 
       
   766 	TInt j;
       
   767 	RShBuf buf[20];
       
   768 	for (j = 0; j < 20; j++)
       
   769 		{
       
   770 		r = buf[j].Alloc(pool);
       
   771 		test_KErrNone(r);
       
   772 		}
       
   773 
       
   774 	for (j = 0; j < 20; j++)
       
   775 		{
       
   776 		test_Assert(!((TUint32) buf[j].Ptr() & (pagesize - 1)),
       
   777 					test.Printf(_L("buf[%d].Base() == 0x%08x"), j, buf[j].Ptr()));
       
   778 		}
       
   779 	for (j = 0; j < 20; j++)
       
   780 		{
       
   781 		buf[j].Close();
       
   782 		}
       
   783 	pool.Close();
       
   784 	}
       
   785 
       
   786 void BufferAlignmentKernel()
       
   787 	{
       
   788 	test.Next(_L("Buffer alignment (Kernel)"));
       
   789 	TInt r;
       
   790 
       
   791 	TInt pagesize;
       
   792 	r = HAL::Get(HAL::EMemoryPageSize, pagesize);
       
   793 	test_KErrNone(r);
       
   794 
       
   795 	for (TInt i = 0; i < Log2(pagesize); i++)
       
   796 		{
       
   797 		test.Printf(_L("."));
       
   798 		r = Ldd.BufferAlignmentKernel(*PtrBufSize, i);
       
   799 		test_KErrNone(r);
       
   800 		// delay to allow the management dfc to run
       
   801 		User::After(100000);
       
   802 		}
       
   803 	test.Printf(_L("\n"));
       
   804 	}
       
   805 
       
   806 /*
       
   807 @SYMTestCaseID				6
       
   808 @SYMTestCaseDesc			Create pool at specific physical address
       
   809 @SYMREQ						REQ11423
       
   810 @SYMTestActions
       
    811 	1. Device Driver allocates a memory chunk.

    812 	2. Device Driver requests the physical address of this memory chunk.

    813 	3. Device Driver creates a pool at the physical address of the memory chunk.

    814 	4. Device Driver allocates buffers on the pool, frees them and closes the pool.
       
   815 @SYMTestExpectedResults
       
   816 	1. Ok.
       
   817 	2. Ok.
       
   818 	3. Ok.
       
    819 	4. Ok.
       
   820 @SYMTestPriority			High
       
   821 */
       
   822 
       
   823 void CreateKernelPoolPhysAddr()
       
   824 	{
       
   825 	test.Next(_L("Create pool at specific physical address"));
       
   826 	TInt r;
       
   827 	test.Start(_L("Contiguous physical memory"));
       
   828 	r = Ldd.CreatePoolPhysAddrCont(*PtrBufSize);
       
   829 	test_KErrNone(r);
       
   830 	test.Next(_L("Discontiguous physical memory"));
       
   831 	r = Ldd.CreatePoolPhysAddrNonCont(*PtrBufSize);
       
   832 	test_KErrNone(r);
       
   833 	test.End();
       
   834 	}
       
   835 
       
   836 /*
       
   837 @SYMTestCaseID				14
       
   838 @SYMTestCaseDesc			Buffer separation and overwrites
       
   839 @SYMREQ						REQ11423
       
   840 @SYMTestActions
       
   841 	1. Test Thread creates two pools:
       
   842 		- A pool with no guard pages.
       
   843 		- A pool with guard pages.
       
   844 	2. Allocate two buffers on each pool.
       
   845 	3. Test Thread creates Secondary Thread.
       
    846 	4. Secondary Thread starts reading the contents of the first buffer and keeps

    847 	   reading beyond its limits (using a pointer, not a descriptor).

    848 	5. Secondary Thread starts writing to the first buffer and keeps writing beyond

    849 	   its limits (using a pointer, not a descriptor).
       
   850 	6. Free buffers and close pools.
       
   851 @SYMTestExpectedResults
       
   852 	1. Ok.
       
   853 	2. Ok.
       
   854 	3. Ok.
       
   855 	4. Secondary Thread panics when it attempts to read the guard page, if there
       
   856 	   is one. Otherwise, it moves on to the second buffer. (Secondary Thread will
       
   857 	   have to be restarted).
       
   858 	5. Secondary Thread panics when it attempts to write on the guard page if
       
   859 	   there is one. Otherwise, it carries on writing on to the second buffer.
       
   860 	6. Ok.
       
   861 @SYMTestPriority			High
       
   862 */
       
   863 
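// Secondary thread entry point: reads *PtrBufSize bytes starting at aArg and returns KErrUnknown unless every byte is '$'.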
       
   864 TInt ThreadGuardPagesRead(TAny* aArg)
       
   865 	{
       
   866 	TUint8* ptr = (TUint8*) aArg;
       
   867 	if (ptr == NULL)
       
   868 		{
       
   869 		return KErrArgument;
       
   870 		}
       
   871 	TInt bufsize = *PtrBufSize;
       
   872 	TInt i;
       
   873 	TUint8 val = '$';
       
   874 	TBool isok = ETrue;
       
   875 	for (i = 0; i < bufsize; i++)
       
   876 		{
       
   877 		if (*(ptr + i) != val)
       
   878 			{
       
   879 			isok = EFalse;
       
   880 			}
       
   881 		}
       
   882 	if (!isok)
       
   883 		{
       
   884 		return KErrUnknown;
       
   885 		}
       
   886 	return KErrNone;
       
   887 	}
       
   888 
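// Secondary thread entry point: writes '#' over *PtrBufSize bytes starting at aArg.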
       
   889 TInt ThreadGuardPagesWrite(TAny* aArg)
       
   890 	{
       
   891 	TUint8* ptr = (TUint8*) aArg;
       
   892 	if (ptr == NULL)
       
   893 		{
       
   894 		return KErrArgument;
       
   895 		}
       
   896 	TInt bufsize = *PtrBufSize;
       
   897 	TInt i;
       
   898 	for (i = 0; i < bufsize; i++)
       
   899 		{
       
   900 		*(ptr + i) = '#';
       
   901 		}
       
   902 	return KErrNone;
       
   903 	}
       
   904 
       
   905 void GuardPages()
       
   906 	{
       
   907 	test.Next(_L("Guard pages"));
       
   908 	TInt pagesize;
       
   909 	TInt r;
       
   910 	r = HAL::Get(HAL::EMemoryPageSize, pagesize);
       
   911 	test_KErrNone(r);
       
   912 
       
   913 	// Create pools
       
   914 	RShPool pool1;
       
   915 	RShPool pool2;
       
   916 	TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs);
       
   917 	r = pool1.Create(inf,KDefaultPoolHandleFlags);
       
   918 	test_KErrNone(r);
       
   919 
       
   920 	r = pool1.SetBufferWindow(-1, ETrue);
       
   921 	test_KErrNone(r);
       
   922 
       
   923 	r = inf.SetGuardPages();
       
   924 	test_KErrNone(r);
       
   925 	r = pool2.Create(inf,KDefaultPoolHandleFlags);
       
   926 	test_KErrNone(r);
       
   927 
       
   928 	r = pool2.SetBufferWindow(-1, ETrue);
       
   929 	test_KErrNone(r);
       
   930 
       
   931 	// Allocate buffers
       
   932 	RShBuf bufs1[KTestPoolSizeInBufs];
       
   933 	RShBuf bufs2[KTestPoolSizeInBufs];
       
   934 	TInt i;
       
   935 	for (i = 0; i < KTestPoolSizeInBufs; i++)
       
   936 		{
       
   937 		r = bufs1[i].Alloc(pool1);
       
   938 		test_Assert(r == KErrNone, test.Printf(_L("Pool1: i=%d r=%d\n"), i, r));
       
   939 		TPtr8 ptr(bufs1[i].Ptr(), bufs1[i].Size(),bufs1[i].Size());
       
   940 		ptr.Fill('$');
       
   941 		}
       
   942 	for (i = 0; i < KTestPoolSizeInBufs; i++)
       
   943 		{
       
   944 		r = bufs2[i].Alloc(pool2);
       
   945 		test_Assert(r == KErrNone, test.Printf(_L("Pool2: i=%d r=%d\n"), i, r));
       
    946 		TPtr8 ptr(bufs2[i].Ptr(), bufs2[i].Size(), bufs2[i].Size());
       
   947 		ptr.Fill('$');
       
   948 		}
       
   949 
       
   950 	_LIT(KTestThreadRead, "GuardPagesReadTS%dP%dB%d");
       
   951 	for (i = 0; i < KTestPoolSizeInBufs - 1; i++)
       
   952 		{
       
   953 		TBuf<40> threadname;
       
   954 		RThread thread;
       
   955 		TRequestStatus rs;
       
   956 
       
   957 		// 1. Simple read within buffer
       
   958 		// Pool 1
       
   959 		threadname.Format(KTestThreadRead, 1, 1, i);
       
   960 		r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
       
   961 			(TAny*) bufs1[i].Ptr());
       
   962 		test_KErrNone(r);
       
   963 		thread.Logon(rs);
       
   964 		thread.Resume();
       
   965 		User::WaitForRequest(rs);
       
   966 		test_KErrNone(rs.Int());
       
   967 		test_Equal(EExitKill, thread.ExitType());
       
   968 		test_KErrNone(thread.ExitReason());
       
   969 		thread.Close();
       
   970 		// Pool 2
       
   971 		threadname.Format(KTestThreadRead, 1, 2, i);
       
   972 		r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
       
   973 			(TAny*) bufs2[i].Ptr());
       
   974 		test_KErrNone(r);
       
   975 		thread.Logon(rs);
       
   976 		thread.Resume();
       
   977 		User::WaitForRequest(rs);
       
   978 		test_KErrNone(rs.Int());
       
   979 		test_Equal(EExitKill, thread.ExitType());
       
   980 		test_KErrNone(thread.ExitReason());
       
   981 		thread.Close();
       
   982 
       
   983 		// 2. If the buffer size is not a multiple of the MMU page size, it should be
       
   984 		// possible to read after the buffer end until the page boundary
       
   985 		if (*PtrBufSize % pagesize)
       
   986 			{
       
   987 			// Pool 1
       
   988 			threadname.Format(KTestThreadRead, 2, 1, i);
       
   989 			r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
       
   990 				(TAny*) (bufs1[i].Ptr() + pagesize - *PtrBufSize % pagesize));
       
   991 			test_KErrNone(r);
       
   992 			thread.Logon(rs);
       
   993 			thread.Resume();
       
   994 			User::WaitForRequest(rs);
       
   995 			if (rs.Int() != KErrNone)
       
   996 				{
       
   997 				test_Equal(KErrUnknown, rs.Int());
       
   998 				test_Equal(KErrUnknown, thread.ExitReason());
       
   999 				}
       
  1000 			test_Equal(EExitKill, thread.ExitType());
       
  1001 			thread.Close();
       
  1002 			// Pool 2
       
  1003 			threadname.Format(KTestThreadRead, 2, 2, i);
       
  1004 			r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
       
  1005 				(TAny*) (bufs2[i].Ptr() + pagesize - *PtrBufSize % pagesize));
       
  1006 			test_KErrNone(r);
       
  1007 			thread.Logon(rs);
       
  1008 			thread.Resume();
       
  1009 			User::WaitForRequest(rs);
       
  1010 			if (rs.Int() != KErrNone)
       
  1011 				{
       
  1012 				test_Equal(KErrUnknown, rs.Int());
       
  1013 				test_Equal(KErrUnknown, thread.ExitReason());
       
  1014 				}
       
  1015 			test_Equal(EExitKill, thread.ExitType());
       
  1016 			thread.Close();
       
  1017 			}
       
  1018 
       
  1019 		// 3. Now we attempt to read the first byte on the next page after the end of
       
  1020 		// our buffer.
       
  1021 		TInt offset;
       
  1022 		if (*PtrBufSize % pagesize)
       
  1023 			{
       
  1024 			offset = pagesize - *PtrBufSize % pagesize + 1;
       
  1025 			}
       
  1026 		else
       
  1027 			{
       
  1028 			offset = 1;
       
  1029 			}
       
  1030 		// Pool 1
       
  1031 		if (bufs1[i + 1].Ptr() == bufs1[i].Ptr() + RoundUp(*PtrBufSize, Log2(pagesize)))
       
  1032 			{
       
  1033 			// Only perform this test if the next buffer comes immediately next to this
       
  1034 			// one. This is not necessarily the case on the Flexible Memory Model.
       
  1035 			threadname.Format(KTestThreadRead, 3, 1, i);
       
  1036 			r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
       
  1037 				(TAny*) (bufs1[i].Ptr() + offset));
       
  1038 			test_KErrNone(r);
       
  1039 			thread.Logon(rs);
       
  1040 			thread.Resume();
       
  1041 			User::WaitForRequest(rs);
       
  1042 			if (rs.Int() != KErrNone) // No guard page, so it should be fine
       
  1043 				{
       
  1044 				test_Equal(KErrUnknown, rs.Int());
       
  1045 				test_Equal(KErrUnknown, thread.ExitReason());
       
  1046 				}
       
  1047 			test_Equal(EExitKill, thread.ExitType());
       
  1048 			thread.Close();
       
  1049 			}
       
  1050 		// Pool 2
       
  1051 		TBool jit = User::JustInTime();
       
  1052 		User::SetJustInTime(EFalse);
       
  1053 		threadname.Format(KTestThreadRead, 3, 2, i);
       
  1054 		r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
       
  1055 			(TAny*) (bufs2[i].Ptr() + offset));
       
  1056 		test_KErrNone(r);
       
  1057 		thread.Logon(rs);
       
  1058 		thread.Resume();
       
  1059 		User::WaitForRequest(rs);
       
  1060 		test_Equal(3, rs.Int());
       
  1061 		test_Equal(EExitPanic, thread.ExitType());
       
  1062 		test_Equal(3, thread.ExitReason()); // KERN-EXEC 3
       
  1063 		thread.Close();
       
  1064 		User::SetJustInTime(jit);
       
  1065 		}
       
  1066 
       
  1067 	_LIT(KTestThreadWrite, "GuardPagesWriteTS%dP%dB%d");
       
  1068 	for (i = 0; i < KTestPoolSizeInBufs - 1; i++)
       
  1069 		{
       
  1070 		TBuf<40> threadname;
       
  1071 		RThread thread;
       
  1072 		TRequestStatus rs;
       
  1073 
       
  1074 		// 1. Simple write within buffer
       
  1075 		// Pool 1
       
  1076 		threadname.Format(KTestThreadWrite, 1, 1, i);
       
  1077 		r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
       
  1078 			(TAny*) bufs1[i].Ptr());
       
  1079 		test_KErrNone(r);
       
  1080 		thread.Logon(rs);
       
  1081 		thread.Resume();
       
  1082 		User::WaitForRequest(rs);
       
  1083 		test_KErrNone(rs.Int());
       
  1084 		test_Equal(EExitKill, thread.ExitType());
       
  1085 		test_KErrNone(thread.ExitReason());
       
  1086 		thread.Close();
       
  1087 		// Pool 2
       
  1088 		threadname.Format(KTestThreadWrite, 1, 2, i);
       
  1089 		r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
       
  1090 			(TAny*) bufs2[i].Ptr());
       
  1091 		test_KErrNone(r);
       
  1092 		thread.Logon(rs);
       
  1093 		thread.Resume();
       
  1094 		User::WaitForRequest(rs);
       
  1095 		test_KErrNone(rs.Int());
       
  1096 		test_Equal(EExitKill, thread.ExitType());
       
  1097 		test_KErrNone(thread.ExitReason());
       
  1098 		thread.Close();
       
  1099 
       
  1100 		// 2. If the buffer size is not a multiple of the MMU page size, it should be
       
  1101 		// possible to write after the buffer end until the page boundary
       
  1102 		if (*PtrBufSize % pagesize)
       
  1103 			{
       
  1104 			// Pool 1
       
  1105 			threadname.Format(KTestThreadWrite, 2, 1, i);
       
  1106 			r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
       
  1107 				(TAny*) (bufs1[i].Ptr() + pagesize - *PtrBufSize % pagesize));
       
  1108 			test_KErrNone(r);
       
  1109 			thread.Logon(rs);
       
  1110 			thread.Resume();
       
  1111 			User::WaitForRequest(rs);
       
  1112 			test_KErrNone(rs.Int());
       
  1113 			test_Equal(EExitKill, thread.ExitType());
       
  1114 			test_KErrNone(thread.ExitReason());
       
  1115 			thread.Close();
       
  1116 			// Pool 2
       
  1117 			threadname.Format(KTestThreadWrite, 2, 2, i);
       
  1118 			r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
       
  1119 				(TAny*) (bufs2[i].Ptr() + pagesize - *PtrBufSize % pagesize));
       
  1120 			test_KErrNone(r);
       
  1121 			thread.Logon(rs);
       
  1122 			thread.Resume();
       
  1123 			User::WaitForRequest(rs);
       
  1124 			test_KErrNone(rs.Int());
       
  1125 			test_Equal(EExitKill, thread.ExitType());
       
  1126 			test_KErrNone(thread.ExitReason());
       
  1127 			thread.Close();
       
  1128 			}
       
  1129 
       
  1130 		// 3. Now we attempt to write on the first byte on the next page after the
       
  1131 		// end of our buffer.
       
  1132 		TInt offset;
       
  1133 		if (*PtrBufSize % pagesize)
       
  1134 			{
       
  1135 			offset = pagesize - *PtrBufSize % pagesize + 1;
       
  1136 			}
       
  1137 		else
       
  1138 			{
       
  1139 			offset = 1;
       
  1140 			}
       
  1141 		// Pool 1
       
  1142 		if (bufs1[i + 1].Ptr() == bufs1[i].Ptr() + RoundUp(*PtrBufSize, Log2(pagesize)))
       
  1143 			{
       
  1144 			// Only perform this test if the next buffer comes immediately next to this
       
  1145 			// one. This is not necessarily the case on the Flexible Memory Model.
       
  1146 			threadname.Format(KTestThreadWrite, 3, 1, i);
       
  1147 			r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
       
  1148 				(TAny*) (bufs1[i].Ptr() + offset));
       
  1149 			test_KErrNone(r);
       
  1150 			thread.Logon(rs);
       
  1151 			thread.Resume();
       
  1152 			User::WaitForRequest(rs);
       
  1153 			test_KErrNone(rs.Int());
       
  1154 			test_Equal(EExitKill, thread.ExitType());
       
  1155 			test_KErrNone(thread.ExitReason());
       
  1156 			thread.Close();
       
  1157 			}
       
  1158 
       
  1159 		// Pool 2
       
  1160 		TBool jit = User::JustInTime();
       
  1161 		User::SetJustInTime(EFalse);
       
  1162 		threadname.Format(KTestThreadWrite, 3, 2, i);
       
  1163 		r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
       
  1164 			(TAny*) (bufs2[i].Ptr() + offset));
       
  1165 		test_KErrNone(r);
       
  1166 		thread.Logon(rs);
       
  1167 		thread.Resume();
       
  1168 		User::WaitForRequest(rs);
       
  1169 		test_Equal(3, rs.Int());
       
  1170 		test_Equal(EExitPanic, thread.ExitType());
       
  1171 		test_Equal(3, thread.ExitReason()); // KERN-EXEC 3
       
  1172 		thread.Close();
       
  1173 		User::SetJustInTime(jit);
       
  1174 		}
       
  1175 
       
  1176 	// Free buffers
       
  1177 	for (i = 0; i < KTestPoolSizeInBufs; i++)
       
  1178 		{
       
  1179 		bufs1[i].Close();
       
  1180 		bufs2[i].Close();
       
  1181 		}
       
  1182 	pool1.Close();
       
  1183 	pool2.Close();
       
  1184 	}
       
  1185 
       
  1186 /*
       
  1187 @SYMTestCaseID				12
       
  1188 @SYMTestCaseDesc			Buffer mapping
       
  1189 @SYMREQ						REQ11423
       
  1190 @SYMTestActions
       
  1191 	1. Test Thread allocates buffer on a mappable pool.
       
  1192 	2. Test Thread spawns Slave Process.
       
  1193 	3. Test Thread passes buffer handle to Slave Process.
       
  1194 	4. Slave Process attempts to read buffer then write to buffer.
       
  1195 	5. Slave Process maps buffer.
       
  1196 	6. Slave Process attempts to read buffer then write to buffer.
       
  1197 	7. Slave Process unmaps buffer.
       
  1198 	8. Slave Process attempts to read buffer then write to buffer.
       
  1199 	9. Test Thread kills Slave Process and frees buffer.
       
  1200 @SYMTestExpectedResults
       
  1201 	1. Ok.
       
  1202 	2. Ok.
       
  1203 	3. Ok.
       
  1204 	4. Slave Process panics. (and will have to be restarted)
       
  1205 	5. Ok.
       
  1206 	6. Ok.
       
  1207 	7. Ok.
       
  1208 	8. Slave Process panics.
       
  1209 	9. Ok.
       
  1210 @SYMTestPriority			High
       
  1211 */
       
  1212 
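// Thread entry point: reads every byte of the RShBuf passed in aArg through a volatile pointer.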
       
  1213 TInt ThreadBufferMappingRead(TAny* aArg)
       
  1214 	{
       
  1215 	if (!aArg)
       
  1216 		{
       
  1217 		return KErrArgument;
       
  1218 		}
       
  1219 	RShBuf* buf = (RShBuf*) aArg;
       
  1220 	TUint x = 0;
       
  1221 	TUint i;
       
  1222 	volatile TUint8* ptr = buf->Ptr();
       
  1223 
       
  1224 	for (i = 0; i < buf->Size(); i++)
       
  1225 		{
       
  1226 		x += *(ptr + i);
       
  1227 		}
       
  1228 	return KErrNone;
       
  1229 	}
       
  1230 
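// Thread entry point: fills the RShBuf passed in aArg with 'Q'.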
       
  1231 TInt ThreadBufferMappingWrite(TAny* aArg)
       
  1232 	{
       
  1233 	if (!aArg)
       
  1234 		{
       
  1235 		return KErrArgument;
       
  1236 		}
       
  1237 	RShBuf* buf = (RShBuf*) aArg;
       
  1238 	TPtr8 ptr(buf->Ptr(), buf->Size(),buf->Size());
       
  1239 	ptr.Fill('Q');
       
  1240 	return KErrNone;
       
  1241 	}
       
  1242 
       
  1243 const TInt KTestBufferMappingPoolTypes = 8;
       
  1244 const TInt KTestBufferMappingTypes = 8;
       
  1245 
       
  1246 void BufferMapping()
       
  1247 	{
       
  1248 	test.Next(_L("Buffer Mapping"));
       
  1249 #ifdef __WINS__
       
  1250 	test.Printf(_L("Does not run on the emulator. Skipped\n"));
       
  1251 #else
       
  1252 	TInt r;
       
  1253 	RShPool pool[KTestBufferMappingPoolTypes];
       
  1254 	RShBuf buf[KTestBufferMappingTypes][KTestBufferMappingPoolTypes];
       
  1255 	TUint poolflags[KTestBufferMappingPoolTypes];
       
  1256 	TInt bufferwindow[KTestBufferMappingPoolTypes];
       
  1257 	TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestBufferMappingTypes);
       
  1258 
       
  1259 	// POOL TYPES
       
  1260 	// ------------------------------------------
       
  1261 	// Pool no.	AutoMap	Writeable	BufWindow
       
   1262 	// 0			0			0			0

   1263 	// 1			1			0			0

   1264 	// 2			0			0			-1

   1265 	// 3			1			0			-1

   1266 	// 4			0			1			0

   1267 	// 5			1			1			0

   1268 	// 6			0			1			-1

   1269 	// 7			1			1			-1
       
  1270 
       
  1271 	TInt i;
       
  1272 	test.Printf(_L("Create pools:"));
       
  1273 	for (i = 0; i < KTestBufferMappingPoolTypes; i++)
       
  1274 		{
       
  1275 		poolflags[i] = EShPoolAllocate;
       
  1276 		bufferwindow[i] = 0;
       
  1277 		if (i % 2)
       
  1278 			{
       
  1279 			poolflags[i] |= EShPoolAutoMapBuf;
       
  1280 			}
       
  1281 		if (i > 3)
       
  1282 			{
       
  1283 			poolflags[i] |= EShPoolWriteable;
       
  1284 			}
       
  1285 		if (i % 4 > 1)
       
  1286 			{
       
  1287 			bufferwindow[i] = -1;
       
  1288 			}
       
  1289 		r = pool[i].Create(inf, poolflags[i] & ~EShPoolAutoMapBuf);
       
  1290 		test_KErrNone(r);
       
  1291 		r = pool[i].SetBufferWindow(bufferwindow[i], poolflags[i] & EShPoolAutoMapBuf);
       
  1292 		test_KErrNone(r);
       
  1293 		test.Printf(_L("."));
       
  1294 		}
       
  1295 	test.Printf(_L("\n"));
       
  1296 
       
  1297 	// BUFFER TYPES
       
  1298 	// Buffer no.	Actions
       
  1299 	// 0			Alloc unmapped.
       
  1300 	// 1			Alloc unmapped then unmap again.
       
   1301 	// 2			Default Alloc. Unmap if it is an AutoMap pool.

   1302 	// 3			Alloc unmapped. Map Read-Only.

   1303 	// 4			Default Alloc. Unmap if it is an R/W pool and re-map Read-Only.
       
  1304 	// 5			Alloc unmapped. Map R/W
       
  1305 	// 6			Default Alloc. Unmap and re-map.
       
  1306 	// 7            Default Alloc R/W. Map again with Read-Only setting.
       
  1307 	// Depending on the pool type, the actions above might not always be possible.
       
  1308 
       
  1309 	// Buffer allocation
       
  1310 	TInt j;
       
  1311 	test.Printf(_L("Allocate buffers\n"));
       
  1312 	for (j = 0; j < KTestBufferMappingPoolTypes; j++)
       
  1313 		{
       
  1314 		test.Printf(_L("\nPool %d:"), j);
       
  1315 		for (i = 0; i < KTestBufferMappingTypes; i++)
       
  1316 			{
       
  1317 			switch (i % KTestBufferMappingTypes)
       
  1318 				{
       
  1319 				// Unmapped buffers
       
  1320 				case 0:
       
  1321 				case 1:
       
  1322 					// This should always result in an unmapped buffer
       
  1323 					r = buf[i][j].Alloc(pool[j], EShPoolAllocNoMap);
       
  1324 					test_KErrNone(r);
       
  1325 
       
  1326 					if((i % KTestBufferMappingTypes) == 1)
       
  1327 						{
       
  1328 						// Alloc unmapped then unmap again.
       
  1329 						r = buf[i][j].UnMap();
       
  1330 						test_Equal(KErrNotFound, r);
       
  1331 						}
       
  1332 					break;
       
  1333 				case 2:
       
  1334 					r = buf[i][j].Alloc(pool[j]);
       
  1335 					if (poolflags[j] & EShPoolAutoMapBuf)
       
  1336 						{
       
  1337 						if (bufferwindow[j] == 0)
       
  1338 							{
       
  1339 							// Can't ask for a mapped buffer when buffer window is not set
       
  1340 							test_Equal(KErrNoMemory, r);
       
  1341 							}
       
  1342 						else
       
  1343 							{
       
  1344 							// Alloc'd buffer was mapped - unmap it
       
  1345 							test_KErrNone(r);
       
  1346 							r = buf[i][j].UnMap();
       
  1347 							test_KErrNone(r);
       
  1348 							}
       
  1349 						}
       
  1350 					else
       
  1351 						{
       
  1352 						// Buffer not mapped
       
  1353 						test_KErrNone(r);
       
  1354 						}
       
  1355 					break;
       
  1356 
       
  1357 				// Read-Only buffers
       
  1358 				case 3:
       
  1359 					r = buf[i][j].Alloc(pool[j], EShPoolAllocNoMap);
       
  1360 					test_KErrNone(r);
       
  1361 					r = buf[i][j].Map(ETrue);
       
  1362 					if (bufferwindow[j])
       
  1363 						{
       
  1364 						test_KErrNone(r);
       
  1365 						}
       
  1366 					else
       
  1367 						{
       
  1368 						test_Equal(KErrNoMemory, r);
       
  1369 						}
       
  1370 					break;
       
  1371 				case 4:
       
  1372 					r = buf[i][j].Alloc(pool[j]);
       
  1373 					if (poolflags[j] & EShPoolAutoMapBuf)
       
  1374 						{
       
  1375 						if (bufferwindow[j] == 0)
       
  1376 							{
       
  1377 							// Can't ask for a mapped buffer when buffer window is not set
       
  1378 							test_Equal(KErrNoMemory, r);
       
  1379 							}
       
  1380 						else if (poolflags[j] & EShPoolWriteable)
       
  1381 							{
       
  1382 							// Alloc'd buffer was mapped R/W - re-map it R/O
       
  1383 							test_KErrNone(r);
       
  1384 							r = buf[i][j].UnMap();
       
  1385 							test_KErrNone(r);
       
  1386 							r = buf[i][j].Map(ETrue);
       
  1387 							test_KErrNone(r);
       
  1388 							}
       
  1389 						else
       
  1390 							{
       
  1391 							// Nothing to do
       
  1392 							test_KErrNone(r);
       
  1393 							}
       
  1394 						}
       
  1395 					else
       
  1396 						{
       
  1397 						// Buffer not mapped
       
  1398 						test_KErrNone(r);
       
  1399 						if (bufferwindow[j])
       
  1400 							{
       
  1401 							if (poolflags[j] & EShPoolWriteable)
       
  1402 								{
       
  1403 								// Explicitly map Read-Only
       
  1404 								r = buf[i][j].Map(ETrue);
       
  1405 								test_KErrNone(r);
       
  1406 								}
       
  1407 							else
       
  1408 								{
       
  1409 								// If Pool is RO, map default
       
  1410 								r = buf[i][j].Map();
       
  1411 								test_KErrNone(r);
       
  1412 								}
       
  1413 							}
       
  1414 						else
       
  1415 							{
       
  1416 							// Can't map buffer
       
  1417 							r = buf[i][j].Map(ETrue);
       
  1418 							test_Equal(KErrNoMemory, r);
       
  1419 							}
       
  1420 						}
       
  1421 					break;
       
  1422 
       
  1423 				// Mapped for Read-Write
       
  1424 				case 5:
       
  1425 					r = buf[i][j].Alloc(pool[j], EShPoolAllocNoMap);
       
  1426 					test_KErrNone(r);
       
  1427 					r = buf[i][j].Map();
       
  1428 					if (bufferwindow[j] == 0)
       
  1429 						{
       
  1430 						test_Equal(KErrNoMemory, r);
       
  1431 						}
       
  1432 					else if (!(poolflags[j] & EShPoolWriteable))
       
  1433 						{
       
  1434 						test_KErrNone(r);
       
  1435 						}
       
  1436 					else
       
  1437 						{
       
  1438 						test_KErrNone(r);
       
  1439 						}
       
  1440 					break;
       
  1441 				case 6:
       
  1442 				case 7:
       
  1443 					r = buf[i][j].Alloc(pool[j]);
       
  1444 					if (poolflags[j] & EShPoolAutoMapBuf)
       
  1445 						{
       
  1446 						if (bufferwindow[j] == 0)
       
  1447 							{
       
  1448 							// Can't ask for a mapped buffer when buffer window is not set
       
  1449 							test_Equal(KErrNoMemory, r);
       
  1450 							}
       
  1451 						else if (poolflags[j] & EShPoolWriteable)
       
  1452 							{
       
  1453 							// Alloc'd buffer was mapped R/W
       
  1454 							test_KErrNone(r);
       
  1455 
       
  1456                             if((i % KTestBufferMappingTypes) == 7)
       
  1457                                 {
       
  1458                                 // Mapped for Read-Write then remapped as Read-Only
       
  1459                                 r = buf[i][j].Map(ETrue);
       
  1460                                 test_Equal(KErrAlreadyExists, r);
       
  1461                                 }
       
  1462 							}
       
  1463 						}
       
  1464 					else
       
  1465 						{
       
  1466 						// Buffer not mapped
       
  1467 						test_KErrNone(r);
       
  1468 						if (bufferwindow[j])
       
  1469 							{
       
  1470 							if (poolflags[j] & EShPoolWriteable)
       
  1471 								{
       
  1472 								// Default mapping
       
  1473 								r = buf[i][j].Map();
       
  1474                                 test_KErrNone(r);
       
  1475 
       
  1476                                 if((i % KTestBufferMappingTypes) == 7)
       
  1477                                     {
       
  1478                                     // Mapped for Read-Write then remapped as Read-Only
       
  1479                                     r = buf[i][j].Map(ETrue);
       
  1480                                     test_Equal(KErrAlreadyExists, r);
       
  1481                                     }
       
  1482 								}
       
  1483 							}
       
  1484 						else
       
  1485 							{
       
  1486 							// Can't map buffer
       
  1487 							r = buf[i][j].Map(ETrue);
       
  1488 							test_Equal(KErrNoMemory, r);
       
  1489 							}
       
  1490 						}
       
  1491 					break;
       
  1492 
       
  1493 				default: test(EFalse);
       
  1494 				}
       
  1495 			test.Printf(_L("."));
       
  1496 			}
       
  1497 		}
       
  1498 	test.Printf(_L("\n"));
       
  1499 
       
  1500 	// Read and write tests
       
  1501 	_LIT(KTestThreadName, "BufferMappingBuf%d(Test%d)");
       
  1502 	test.Printf(_L("Read & Write tests\n"));
       
  1503 	for (j = 0; j < KTestBufferMappingPoolTypes; j++)
       
  1504 		{
       
  1505 		for (i = 0; i < KTestBufferMappingTypes; i++)
       
  1506 			{
       
  1507 			if (buf[i][j].Handle())
       
  1508 				{
       
  1509 				switch (i % KTestBufferMappingTypes)
       
  1510 					{
       
  1511 					case 1:
       
  1512 					case 2:
       
  1513 					// Buffer not mapped - Read should fail
       
  1514 					if (buf[i][j].Ptr() == NULL)
       
  1515 						{
       
  1516 						RThread thread;
       
  1517 						TRequestStatus threadrs;
       
  1518 						TBuf<40> threadname;
       
  1519 						threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 1);
       
  1520 						r = thread.Create(threadname, ThreadBufferMappingRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize, (TAny*) &buf[i][j]);
       
  1521 						test_KErrNone(r);
       
  1522 						thread.Logon(threadrs);
       
  1523 						thread.Resume();
       
  1524 						User::WaitForRequest(threadrs);
       
  1525 						test_Equal(3, threadrs.Int());
       
  1526 						test_Equal(EExitPanic, thread.ExitType());
       
  1527 						test_Equal(3, thread.ExitReason()); // KERN-EXEC 3
       
  1528 						CLOSE_AND_WAIT(thread);
       
  1529 						// Map buffer read-only for next test
       
  1530 						r = buf[i][j].Map(ETrue);
       
  1531 						if (bufferwindow[j])
       
  1532 							{
       
  1533 							test_KErrNone(r);
       
  1534 							}
       
  1535 						else
       
  1536 							{
       
  1537 							test_Equal(KErrNoMemory, r);
       
  1538 							}
       
  1539 						}
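       					// Deliberate fall-through to the read-only mapping tests below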
       
  1540 					case 3:
       
  1541 					case 4:
       
  1542 					// Buffer mapped for R/O access - Read should not fail
       
  1543 					if (bufferwindow[j] == 0)
       
  1544 						{
       
  1545 						break;
       
  1546 						}
       
  1547 					else
       
  1548 						{
       
  1549 						RThread thread;
       
  1550 						TRequestStatus threadrs;
       
  1551 						TBuf<40> threadname;
       
  1552 						threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 1);
       
  1553 						r = thread.Create(threadname, ThreadBufferMappingRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize, (TAny*) &buf[i][j]);
       
  1554 						test_KErrNone(r);
       
  1555 						thread.Logon(threadrs);
       
  1556 						thread.Resume();
       
  1557 						User::WaitForRequest(threadrs);
       
  1558 						test_KErrNone(threadrs.Int());
       
  1559 						test_Equal(EExitKill, thread.ExitType());
       
  1560 						test_KErrNone(thread.ExitReason());
       
  1561 						CLOSE_AND_WAIT(thread);
       
  1562 						}
       
  1563 					// Write should fail
       
  1564 					if (buf[i][j].Ptr())
       
  1565 						{
       
  1566 						RThread thread;
       
  1567 						TRequestStatus threadrs;
       
  1568 						TBuf<40> threadname;
       
  1569 						threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 2);
       
  1570 						r = thread.Create(threadname, ThreadBufferMappingWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,(TAny*) &buf[i][j]);
       
  1571 						test_KErrNone(r);
       
  1572 						thread.Logon(threadrs);
       
  1573 						thread.Resume();
       
  1574 						User::WaitForRequest(threadrs);
       
  1575 						test_Equal(3, threadrs.Int());
       
  1576 						test_Equal(EExitPanic, thread.ExitType());
       
  1577 						test_Equal(3, thread.ExitReason()); // KERN-EXEC 3
       
  1578 						CLOSE_AND_WAIT(thread);
       
  1579 						// Map buffer read-write for next test
       
  1580 						r = buf[i][j].UnMap();
       
  1581 						if(r != KErrNotFound)
       
  1582 						    {
       
  1583 						    test_KErrNone(r);
       
  1584 						    }
       
  1585 						r = buf[i][j].Map();
       
  1586 						test_KErrNone(r);
       
  1587 						}
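       					// Deliberate fall-through to the read-write mapping tests below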
       
  1588 					case 5:
       
  1589 					case 6:
       
  1590 						// Buffer mapped for R/W access - Write should not fail
       
  1591 					if (bufferwindow[j] == 0  || !(poolflags[j] & EShPoolWriteable))
       
  1592 						{
       
  1593 						break;
       
  1594 						}
       
  1595 					else
       
  1596 						{
       
  1597 						RThread thread;
       
  1598 						TRequestStatus threadrs;
       
  1599 						TBuf<40> threadname;
       
  1600 						threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 1);
       
  1601 						r = thread.Create(threadname, ThreadBufferMappingWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,(TAny*) &buf[i][j]);
       
  1602 						test_KErrNone(r);
       
  1603 						thread.Logon(threadrs);
       
  1604 						thread.Resume();
       
  1605 						User::WaitForRequest(threadrs);
       
  1606 						test_KErrNone(threadrs.Int());
       
  1607 						test_Equal(EExitKill, thread.ExitType());
       
  1608 						test_KErrNone(thread.ExitReason());
       
  1609 						CLOSE_AND_WAIT(thread);
       
  1610 						// Unmap buffer for next test
       
  1611 						r = buf[i][j].UnMap();
       
  1612 						test_KErrNone(r);
       
  1613 						}
       
  1614 					// Buffer not mapped - Read should fail
       
  1615 					if (buf[i][j].Ptr())
       
  1616 						{
       
  1617 						RThread thread;
       
  1618 						TRequestStatus threadrs;
       
  1619 						TBuf<40> threadname;
       
  1620 						threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 2);
       
  1621 						r = thread.Create(threadname, ThreadBufferMappingRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,(TAny*) &buf[i][j]);
       
  1622 						test_KErrNone(r);
       
  1623 						thread.Logon(threadrs);
       
  1624 						thread.Resume();
       
  1625 						User::WaitForRequest(threadrs);
       
  1626 						test_Equal(3, threadrs.Int());
       
  1627 						test_Equal(EExitPanic, thread.ExitType());
       
  1628 						test_Equal(3, thread.ExitReason()); // KERN-EXEC 3
       
  1629 						CLOSE_AND_WAIT(thread);
       
  1630 						}
       
  1631 					}
       
  1632 				}
       
  1633 			buf[i][j].Close();
       
  1634 			test.Printf(_L("."));
       
  1635 			}
       
  1636 		pool[j].Close();
       
  1637 		test.Printf(_L("\n"));
       
  1638 		}
       
  1639 #endif
       
  1640 	}
       
  1641 
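       // Exercises the buffer window of a page-aligned pool: with an N-buffer window only N
       // buffers can be mapped at any one time, and the window cannot be resized once set.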
       
  1642 void BufferWindow()
       
  1643 	{
       
  1644 	test.Next(_L("Buffer Window tests"));
       
  1645 #ifdef __WINS__
       
  1646 	test.Printf(_L("Does not run on the emulator. Skipped\n"));
       
  1647 #else
       
  1648 	TInt r;
       
  1649 	RShPool pool;
       
  1650 	RShBuf buf[KTestPoolSizeInBufs * 2 + 1];
       
  1651 	TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs * 2);
       
  1652 	r = pool.Create(inf, KDefaultPoolHandleFlags);
       
  1653 	test_KErrNone(r);
       
  1654 
       
  1655 	// Allocate buffers but don't map them into this process's address space
       
  1656 	TInt i;
       
  1657 	for (i = 0; i < KTestPoolSizeInBufs * 2; i++)
       
  1658 		{
       
  1659 		r = buf[i].Alloc(pool, EShPoolAllocNoMap);
       
  1660 		test_KErrNone(r);
       
  1661 		}
       
  1662 
       
  1663 	// Pool is full
       
  1664 	r = buf[KTestPoolSizeInBufs * 2].Alloc(pool, EShPoolAllocNoMap);
       
  1665 	test_Equal(KErrNoMemory, r);
       
  1666 	r = buf[0].Map();
       
  1667 	test_Equal(KErrNoMemory, r);
       
  1668 
       
  1669 	// Open a one-buffer window
       
  1670 	r = pool.SetBufferWindow(1, ETrue);
       
  1671 	test_KErrNone(r);
       
  1672 	r = buf[0].Map();
       
  1673 	test_KErrNone(r);
       
  1674 	TPtr8 ptr0(buf[0].Ptr(), buf[0].Size(),buf[0].Size());
       
  1675 	ptr0.Fill('>');
       
  1676 	r = buf[1].Map();
       
  1677 	test_Equal(KErrNoMemory, r);
       
  1678 	r = buf[0].UnMap();
       
  1679 	test_KErrNone(r);
       
  1680 	r = buf[1].Map();
       
  1681 	test_KErrNone(r);
       
  1682 	TPtr8 ptr1(buf[1].Ptr(), buf[1].Size(), buf[1].Size());
       
  1683 	ptr1.Fill('<');
       
  1684 	r = buf[2].Map();
       
  1685 	test_Equal(KErrNoMemory, r);
       
  1686 
       
  1687 	// Attempt to enlarge the window by one buffer - not allowed once the window has been set
       
  1688 	r = pool.SetBufferWindow(2, ETrue);
       
  1689 	test_Equal(KErrAlreadyExists, r);
       
  1690 
       
  1691 	// Close all buffers
       
  1692 	for (i = 0; i < KTestPoolSizeInBufs * 2; i++)
       
  1693 		{
       
  1694 		buf[i].Close();
       
  1695 		}
       
  1696 
       
  1697 	pool.Close();
       
  1698 	r = pool.Create(inf, KDefaultPoolHandleFlags);
       
  1699 	test_KErrNone(r);
       
  1700 
       
  1701 	r = pool.SetBufferWindow(KTestPoolSizeInBufs, ETrue); // Half the pool size
       
  1702 	test_KErrNone(r);
       
  1703 	for (i = 0; i < KTestPoolSizeInBufs * 2 - 1; i++)
       
  1704 		{
       
  1705 		if (i < KTestPoolSizeInBufs)
       
  1706 			{
       
  1707 			r = buf[i].Alloc(pool, 0);
       
  1708 			test_KErrNone(r);
       
  1709 			TPtr8 ptr(buf[i].Ptr(), buf[i].Size(), buf[i].Size());
       
  1710 			ptr.Fill('?');
       
  1711 			}
       
  1712 		else
       
  1713 			{
       
  1714 			r = buf[i].Alloc(pool, EShPoolAllocNoMap);
       
  1715 			test_KErrNone(r);
       
  1716 			}
       
  1717 		}
       
  1718 	r = buf[KTestPoolSizeInBufs * 2].Alloc(pool, 0);
       
  1719 	test_Equal(KErrNoMemory, r);
       
  1720 	r = buf[KTestPoolSizeInBufs].Map();
       
  1721 	test_Equal(KErrNoMemory, r);
       
  1722 	r = buf[KTestPoolSizeInBufs * 2].Alloc(pool, EShPoolAllocNoMap);
       
  1723 	test_KErrNone(r);
       
  1724 
       
  1725 	// That's it
       
  1726 	for (i = 0; i < (KTestPoolSizeInBufs * 2)  + 1; i++)
       
  1727 		{
       
  1728 		buf[i].Close();
       
  1729 		}
       
  1730 	pool.Close();
       
  1731 
       
  1732 	// Try again with automap set to false
       
  1733 	RShPool pool2;
       
  1734 	r = pool2.Create(inf, KDefaultPoolHandleFlags);
       
  1735 	test_KErrNone(r);
       
  1736 	for (i = 0; i < KTestPoolSizeInBufs * 2; i++)
       
  1737 		{
       
  1738 		r = buf[i].Alloc(pool2, 0);
       
  1739 		test_KErrNone(r);
       
  1740 		}
       
  1741 	r = pool2.SetBufferWindow(-1, EFalse);
       
  1742 	test_KErrNone(r);
       
  1743 	for (i = 0; i < KTestPoolSizeInBufs * 2; i++)
       
  1744 		{
       
  1745 		r = buf[i].Map(ETrue);
       
  1746 		test_KErrNone(r);
       
  1747 		}
       
  1748 	for (i = 0; i < KTestPoolSizeInBufs * 2; i++)
       
  1749 		{
       
  1750 		buf[i].Close();
       
  1751 		}
       
  1752 	pool2.Close();
       
  1753 #endif
       
  1754 	}
       
  1755 
       
  1756 /*
       
  1757 @SYMTestCaseID				7
       
  1758 @SYMTestCaseDesc			Trigger notifications
       
  1759 @SYMREQ						REQ11423
       
  1760 @SYMTestActions
       
  1761 	Set Low Space Notifications at various thresholds.
       
  1762 	In a separate thread, keep allocating buffers.
       
  1763 @SYMTestExpectedResults
       
  1764 	Notifications are completed when their respective levels are reached.
       
  1765 @SYMTestPriority			Medium
       
  1766 */
       
  1767 
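       // Worker thread for the notification tests: allocates buffers until only one free buffer
       // remains, then frees them all, pausing on KTestLowSpaceSemaphore at the points where the
       // main thread expects its low-space/free-space notifications to have completed.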
       
  1768 TInt ThreadNotifications(TAny* aArg)
       
  1769 	{
       
  1770 	if (!aArg)
       
  1771 		{
       
  1772 		return KErrArgument;
       
  1773 		}
       
  1774 	RShPool* pool = (RShPool*) aArg;
       
  1775 	RArray<RShBuf> bufarray;
       
  1776 	TInt r;
       
  1777 	RSemaphore sem;
       
  1778 	r = sem.OpenGlobal(KTestLowSpaceSemaphore);
       
  1779 	if (r)
       
  1780 		{
       
  1781 		RDebug::Printf("Line %d: r=%d", __LINE__, r);
       
  1782 		return r;
       
  1783 		}
       
  1784 	// Start allocating buffers
       
  1785 	while (pool->FreeCount() > 1)
       
  1786 		{
       
  1787 		RShBuf buf;
       
  1788 		r = buf.Alloc(*pool);
       
  1789 		if (r)
       
  1790 			{
       
  1791 			RDebug::Printf("Line %d: count=%d r=%d", __LINE__, bufarray.Count(), r);
       
  1792 			return r;
       
  1793 			}
       
  1794 		bufarray.Append(buf);
       
  1795 		if ((bufarray.Count() == 1)								// wait for low3
       
  1796 			|| (bufarray.Count() == KTestPoolSizeInBufs - 2)	// wait for low2
       
  1797 			|| (bufarray.Count() == KTestPoolSizeInBufs - 1))	// wait for low1/low4
       
  1798 				{
       
  1799 				r = sem.Wait(5000000); // 5 second timeout
       
  1800 				if (r)
       
  1801 					{
       
  1802 					RDebug::Printf("Line %d: count=%d r=%d", __LINE__, bufarray.Count(), r);
       
  1803 					return r;
       
  1804 					}
       
  1805 				}
       
  1806 		}
       
  1807 
       
  1808 	// Free all buffers
       
  1809 	while (bufarray.Count())
       
  1810 		{
       
  1811 		bufarray[0].Close();
       
  1812 		bufarray.Remove(0);
       
  1813 		if ((bufarray.Count() == KTestPoolSizeInBufs - 2)		// wait for free3
       
  1814 			|| (bufarray.Count() == 1)							// wait for free2
       
  1815 			|| (bufarray.Count() == 0))							// wait for free1/free4
       
  1816 				{
       
  1817 				r = sem.Wait(5000000); // 5 second timeout
       
  1818 				if (r)
       
  1819 					{
       
  1820 					RDebug::Printf("Line %d: count=%d r=%d", __LINE__, bufarray.Count(), r);
       
  1821 					return r;
       
  1822 					}
       
  1823 				}
       
  1824 		}
       
  1825 	bufarray.Close();
       
  1826 	sem.Close();
       
  1827 	return KErrNone;
       
  1828 	}
       
  1829 
       
  1830 enum TTestLowSpaceType
       
  1831 	{
       
  1832 	ETestCancelNonExistent,
       
  1833 	ETestCancelTwice
       
  1834 	};
       
  1835 
       
  1836 struct TTestThreadLowSpacePanicArgs
       
  1837 	{
       
  1838 	RShPool*			iPool;
       
  1839 	TUint				iThreshold1;
       
  1840 	TUint				iThreshold2;
       
  1841 	TTestLowSpaceType	iType;
       
  1842 	};
       
  1843 
       
  1844 TInt ThreadLowSpacePanic(TAny* aArg)
       
  1845 	{
       
  1846 	if (!aArg)
       
  1847 		{
       
  1848 		return KErrArgument;
       
  1849 		}
       
  1850 	TTestThreadLowSpacePanicArgs& targs = *(TTestThreadLowSpacePanicArgs*) aArg;
       
  1851 	TRequestStatus rs;
       
  1852 	if (targs.iType == ETestCancelNonExistent)
       
  1853 		{
       
  1854 		targs.iPool->CancelLowSpaceNotification(rs); // should panic
       
  1855 		}
       
  1856 	else if (targs.iType == ETestCancelTwice)
       
  1857 		{
       
  1858 		targs.iPool->RequestLowSpaceNotification(targs.iThreshold1, rs);
       
  1859 		targs.iPool->CancelLowSpaceNotification(rs);
       
  1860 		targs.iPool->CancelLowSpaceNotification(rs); // should panic
       
  1861 		}
       
  1862 	else
       
  1863 		{
       
  1864 		return KErrArgument;
       
  1865 		}
       
  1866 	return KErrNone;
       
  1867 	}
       
  1868 
       
  1869 /*
       
  1870  * CancelLowSpaceNotification() no longer panic()s if it can't find the
       
 * notification, so this routine is not currently called.
       
  1872  */
       
  1873 void RequestLowSpacePanic(RShPool& aPool, TUint aThreshold1, TUint aThreshold2, TTestLowSpaceType aType, TInt aLine)
       
  1874 	{
       
  1875 	static TInt count = 0;
       
  1876 	count++;
       
  1877 	test.Printf(_L("RequestLowSpacePanic@%d(%d)\n"), aLine, count);
       
  1878 	TBool jit = User::JustInTime();
       
  1879 	User::SetJustInTime(EFalse);
       
  1880 	TInt expectedpaniccode = KErrNone;	// Initialised to silence compiler warnings
       
  1881 	switch (aType)
       
  1882 		{
       
  1883 		case ETestCancelNonExistent:
       
  1884 		case ETestCancelTwice:
       
  1885 			expectedpaniccode = KErrNotFound;
       
  1886 			break;
       
  1887 		default:
       
  1888 			test(EFalse);
       
  1889 		}
       
  1890 	//
       
  1891 	TTestThreadLowSpacePanicArgs targs;
       
  1892 	targs.iPool = &aPool;
       
  1893 	targs.iThreshold1 = aThreshold1;
       
  1894 	targs.iThreshold2 = aThreshold2;
       
  1895 	targs.iType = aType;
       
  1896 	//
       
  1897 	RThread threadpanic;
       
  1898 	TRequestStatus threadpanicrs;
       
  1899 	TInt r;
       
  1900 	TBuf<30> threadname;
       
  1901 	threadname.Format(_L("ThreadLowSpacePanic%d"), count);
       
  1902 	r = threadpanic.Create(threadname, ThreadLowSpacePanic, KDefaultStackSize, KMinHeapSize, 1 << 20, (TAny*) &targs);
       
  1903 	test_KErrNone(r);
       
  1904 	threadpanic.Logon(threadpanicrs);
       
  1905 	threadpanic.Resume();
       
  1906 	User::WaitForRequest(threadpanicrs);
       
  1907 	//
       
  1908 	test_Equal(expectedpaniccode, threadpanicrs.Int());
       
  1909 	test_Equal(EExitPanic, threadpanic.ExitType());
       
  1910 	test_Equal(expectedpaniccode, threadpanic.ExitReason());
       
  1911 	threadpanic.Close();
       
  1912 	User::SetJustInTime(jit);
       
  1913 	}
       
  1914 
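       // Queues low-space and free-space notifications at various thresholds, then lets
       // ThreadNotifications drain and refill the pool, checking that each request completes
       // only when its threshold is crossed.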
       
  1915 void NotificationRequests(RShPool& aPool)
       
  1916 	{
       
  1917 	test.Next(_L("Notifications"));
       
  1918 	TInt r;
       
  1919 
       
  1920 	RSemaphore sem;
       
  1921 	r = sem.CreateGlobal(KTestLowSpaceSemaphore, 0);
       
  1922 	test_KErrNone(r);
       
  1923 	RTimer timer;
       
  1924 	r = timer.CreateLocal();
       
  1925 	test_KErrNone(r);
       
  1926 	RThread thread;
       
  1927 	TRequestStatus threadrs;
       
  1928 	r = thread.Create(_L("ThreadNotifications"), ThreadNotifications, KDefaultStackSize, KMinHeapSize, 1 << 20, (TAny*) &aPool);
       
  1929 	test_KErrNone(r);
       
  1930 	thread.SetPriority(EPriorityMore);
       
  1931 	thread.Logon(threadrs);
       
  1932 
       
  1933 	test.Printf(_L("Low space notification\n"));
       
  1934 	TRequestStatus low1;
       
  1935 	TRequestStatus low2;
       
  1936 	TRequestStatus low3;
       
  1937 	TRequestStatus low4;
       
  1938 	TRequestStatus low5;
       
  1939 	TRequestStatus low6;
       
  1940 	aPool.RequestLowSpaceNotification(1, low1);
       
  1941 	test_Equal(KRequestPending, low1.Int());
       
  1942 	aPool.RequestLowSpaceNotification(2, low2);
       
  1943 	test_Equal(KRequestPending, low2.Int());
       
  1944 	aPool.RequestLowSpaceNotification(aPool.FreeCount() - 1, low3);
       
  1945 	test_Equal(KRequestPending, low3.Int());
       
  1946 	aPool.RequestLowSpaceNotification(1, low4);
       
  1947 	test_Equal(KRequestPending, low4.Int());
       
  1948 	aPool.RequestLowSpaceNotification(0, low5); // Never completes
       
  1949 	test_Equal(KRequestPending, low5.Int());
       
  1950 	aPool.RequestLowSpaceNotification(KMaxTUint, low6); // Completes instantly
       
  1951 	TRequestStatus timeoutlow;
       
  1952 	timer.After(timeoutlow, 5000000); // 5 seconds time out
       
  1953 	User::WaitForRequest(low6, timeoutlow);
       
  1954 	test_KErrNone(low6.Int());
       
  1955 	test_Equal(KRequestPending, low1.Int());
       
  1956 	test_Equal(KRequestPending, low2.Int());
       
  1957 	test_Equal(KRequestPending, low3.Int());
       
  1958 	test_Equal(KRequestPending, low4.Int());
       
  1959 	test_Equal(KRequestPending, low5.Int());
       
  1960 	timer.Cancel();
       
  1961 	User::WaitForRequest(timeoutlow);
       
  1962 	thread.Resume();
       
  1963 	User::WaitForRequest(low3, threadrs);
       
  1964 	test_KErrNone(low3.Int());
       
  1965 	test_Equal(KRequestPending, low1.Int());
       
  1966 	test_Equal(KRequestPending, low2.Int());
       
  1967 	test_Equal(KRequestPending, low4.Int());
       
  1968 	test_Equal(KRequestPending, low5.Int());
       
  1969 	sem.Signal();
       
  1970 	User::WaitForRequest(low2, threadrs);
       
  1971 	test_KErrNone(low2.Int());
       
  1972 	test_Equal(KRequestPending, low1.Int());
       
  1973 	test_Equal(KRequestPending, low4.Int());
       
  1974 	test_Equal(KRequestPending, low5.Int());
       
  1975 	sem.Signal();
       
  1976 	User::WaitForRequest(low1, threadrs);
       
  1977 	test_KErrNone(low1.Int());
       
  1978 	User::WaitForRequest(low4, threadrs);
       
  1979 	test_KErrNone(low4.Int());
       
  1980 	test_Equal(KRequestPending, low5.Int());
       
  1981 	test_Equal(EExitPending, thread.ExitType()); // Thread is still running
       
  1982 	test_Compare(aPool.FreeCount(), <=, 1);
       
  1983 
       
  1984 	test.Printf(_L("Free space notification\n"));
       
  1985 	TRequestStatus free1;
       
  1986 	TRequestStatus free2;
       
  1987 	TRequestStatus free3;
       
  1988 	TRequestStatus free4;
       
  1989 	TRequestStatus free5;
       
  1990 	TRequestStatus free6;
       
  1991 	aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs, free1);
       
  1992 	test_Equal(KRequestPending, free1.Int());
       
  1993 	aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs - 1, free2);
       
  1994 	test_Equal(KRequestPending, free2.Int());
       
  1995 	aPool.RequestFreeSpaceNotification(aPool.FreeCount() + 1, free3);
       
  1996 	test_Equal(KRequestPending, free3.Int());
       
  1997 	aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs, free4);
       
  1998 	test_Equal(KRequestPending, free4.Int());
       
  1999 	aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs + 1, free5); // Never completes
       
  2000 	test_Equal(KRequestPending, free5.Int());
       
  2001 	aPool.RequestFreeSpaceNotification(0, free6); // Completes instantly
       
  2002 
       
  2003 	TRequestStatus timeoutfree;
       
  2004 	timer.After(timeoutfree, 5000000); // 5 seconds time out
       
  2005 	User::WaitForRequest(free6, timeoutfree);
       
  2006 	test_KErrNone(free6.Int());
       
  2007 
       
  2008 	test_Equal(KRequestPending, free1.Int());
       
  2009 	test_Equal(KRequestPending, free2.Int());
       
  2010 	test_Equal(KRequestPending, free3.Int());
       
  2011 	test_Equal(KRequestPending, free4.Int());
       
  2012 	test_Equal(KRequestPending, free5.Int());
       
  2013 
       
  2014 	timer.Cancel();
       
  2015 	User::WaitForRequest(timeoutfree);
       
  2016 
       
  2017 	sem.Signal();	// resume thread execution
       
  2018 	User::WaitForRequest(free3, threadrs);
       
  2019 	test_KErrNone(free3.Int());
       
  2020 	test_Equal(KRequestPending, free1.Int());
       
  2021 	test_Equal(KRequestPending, free2.Int());
       
  2022 	test_Equal(KRequestPending, free4.Int());
       
  2023 	test_Equal(KRequestPending, free5.Int());
       
  2024 
       
  2025 	sem.Signal();
       
  2026 	User::WaitForRequest(free2, threadrs);
       
  2027 	test_KErrNone(free2.Int());
       
  2028 
       
  2029 	test_Equal(KRequestPending, free1.Int());
       
  2030 	test_Equal(KRequestPending, free4.Int());
       
  2031 	test_Equal(KRequestPending, free5.Int());
       
  2032 	sem.Signal();
       
  2033 
       
  2034 	User::WaitForRequest(free1, threadrs);
       
  2035 	test_KErrNone(free1.Int());
       
  2036 	test_KErrNone(free4.Int());
       
  2037 
       
  2038 	test_Equal(KRequestPending, free5.Int());
       
  2039 	test_Equal(EExitPending, thread.ExitType()); // Thread is still running
       
  2040 
       
  2041 	test_Compare(aPool.FreeCount(), >=, KTestPoolSizeInBufs);
       
  2042 
       
  2043 	// Complete the requests still pending...
       
  2044 	aPool.CancelLowSpaceNotification(low5);
       
  2045 	User::WaitForRequest(low5);
       
  2046 
       
  2047 	aPool.CancelFreeSpaceNotification(free5);
       
  2048 	User::WaitForRequest(free5);
       
  2049 
       
  2050 	// Let thread complete
       
  2051 	sem.Signal();
       
  2052 	User::WaitForRequest(threadrs);
       
  2053 	test_Equal(EExitKill, thread.ExitType());
       
  2054 	test_KErrNone(thread.ExitReason());
       
  2055 	thread.Close();
       
  2056 	sem.Close();
       
  2057 	timer.Close();
       
  2058 	}
       
  2059 
       
  2060 /*
       
  2061 @SYMTestCaseID				9
       
  2062 @SYMTestCaseDesc			Cancel low- and free-space notifications
       
  2063 @SYMREQ						REQ11423
       
  2064 @SYMTestActions
       
  2065 	Set low-space and free-space notifications.
       
  2066 	Cancel them.
       
  2067 @SYMTestExpectedResults
       
  2068 	All OK.
       
  2069 @SYMTestPriority			Medium
       
  2070 */
       
  2071 
       
  2072 void CancelNotificationRequests(RShPool& aPool)
       
  2073 	{
       
  2074 	test.Next(_L("Cancel notifications"));
       
  2075 	TInt r;
       
  2076 
       
  2077 	RSemaphore sem;
       
  2078 	r = sem.CreateGlobal(KTestLowSpaceSemaphore, 0);
       
  2079 	test_KErrNone(r);
       
  2080 	RThread thread;
       
  2081 	TRequestStatus threadrs;
       
  2082 	r = thread.Create(_L("ThreadCancelNotifications"), ThreadNotifications, KDefaultStackSize, KMinHeapSize, 1 << 20, (TAny*) &aPool);
       
  2083 	test_KErrNone(r);
       
  2084 	thread.SetPriority(EPriorityLess);
       
  2085 	thread.Logon(threadrs);
       
  2086 
       
  2087 	test.Printf(_L("Cancel low space notifications\n"));
       
  2088 	// Low space notification cancel
       
  2089 	TRequestStatus low;
       
  2090 	aPool.RequestLowSpaceNotification(1, low);
       
  2091 	aPool.CancelLowSpaceNotification(low);
       
  2092 	test_Equal(KErrCancel, low.Int());
       
  2093 	// We should be able to cancel again without panic()ing
       
  2094 	// (no guarantees on return code; maybe Cancel() should have void return type?)
       
  2095 	aPool.CancelLowSpaceNotification(low);
       
  2096 	test.Printf(_L("Second cancel returned %d\n"), low.Int());
       
  2097 	TRequestStatus low2;
       
  2098 	aPool.RequestLowSpaceNotification(1, low2); // For thread sync
       
  2099 	thread.Resume();
       
  2100 	sem.Signal(2);
       
  2101 	User::WaitForRequest(low2, threadrs);
       
  2102 	test_KErrNone(low2.Int());
       
  2103 	test_Equal(EExitPending, thread.ExitType()); // Thread is still running
       
  2104 	test_Compare(aPool.FreeCount(), <=, 1);
       
  2105 
       
  2106 	test.Printf(_L("Cancel free space notifications\n"));
       
  2107 	TRequestStatus free;
       
  2108 	aPool.CancelFreeSpaceNotification(free);	// Cancel non-existent notification
       
  2109 	aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs, free);
       
  2110 	aPool.CancelLowSpaceNotification(free);		// Use wrong method
       
  2111 	aPool.CancelFreeSpaceNotification(free);		// Use correct method
       
  2112 	test_Equal(KErrCancel, free.Int());
       
  2113 	aPool.CancelFreeSpaceNotification(free);		// Already cancelled
       
  2114 
       
  2115 	// Complete the requests still pending...
       
  2116 	User::WaitForRequest(low);
       
  2117 
       
  2118 	sem.Signal(4); // Resume thread execution and let it complete
       
  2119 	User::WaitForRequest(threadrs);
       
  2120 	test_KErrNone(threadrs.Int());
       
  2121 	test_Equal(EExitKill, thread.ExitType());
       
  2122 	test_KErrNone(thread.ExitReason());
       
  2123 	test_Compare(aPool.FreeCount(), >=, KTestPoolSizeInBufs);
       
  2124 	thread.Close();
       
  2125 	sem.Close();
       
  2126 	}
       
  2127 
       
  2128 
       
  2129 /*
       
  2130 @SYMTestCaseID				10
       
  2131 @SYMTestCaseDesc			Grow and shrink pool
       
  2132 @SYMREQ						REQ11423
       
  2133 @SYMTestActions
       
  2134 	1. Test Thread creates pools with various size attributes.
       
  2135 	2. Test Thread keeps allocating buffers from the pool.
       
  2136 	3. Test Thread keeps freeing buffers back to the pool.
       
  2137 	4. Test Thread frees all buffers and closes the pool.
       
  2138 @SYMTestExpectedResults
       
  2139 	Pools grow and shrink as expected.
       
  2140 @SYMTestPriority			High
       
  2141 */
       
  2142 
       
  2143 const TInt KTestFreeCountTimeOut = 20000000; // 20 seconds (of thread inactivity)
       
  2144 const TInt KTestWaitBeforeRetry = 2000; // 0.002 second
       
  2145 
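       // Multiply n by the 24.8 fixed-point fraction f (the format used by the pool sizing
       // ratios), saturating the result at KMaxTUint32.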
       
  2146 TUint MultFx248(TUint n, TUint f)
       
  2147 	{
       
  2148 	TUint64 r = (TUint64) n * f;
       
  2149 	I64LSR(r, 8);
       
  2150 	return r > KMaxTUint32 ? KMaxTUint32 : I64LOW(r);
       
  2151 	}
       
  2152 
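       // Reference model of the pool grow/shrink algorithm: mirrors every Alloc()/Free() and
       // recalculates the grow/shrink triggers, so the test can predict the FreeCount() that
       // the real pool should eventually report.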
       
  2153 class TTestPoolModel
       
  2154 	{
       
  2155 public:
       
  2156 	TTestPoolModel(TShPoolInfo& aInfo);
       
  2157 	void Alloc();
       
  2158 	void Free();
       
  2159 	TUint FreeCount();
       
  2160 	void DisplayCounters();
       
  2161 private:
       
  2162 	void CalcGSP();
       
  2163 	void CheckGrowShrink();
       
  2164 	void Grow();
       
  2165 	void Shrink();
       
  2166 private:
       
  2167 	TUint iAllocated;
       
  2168 	TUint iFree;
       
  2169 	//
       
  2170 	TUint iInitial;
       
  2171 	TUint iMax;
       
  2172 	TUint iGrowTriggerRatio;
       
  2173 	TUint iGrowByRatio;
       
  2174 	TUint iShrinkByRatio;
       
  2175 	TUint iShrinkHysteresisRatio;
       
  2176 	TUint iPoolFlags;
       
  2177 	//
       
  2178 	TUint iGrowTrigger;
       
  2179 	TUint iShrinkTrigger;
       
  2180 	//
       
  2181 	TBool iDebug;
       
  2182 	};
       
  2183 
       
  2184 TTestPoolModel::TTestPoolModel(TShPoolInfo& aInfo)
       
  2185 	{
       
  2186 	iInitial = aInfo.iInitialBufs;
       
  2187 	iMax = aInfo.iMaxBufs;
       
  2188 	iGrowTriggerRatio = aInfo.iGrowTriggerRatio;
       
  2189 	iGrowByRatio = aInfo.iGrowByRatio;
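       	// The shrink-by ratio is derived from the grow-by ratio so that one shrink undoes one
       	// grow: 256 - 65536/(256 + g) == 256*g/(256 + g), i.e. g/(256 + g) in 24.8 fixed point.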
       
  2190 	iShrinkByRatio = 256 - 65536 / (256 + iGrowByRatio);
       
  2191 	iShrinkHysteresisRatio = aInfo.iShrinkHysteresisRatio;
       
  2192 	iPoolFlags = aInfo.iFlags;
       
  2193 	iAllocated = 0;
       
  2194 	iFree = iInitial;
       
  2195 	iDebug = EFalse; // Set this to ETrue to display detailed information
       
  2196 	
       
  2197 	CalcGSP();
       
  2198 	if (iDebug)
       
  2199 		{
       
  2200 		test.Printf(_L("A     F     A+F   GT    ST    \n"));
       
  2201 		test.Printf(_L("==============================\n"));
       
  2202 		DisplayCounters();
       
  2203 		}
       
  2204 	}
       
  2205 
       
  2206 void TTestPoolModel::Alloc()
       
  2207 	{
       
  2208 	iAllocated++;
       
  2209 	iFree--;
       
  2210 	CheckGrowShrink();
       
  2211 	}
       
  2212 
       
  2213 void TTestPoolModel::Free()
       
  2214 	{
       
  2215 	iAllocated--;
       
  2216 	iFree++;
       
  2217 	CheckGrowShrink();
       
  2218 	}
       
  2219 
       
  2220 TUint TTestPoolModel::FreeCount()
       
  2221 	{
       
  2222 	return iFree;
       
  2223 	}
       
  2224 
       
  2225 void TTestPoolModel::CalcGSP()
       
  2226 	{
       
  2227 	TUint n = iAllocated + iFree;
       
  2228 
       
  2229 	// If the pool is at its maximum size, or growing is disabled, we can't grow
       
  2230 	if (n >= iMax || iGrowTriggerRatio == 0 /*|| iCommittedPages >= iMaxPages*/)
       
  2231 		{
       
  2232 		iGrowTrigger = 0;
       
  2233 		}
       
  2234 	else
       
  2235 		{
       
  2236 		iGrowTrigger = MultFx248(n, iGrowTriggerRatio);
       
  2237 
       
  2238 		// Deal with rounding towards zero
       
  2239 		if (iGrowTrigger == 0)
       
  2240 			iGrowTrigger = 1;
       
  2241 		}
       
  2242 
       
  2243 	// If the pool hasn't grown, growing is disabled or shrinking is suppressed, we can't shrink
       
  2244 	if (n <= iInitial || iGrowTriggerRatio == 0 || (iPoolFlags & EShPoolSuppressShrink) != 0)
       
  2245 		{
       
  2246 		iShrinkTrigger = iMax;
       
  2247 		}
       
  2248 	else
       
  2249 		{
       
  2250 		// To ensure that shrinking doesn't immediately happen after growing, the trigger
       
  2251 		// amount is the grow trigger + the grow amount (which is the number of free buffers
       
  2252 		// just after a grow) times the shrink hysteresis value.
       
  2253 		iShrinkTrigger = MultFx248(n, iGrowTriggerRatio + iGrowByRatio);
       
  2254 		iShrinkTrigger = MultFx248(iShrinkTrigger, iShrinkHysteresisRatio);
       
  2255 
       
  2256 		// Deal with rounding towards zero
       
  2257 		if (iShrinkTrigger == 0)
       
  2258 			iShrinkTrigger = 1;
       
  2259 
       
  2260 		// If the shrink trigger ends up > the number of buffers currently in
       
  2261 		// the pool, set it to that number (less 1, since the test is "> trigger").
       
  2262 		// This means the pool will only shrink when all the buffers have been freed.
       
  2263 		if (iShrinkTrigger >= n)
       
  2264 			iShrinkTrigger = n - 1;
       
  2265 		}
       
  2266 	if (iDebug)
       
  2267 		{
       
  2268 		DisplayCounters();
       
  2269 		}
       
  2270 	}
       
  2271 
       
  2272 void TTestPoolModel::CheckGrowShrink()
       
  2273 	{
       
  2274 	if (iFree < iGrowTrigger)
       
  2275 		{
       
  2276 		Grow();
       
  2277 		CheckGrowShrink();
       
  2278 		}
       
  2279 	if (iFree > iShrinkTrigger)
       
  2280 		{
       
  2281 		Shrink();
       
  2282 		CheckGrowShrink();
       
  2283 		}
       
  2284 	}
       
  2285 
       
  2286 void TTestPoolModel::Grow()
       
  2287 	{
       
  2288 	TUint headroom = iMax - (iAllocated + iFree);
       
  2289 	TUint growby = MultFx248(iAllocated + iFree, iGrowByRatio);
       
  2290 	if (growby == 0)			// Handle round-to-zero
       
  2291 		growby = 1;
       
  2292 	if (growby > headroom)
       
  2293 		growby = headroom;
       
  2294 	iFree += growby;
       
  2295 	if (iDebug)
       
  2296 		{
       
  2297 		test.Printf(_L("GROW by %d!\n"), growby);
       
  2298 		}
       
  2299 	CalcGSP();
       
  2300 	}
       
  2301 
       
  2302 void TTestPoolModel::Shrink()
       
  2303 	{
       
  2304 	TUint grownBy = iAllocated + iFree - iInitial;
       
  2305 	TUint shrinkby = MultFx248(iAllocated + iFree, iShrinkByRatio);
       
  2306 	if (shrinkby == 0)			// Handle round-to-zero
       
  2307 		shrinkby = 1;
       
  2308 	if (shrinkby > grownBy)
       
  2309 		shrinkby = grownBy;
       
  2310 	if (shrinkby > iFree)
       
  2311 		shrinkby = iFree;
       
  2312 	iFree -= shrinkby;
       
  2313 	if (iDebug)
       
  2314 		{
       
  2315 		test.Printf(_L("SHRINK by %d!\n"), shrinkby);
       
  2316 		}
       
  2317 	CalcGSP();
       
  2318 	}
       
  2319 
       
  2320 void TTestPoolModel::DisplayCounters()
       
  2321 	{
       
  2322 	test.Printf(_L("%-6u%-6u%-6u%-6u%-6u\n"), iAllocated, iFree, iAllocated + iFree, iGrowTrigger, iShrinkTrigger);
       
  2323 	}
       
  2324 
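       // Fills the pool to its maximum size, frees roughly a third of the buffers, refills it,
       // then frees everything. After each step the routine waits (with a timeout) for the
       // pool's FreeCount() to match the model, since growing and shrinking are performed
       // asynchronously by the kernel's management DFC.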
       
  2325 void PoolGrowingTestRoutine(const TShPoolCreateInfo& aInfo, TUint aBufferFlags = 0)
       
  2326 	{
       
  2327 	TInt r;
       
  2328 	TInt timeout;
       
  2329 	RShPool pool;
       
  2330 	r = pool.Create(aInfo, KDefaultPoolHandleFlags);
       
  2331 	test_KErrNone(r);
       
  2332 
       
  2333 	TShPoolInfo info;
       
  2334 	pool.GetInfo(info);
       
  2335 
       
  2336 	// Only set the buffer window if we're going to map the buffers
       
  2337 	if (!(aBufferFlags & EShPoolAllocNoMap) && (info.iFlags & EShPoolPageAlignedBuffer))
       
  2338 		{
       
  2339 		r = pool.SetBufferWindow(-1, ETrue);
       
  2340 		test_KErrNone(r);
       
  2341 		}
       
  2342 
       
  2343 	TTestPoolModel model(info);
       
  2344 	RArray<RShBuf> bufarray;
       
  2345 	test_Equal(info.iInitialBufs, pool.FreeCount());
       
  2346 
       
  2347 	// Buffer allocation
       
  2348 	do
       
  2349 		{
       
  2350 		timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry;
       
  2351 		while (model.FreeCount() != pool.FreeCount())
       
  2352 			{
       
  2353 			User::After(KTestWaitBeforeRetry);
       
  2354 			test_Assert(--timeout,
       
  2355 				test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount());
       
  2356 				model.DisplayCounters();
       
  2357 				);
       
  2358 			if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0)
       
  2359 				{
       
  2360 				test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__);
       
  2361 				}
       
  2362 			}
       
  2363 		RShBuf buf;
       
  2364 		r = buf.Alloc(pool, aBufferFlags);
       
  2365 		if (r == KErrNoMemory)
       
  2366 			{
       
  2367 			// We expect to get a failure when all buffers are allocated
       
  2368 			if ((TUint) bufarray.Count() == info.iMaxBufs)
       
  2369 				break;
       
  2370 			if (!(aBufferFlags & EShPoolAllocCanWait))
       
  2371 				{
       
  2372 				// Give the Management DFC some time to run, then try allocating again
       
  2373 				User::After(1000000); // 1 second
       
  2374 				r = buf.Alloc(pool);
       
  2375 				if (r)
       
  2376 					{
       
  2377 					test.Printf(_L("Alloc fail after %d of %d; Free==%u (expected %u)\n"),
       
  2378 						bufarray.Count(), info.iMaxBufs, pool.FreeCount(), model.FreeCount());
       
  2379 					break;
       
  2380 					}
       
  2381 				}
       
  2382 			}
       
  2383 
       
  2384 		if (r == KErrNone)
       
  2385 			{
       
  2386 			model.Alloc();
       
  2387 			if (!(aBufferFlags & EShPoolAllocNoMap))
       
  2388 				{
       
  2389 				TPtr8 ptr(buf.Ptr(), buf.Size(),buf.Size());
       
  2390 				ptr.Fill(bufarray.Count() % 256);
       
  2391 				}
       
  2392 			bufarray.Append(buf);
       
  2393 			}
       
  2394 		}
       
  2395 	while (r == KErrNone);
       
  2396 
       
  2397 	test_Equal(KErrNoMemory, r);
       
  2398 	test_Equal(info.iMaxBufs, bufarray.Count());
       
  2399 	test_Equal(0, pool.FreeCount());
       
  2400 
       
  2401 	// Now free no more than 1/3 of these buffers...
       
  2402 	while ((TUint) bufarray.Count() > 2 * info.iMaxBufs / 3)
       
  2403 		{
       
  2404 		// remove buffers from the back of the array
       
  2405 		if (!(aBufferFlags & EShPoolAllocNoMap))
       
  2406 			{
       
  2407 			TPtr8 ptr(bufarray[bufarray.Count() - 1].Ptr(), bufarray[bufarray.Count() - 1].Size(),bufarray[bufarray.Count() - 1].Size());
       
  2408 			ptr.Fill((bufarray.Count() + 1) % 256);
       
  2409 			}
       
  2410 		bufarray[bufarray.Count() - 1].Close();
       
  2411 		bufarray.Remove(bufarray.Count() - 1);
       
  2412 		model.Free();
       
  2413 		
       
  2414 		timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry;
       
  2415 		while (model.FreeCount() != pool.FreeCount())
       
  2416 			{
       
  2417 			User::After(KTestWaitBeforeRetry);
       
  2418 			test_Assert(--timeout,
       
  2419 				test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount());
       
  2420 				model.DisplayCounters();
       
  2421 				);
       
  2422 			if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0)
       
  2423 				{
       
  2424 				test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__);
       
  2425 				}
       
  2426 			}
       
  2427 		}
       
  2428 
       
  2429 	// ... and re-allocate them
       
  2430 	do
       
  2431 		{
       
  2432 		timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry;
       
  2433 		while (model.FreeCount() != pool.FreeCount())
       
  2434 			{
       
  2435 			User::After(KTestWaitBeforeRetry);
       
  2436 			test_Assert(--timeout,
       
  2437 				test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount());
       
  2438 				model.DisplayCounters();
       
  2439 				);
       
  2440 			if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0)
       
  2441 				{
       
  2442 				test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__);
       
  2443 				}
       
  2444 			}
       
  2445 		RShBuf buf;
       
  2446 		r = buf.Alloc(pool, aBufferFlags);
       
  2447 		if (r == KErrNoMemory)
       
  2448 			{
       
  2449 			// We expect to get a failure when all buffers are allocated
       
  2450 			if ((TUint) bufarray.Count() == info.iMaxBufs)
       
  2451 				break;
       
  2452 			if (!(aBufferFlags & EShPoolAllocCanWait))
       
  2453 				{
       
  2454 				// Give the Management DFC some time to run, then try allocating again
       
  2455 				User::After(1000000); // 1 second
       
  2456 				r = buf.Alloc(pool);
       
  2457 				if (r)
       
  2458 					{
       
  2459 					test.Printf(_L("Alloc fail after %d of %d; Free==%u (expected %u)\n"),
       
  2460 						bufarray.Count(), info.iMaxBufs, pool.FreeCount(), model.FreeCount());
       
  2461 					break;
       
  2462 					}
       
  2463 				}
       
  2464 			}
       
  2465 
       
  2466 		if (r == KErrNone)
       
  2467 			{
       
  2468 			model.Alloc();
       
  2469 			if (!(aBufferFlags & EShPoolAllocNoMap))
       
  2470 				{
       
  2471 				TPtr8 ptr(buf.Ptr(), buf.Size(),buf.Size());
       
  2472 				ptr.Fill(bufarray.Count() % 256);
       
  2473 				}
       
  2474 			bufarray.Append(buf);
       
  2475 			}
       
  2476 		}
       
  2477 	while (r == KErrNone);
       
  2478 
       
  2479 	test_Equal(KErrNoMemory, r);
       
  2480 	test_Equal(info.iMaxBufs, bufarray.Count());
       
  2481 	test_Equal(0, pool.FreeCount());
       
  2482 
       
  2483 	// Free all buffers
       
  2484 	while (bufarray.Count())
       
  2485 		{
       
  2486 		// remove buffers from the back of the array
       
  2487 		if (!(aBufferFlags & EShPoolAllocNoMap))
       
  2488 			{
       
  2489 			TPtr8 ptr(bufarray[bufarray.Count() - 1].Ptr(), bufarray[bufarray.Count() - 1].Size(),bufarray[bufarray.Count() - 1].Size());
       
  2490 			ptr.Fill((bufarray.Count() + 1) % 256);
       
  2491 			}
       
  2492 		bufarray[bufarray.Count() - 1].Close();
       
  2493 		bufarray.Remove(bufarray.Count() - 1);
       
  2494 		model.Free();
       
  2495 		
       
  2496 		timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry;
       
  2497 		while (model.FreeCount() != pool.FreeCount())
       
  2498 			{
       
  2499 			User::After(KTestWaitBeforeRetry);
       
  2500 			test_Assert(--timeout,
       
  2501 				test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount());
       
  2502 				model.DisplayCounters();
       
  2503 				);
       
  2504 			if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0)
       
  2505 				{
       
  2506 				test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__);
       
  2507 				}
       
  2508 			}
       
  2509 		}
       
  2510 
       
  2511 	// Pool should have shrunk back to its initial size
       
  2512 	test_Equal(info.iInitialBufs, pool.FreeCount());
       
  2513 	bufarray.Close();
       
  2514 	pool.Close();
       
  2515 	}
       
  2516 
       
  2517 void PoolGrowingUser()
       
  2518 	{
       
  2519 	test.Next(_L("Pool Growing/Shrinking (User)"));
       
  2520 	TInt r;
       
  2521 	TInt pagesize;
       
  2522 	r = HAL::Get(HAL::EMemoryPageSize, pagesize);
       
  2523 	test_KErrNone(r);
       
  2524 	// Pool A: Non-page aligned pool (64-byte alignment)
       
  2525 		{
       
  2526 		TInt alignment = 6;
       
  2527 		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
       
  2528 		if (maxbufs > 32000)
       
  2529 			{
       
  2530 			maxbufs = 32000;
       
  2531 			}
       
  2532 		TInt initialbufs = maxbufs / 2;
       
  2533 		TInt growtrigger = 32;
       
  2534 		TInt growby = 32;
       
  2535 		TInt shrinkhys = 288;
       
  2536 		test.Printf(_L("POOL A: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
       
  2537 			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
       
  2538 		TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
       
  2539 		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
       
  2540 		test_KErrNone(r);
       
  2541 		PoolGrowingTestRoutine(inf);
       
  2542 		}
       
  2543 
       
  2544 	// Pool B: Non-page aligned pool (maximum alignment)
       
  2545 		{
       
  2546 		TInt alignment = Log2(pagesize);
       
  2547 		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
       
  2548 		if (maxbufs > 32000)
       
  2549 			{
       
  2550 			maxbufs = 32000;
       
  2551 			}
       
  2552 		TInt initialbufs = maxbufs / 4;
       
  2553 		TInt growtrigger = 32;
       
  2554 		TInt growby = 32;
       
  2555 		TInt shrinkhys = 288;
       
  2556 		test.Printf(_L("POOL B: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
       
  2557 			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
       
  2558 		TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
       
  2559 		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
       
  2560 		test_KErrNone(r);
       
  2561 		PoolGrowingTestRoutine(inf);
       
  2562 		}
       
  2563 	
       
  2564 	// Pool C: Page aligned pool without guard pages
       
  2565 		{
       
  2566 		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
       
  2567 		if (maxbufs > 32000)
       
  2568 			{
       
  2569 			maxbufs = 32000;
       
  2570 			}
       
  2571 		TInt initialbufs = maxbufs * 3 / 8;
       
  2572 		TInt growtrigger = 32;
       
  2573 		TInt growby = 32;
       
  2574 		TInt shrinkhys = 288;
       
  2575 		test.Printf(_L("POOL C: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned\n"),
       
  2576 			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
       
  2577 		TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
       
  2578 		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
       
  2579 		test_KErrNone(r);
       
  2580 		PoolGrowingTestRoutine(inf);
       
  2581 		}
       
  2582 
       
  2583 	// Pool D: Page aligned pool with guard pages
       
  2584 		{
       
  2585 		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
       
  2586 		if (maxbufs > 32000)
       
  2587 			{
       
  2588 			maxbufs = 32000;
       
  2589 			}
       
  2590 		TInt initialbufs = maxbufs / 2;
       
  2591 		TInt growtrigger = 32;
       
  2592 		TInt growby = 32;
       
  2593 		TInt shrinkhys = 288;
       
  2594 		test.Printf(_L("POOL D: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned+Guard\n"),
       
  2595 			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
       
  2596 		TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
       
  2597 		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
       
  2598 		test_KErrNone(r);
       
  2599 		r = inf.SetGuardPages();
       
  2600 		test_KErrNone(r);
       
  2601 		PoolGrowingTestRoutine(inf);
       
  2602 		}
       
  2603 
       
  2604 	// Pool A': Non-page aligned pool (64-byte alignment)
       
  2605 		{
       
  2606 		TInt alignment = 6;
       
  2607 		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
       
  2608 		if (maxbufs > 32000)
       
  2609 			{
       
  2610 			maxbufs = 32000;
       
  2611 			}
       
  2612 		TInt initialbufs = 1;
       
  2613 		TInt growtrigger = 32;
       
  2614 		TInt growby = 256;
       
  2615 		TInt shrinkhys = 512;
       
  2616 		test.Printf(_L("POOL A': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
       
  2617 			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
       
  2618 		TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
       
  2619 		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
       
  2620 		test_KErrNone(r);
       
  2621 		PoolGrowingTestRoutine(inf);
       
  2622 		}
       
  2623 
       
  2624 	// Pool A'': Non-page aligned pool (64-byte alignment) - AllocCanWait
       
  2625 		{
       
  2626 		TInt alignment = 6;
       
  2627 		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
       
  2628 		if (maxbufs > 32000)
       
  2629 			{
       
  2630 			maxbufs = 32000;
       
  2631 			}
       
  2632 		TInt initialbufs = 1;
       
  2633 		TInt growtrigger = 1;
       
  2634 		TInt growby = 1;
       
  2635 		TInt shrinkhys = 257;
       
  2636 		test.Printf(_L("POOL A'': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
       
  2637 			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
       
  2638 		TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
       
  2639 		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
       
  2640 		test_KErrNone(r);
       
  2641 		PoolGrowingTestRoutine(inf, EShPoolAllocCanWait);
       
  2642 		}
       
  2643 
       
  2644 	// Pool D': Page aligned pool with guard pages
       
  2645 		{
       
  2646 		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
       
  2647 		if (maxbufs > 32000)
       
  2648 			{
       
  2649 			maxbufs = 32000;
       
  2650 			}
       
  2651 		TInt initialbufs = 1;
       
  2652 		TInt growtrigger = 1;
       
  2653 		TInt growby = 1024;
       
  2654 		TInt shrinkhys = 2048;
       
  2655 		test.Printf(_L("POOL D': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned+Guard\n"),
       
  2656 			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
       
  2657 		TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
       
  2658 		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
       
  2659 		test_KErrNone(r);
       
  2660 		r = inf.SetGuardPages();
       
  2661 		test_KErrNone(r);
       
  2662 		PoolGrowingTestRoutine(inf);
       
  2663 		}
       
  2664 	// Pool D'': Page aligned pool with guard pages - NoBufferMap
       
  2665 		{
       
  2666 		TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
       
  2667 		if (maxbufs > 32000)
       
  2668 			{
       
  2669 			maxbufs = 32000;
       
  2670 			}
       
  2671 		TInt initialbufs = maxbufs / 2;
       
  2672 		TInt growtrigger = 32;
       
  2673 		TInt growby = 32;
       
  2674 		TInt shrinkhys = 288;
       
  2675 		test.Printf(_L("POOL D'': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned+Guard\n"),
       
  2676 			*PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
       
  2677 		TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
       
  2678 		r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
       
  2679 		test_KErrNone(r);
       
  2680 		r = inf.SetGuardPages();
       
  2681 		test_KErrNone(r);
       
  2682 		PoolGrowingTestRoutine(inf, EShPoolAllocNoMap);
       
  2683 		}
       
  2684 	}
       
  2685 
       
  2686 /*
       
  2687 @SYMTestCaseID				X3
       
  2688 @SYMTestCaseDesc			Contiguous buffer allocation
       
  2689 @SYMREQ						REQ11423
       
  2690 @SYMTestActions
       
  2691 	Create a pool with the Contiguous attribute and allocate buffers.
       
  2692 @SYMTestExpectedResults
       
  2693 	Buffers memory is physically contiguous.
       
  2694 @SYMTestPriority			High
       
  2695 */
       
  2696 
       
  2697 void ContiguousPoolKernel()
       
  2698 	{
       
  2699 	test.Next(_L("Contiguous Pool (Kernel)"));
       
  2700 #ifdef __WINS__
       
  2701 	test.Printf(_L("Does not run on the emulator. Skipped\n"));
       
  2702 #else
       
  2703 	TInt r;
       
  2704 	TInt pagesize;
       
  2705 	r = HAL::Get(HAL::EMemoryPageSize, pagesize);
       
  2706 	test_KErrNone(r);
       
  2707 	if (*PtrBufSize <= pagesize)
       
  2708 		{
       
  2709 		test.Printf(_L("Buffer size <= page size. Skipped.\n"));
       
  2710 		return;
       
  2711 		}
       
  2712 
       
  2713 	TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs);
       
  2714 //	r = inf.SetSizingAttributes(KTestPoolSizeInBufs, 25, 25, 25600);
       
  2715 //	test_KErrNone(r);
       
  2716 
       
  2717 	r = Ldd.ContiguousPoolKernel(inf);
       
  2718 	test_KErrNone(r);
       
  2719 
       
  2720 #endif // __WINS__
       
  2721 	}
       
  2722 
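       // ShBufPin: allocate a buffer from a user-side non-page-aligned pool and have the
       // test driver exercise kernel-side pinning of its memory (Ldd.PinBuffer), then
       // release everything; the sequence is run twice with a fresh pool and buffer.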
       
  2723 void ShBufPin()
       
  2724 	{
       
  2725 	test.Next(_L("Buffer pinning"));
       
  2726 #ifdef __WINS__
       
  2727 	test.Printf(_L("Does not run on the emulator. Skipped\n"));
       
  2728 #else
       
  2729 	TInt r;
       
  2730 	RShPool pool1;
       
  2731 	RShBuf buf1;
       
  2732 	TShPoolCreateInfo inf1(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1, KTestMinimumAlignmentLog2);
       
  2733 	r = pool1.Create(inf1, KDefaultPoolHandleFlags);
       
  2734 	test_KErrNone(r);
       
  2735 	r = buf1.Alloc(pool1);
       
  2736 	test_KErrNone(r);
       
  2737 	r = Ldd.PinBuffer(pool1.Handle(), buf1.Handle());
       
  2738 	test_KErrNone(r);
       
  2739 	buf1.Close();
       
  2740 	pool1.Close();
       
  2741 	
       
  2742 	RShPool pool2;
       
  2743 	RShBuf buf2;
       
  2744 	TShPoolCreateInfo inf2(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1, KTestMinimumAlignmentLog2);
       
  2745 	r = pool2.Create(inf2, KDefaultPoolHandleFlags);
       
  2746 	test_KErrNone(r);
       
  2747 	r = buf2.Alloc(pool2);
       
  2748 	test_KErrNone(r);
       
  2749 	r = Ldd.PinBuffer(pool2.Handle(), buf2.Handle());
       
  2750 	test_KErrNone(r);
       
  2751 	buf2.Close();
       
  2752 	pool2.Close();
       
  2753 #endif // __WINS__
       
  2754 	}
       
  2755 
       
  2756 /*
       
  2757 @SYMTestCaseID
       
  2758 @SYMTestCaseDesc
       
  2759 @SYMREQ
       
  2760 @SYMTestActions
       
  2761 @SYMTestExpectedResults
       
  2762 @SYMTestPriority
       
  2763 */
       
  2764 
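       // SingleBufferPool: each pool below is sized for exactly one buffer, so the first
       // Alloc() must succeed and the second must fail with KErrNoMemory; the buffer is
       // then written to prove the mapping is usable. Both the page-aligned variant (with
       // guard pages and a full buffer window) and the non-page-aligned variant are covered.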
       
  2765 void SingleBufferPool()
       
  2766 	{
       
  2767 	test.Next(_L("Single Buffer Pool"));
       
  2768 	TInt r;
       
  2769 
       
  2770 	RShPool pool;
       
  2771 	RShBuf buf;
       
  2772 	RShBuf buf2;
       
  2773 
       
  2774 	TShPoolCreateInfo infpa(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1);
       
  2775 	r = infpa.SetGuardPages();
       
  2776 	test_KErrNone(r);
       
  2777 	r = pool.Create(infpa, KDefaultPoolHandleFlags);
       
  2778 	test_KErrNone(r);
       
  2779 	r = pool.SetBufferWindow(-1, ETrue);
       
  2780 	test_KErrNone(r);
       
  2781 	r = buf.Alloc(pool);
       
  2782 	test_KErrNone(r);
       
  2783 	r = buf2.Alloc(pool);
       
  2784 	test_Equal(KErrNoMemory, r);
       
  2785 	TPtr8(buf.Ptr(), buf.Size(), buf.Size()).Fill('!');
       
  2786 	buf.Close();
       
  2787 	pool.Close();
       
  2788 
       
  2789 	TShPoolCreateInfo infnpa(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1, KTestMinimumAlignmentLog2);
       
  2790 	r = pool.Create(infnpa, KDefaultPoolHandleFlags);
       
  2791 	test_KErrNone(r);
       
  2792 	r = buf.Alloc(pool);
       
  2793 	test_KErrNone(r);
       
  2794 	r = buf2.Alloc(pool);
       
  2795 	test_Equal(KErrNoMemory, r);
       
  2796 	TPtr8(buf.Ptr(), buf.Size(),buf.Size()).Fill('?');
       
  2797 	buf.Close();
       
  2798 	pool.Close();
       
  2799 	}
       
  2800 
       
  2801 /*
       
  2802 @SYMTestCaseID				X4
       
  2803 @SYMTestCaseDesc			Negative tests (user/kernel)
       
  2804 @SYMREQ						REQ11423
       
  2805 @SYMTestActions
       
  2806 	API calls with invalid arguments.
       
  2807 @SYMTestExpectedResults
       
  2808 	Appropriate error code returned.
       
  2809 @SYMTestPriority			High
       
  2810 */
       
  2811 
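       // NegativeTestsUser: each single-line block below constructs a deliberately invalid
       // TShPoolCreateInfo (zero or overflowing sizes and counts, impossible alignments,
       // inconsistent sizing attributes) and checks that pool creation reports the expected
       // error, typically KErrArgument; the few valid combinations are created and closed
       // again to confirm they still succeed.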
       
  2812 void NegativeTestsUser()
       
  2813 	{
       
  2814 	test.Next(_L("Negative tests (User)"));
       
  2815 	TInt r;
       
  2816 	TInt pagesize;
       
  2817 	TInt ram;
       
  2818 	r = HAL::Get(HAL::EMemoryPageSize, pagesize);
       
  2819 	test_KErrNone(r);
       
  2820 	r = HAL::Get(HAL::EMemoryRAM, ram);
       
  2821 	test_KErrNone(r);
       
  2822 
       
  2823 	RShPool pool;
       
  2824 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 0, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2825 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 100, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2826 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 0, 100); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2827 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, KMaxTUint, 10); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2828 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 10, KMaxTUint); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2829 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, KMaxTUint, KMaxTUint); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2830 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 65537, 65536); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2831 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 10, 1 + (1 << (32 - Log2(pagesize)))); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2832 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 4096, 10); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrNone, r); pool.Close(); }
       
  2833 	// XXX The following test will need updating in Phase 2, when exclusive access will be supported
       
  2834 	// (page-aligned-buffer pools only)
       
  2835 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 4096, 10); inf.SetExclusive(); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrNotSupported, r); pool.Close(); }
       
  2836 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 4096, 10, 12); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrNone, r); pool.Close(); }
       
  2837 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 4096, 10, 12); inf.SetExclusive(); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); pool.Close(); }
       
  2838 #ifndef __WINS__
       
  2839 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 128 * pagesize, (ram / (128 * pagesize)) + 1); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrNoMemory, r); }
       
  2840 #endif
       
  2841 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 0, 0, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2842 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 100, 0, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2843 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 0, 100, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2844 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, KMaxTUint, 10, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2845 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, KMaxTUint, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2846 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, KMaxTUint, KMaxTUint, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2847 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 65537, 65536, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2848 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 10, KMaxTUint); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2849 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 10, 33); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2850 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 300, 24); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2851 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 65537, 16); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2852 	{ TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 10, Log2(pagesize) + 1); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
       
  2853 
       
  2854 		{
       
  2855 		TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *BufferSize, KTestPoolSizeInBufs, 0);
       
  2856 		inf.SetGuardPages();
       
  2857 		r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
       
  2858 		r = inf.SetSizingAttributes(KTestPoolSizeInBufs - 1, 25, 25, 280); test_KErrNone(r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
       
  2859 		// Either grow trigger ratio or grow by ratio == 0 => non-growable pool
       
  2860 		// Such pools must have initial buffers == max buffers
       
  2861 		r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 1, 0, 1); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
       
  2862 		r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 1, 0, 0); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
       
  2863 		// shrink hysteresis ratio must be > 256
       
  2864 		r = inf.SetSizingAttributes(KTestPoolSizeInBufs - 1, 25, 25, 256); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
       
  2865 		// grow trigger ratio must be < 256
       
  2866 		r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 256, 25, 260); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
       
  2867 		}
       
  2868 
       
  2869 	// Can't have a non-aligned, contiguous pool that grows
       
  2870 	TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 200, 10, 0);
       
  2871 	r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 25, 25, 280);
       
  2872 	test_KErrNone(r);
       
  2873 	}
       
  2874 
       
  2875 void NegativeTestsKernel()
       
  2876 	{
       
  2877 	test.Next(_L("Negative tests (Kernel)"));
       
  2878 	TInt r;
       
  2879 	r = Ldd.NegativeTestsKernel();
       
  2880 	test_KErrNone(r);
       
  2881 	}
       
  2882 
       
  2883 /*
       
  2884 @SYMTestCaseID				23
       
  2885 @SYMTestCaseDesc			Out of memory testing
       
  2886 @SYMREQ
       
  2887 @SYMTestActions
       
  2888 	TBD
       
  2889 @SYMTestExpectedResults
       
  2890 @SYMTestPriority			High
       
  2891 */
       
  2892 
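       // OutOfMemory: debug-build-only heap failure testing. Each operation is retried in
       // a loop with __KHEAP_FAILNEXT(i) injecting a kernel heap allocation failure at a
       // progressively later point, so every call must either succeed or fail cleanly with
       // KErrNoMemory until enough allocations are permitted for it to complete.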
       
  2893 void OutOfMemory()
       
  2894 	{
       
  2895 	test.Next(_L("Out of memory"));
       
  2896 #ifdef _DEBUG
       
  2897 
       
  2898 	
       
  2899 	const TInt KMaxKernelAllocations = 1024;
       
  2900 	TInt i, r;
       
  2901 	RShPool pool;
       
  2902 	TShPoolCreateInfo inf0(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, 1);
       
  2903 	TShPoolCreateInfo inf1(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, 1, 0);
       
  2904 	r = inf0.SetSizingAttributes(4, 100, 1024, 300);
       
  2905 	test_KErrNone(r);
       
  2906 	r = inf1.SetSizingAttributes(4, 100, 1024, 300);
       
  2907 	test_KErrNone(r);
       
  2908 	
       
  2909 	for(TInt j = 0; j <= 1; j++)
       
  2910 		{
       
  2911 
       
  2912 		if(j == 0)
       
  2913 			test.Printf(_L("OOM testing for page-aligned pool\n"));
       
  2914 		else
       
  2915 			test.Printf(_L("OOM testing for non-page-aligned pool\n"));
       
  2916 
       
  2917 		r = KErrNoMemory;
       
  2918 
       
  2919 		__KHEAP_RESET;
       
  2920 		
       
  2921 		//Create the pool
       
  2922 		for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
       
  2923 			{
       
  2924 			__KHEAP_FAILNEXT(i);
       
  2925 			if(j == 0)
       
  2926 				r = pool.Create(inf0,KDefaultPoolHandleFlags);
       
  2927 			else
       
  2928 				r = pool.Create(inf1,KDefaultPoolHandleFlags);
       
  2929 			__KHEAP_RESET;
       
  2930 			}
       
  2931 		test.Printf(_L("Create pool took %d tries\n"),i);
       
  2932 		test_KErrNone(r);
       
  2933 
       
  2934 		//Allocate buffers with automatic pool growing enabled
       
  2935 		r = KErrNoMemory;
       
  2936 		RShBuf buf1;
       
  2937 		for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
       
  2938 			{
       
  2939 			__KHEAP_FAILNEXT(i);
       
  2940 			if(j == 0)
       
  2941 				r = buf1.Alloc(pool, EShPoolAllocNoMap);
       
  2942 			else
       
  2943 				r = buf1.Alloc(pool);
       
  2944 			__KHEAP_RESET;
       
  2945 			}
       
  2946 		test.Printf(_L("Allocate shared buffer 1 took %d tries\n"),i);	
       
  2947 		test_KErrNone(r);
       
  2948 
       
  2949 		// delay to allow the pool to grow
       
  2950 		User::After(20000);
       
  2951 
       
  2952 		r = KErrNoMemory;
       
  2953 		RShBuf buf2;
       
  2954 		for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
       
  2955 			{
       
  2956 			__KHEAP_FAILNEXT(i);
       
  2957 			if(j == 0)
       
  2958 				r = buf2.Alloc(pool, EShPoolAllocNoMap);
       
  2959 			else
       
  2960 				r = buf2.Alloc(pool);
       
  2961 			__KHEAP_RESET;
       
  2962 			User::After(20000);
       
  2963 			}
       
  2964 		test.Printf(_L("Allocate shared buffer 2 took %d tries\n"),i);	
       
  2965 		test_KErrNone(r);
       
  2966 
       
  2967 		// delay to allow the pool to grow again
       
  2968 		User::After(20000);
       
  2969 
       
  2970 		r = KErrNoMemory;
       
  2971 		RShBuf buf3;
       
  2972 		for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
       
  2973 			{
       
  2974 			__KHEAP_FAILNEXT(i);
       
  2975 			if(j == 0)
       
  2976 				r = buf3.Alloc(pool, EShPoolAllocNoMap);
       
  2977 			else
       
  2978 				r = buf3.Alloc(pool);
       
  2979 			__KHEAP_RESET;
       
  2980 			}
       
  2981 		test.Printf(_L("Allocate shared buffer 3 took %d tries\n"),i);	
       
  2982 		test_KErrNone(r);
       
  2983 
       
  2984 		//Map a buffer in page-aligned-pool case
       
  2985 		if(j == 0)
       
  2986 			{
       
  2987 			//Open a one-buffer window
       
  2988 			r = pool.SetBufferWindow(1, ETrue);
       
  2989 			test_KErrNone(r);
       
  2990 
       
  2991 			//Map a buffer
       
  2992 			r = KErrNoMemory;
       
  2993   			for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
       
  2994 				{
       
  2995 				buf1.UnMap();
       
  2996 				__KHEAP_FAILNEXT(i);
       
  2997 				r = buf1.Map();
       
  2998 				__KHEAP_RESET;
       
  2999 				}
       
  3000 			test.Printf(_L("Mapping buffer 1 took %d tries\n"),i);	
       
  3001 			test_KErrNone(r);
       
  3002 			}
       
  3003 
       
  3004 		//Setup low-space notification
       
  3005 		TRequestStatus low;
       
  3006 		low = KErrNoMemory;
       
  3007 		for (i = 0; i < KMaxKernelAllocations && low != KRequestPending; i++)
       
  3008 			{
       
  3009 			__KHEAP_FAILNEXT(i);
       
  3010 			pool.RequestLowSpaceNotification(1, low);
       
  3011 			__KHEAP_RESET;
       
  3012 			}
       
  3013 		test.Printf(_L("Setting up low-space notification took %d tries\n"),i);
       
  3014 		test_Equal(KRequestPending, low.Int());
       
  3015 	
       
  3016 		//Setup free-space notification
       
  3017 		TRequestStatus free;
       
  3018 		free = KErrNoMemory;
       
  3019 		for (i = 0; i < KMaxKernelAllocations && free != KRequestPending; i++)
       
  3020 			{
       
  3021 			__KHEAP_FAILNEXT(i);
       
  3022 			pool.RequestFreeSpaceNotification(4, free);
       
  3023 			__KHEAP_RESET;
       
  3024 			}
       
  3025 		test.Printf(_L("Setting up free-space notification took %d tries\n"),i);
       
  3026 		test_Equal(KRequestPending, free.Int());
       
  3027 		
       
  3028 		//No allocations should occur here
       
  3029 		__KHEAP_FAILNEXT(1);
       
  3030 		if(j == 0)
       
  3031 			{
       
  3032 			//Unmap the buffer
       
  3033 			r = buf1.UnMap();
       
  3034 			}
       
  3035 
       
  3036 		//Cancel the notifications
       
  3037 		pool.CancelLowSpaceNotification(low);
       
  3038 		pool.CancelFreeSpaceNotification(free);
       
  3039 	
       
  3040 		//Close the buffers and the pool
       
  3041 		buf1.Close();
       
  3042 		buf2.Close();
       
  3043 		buf3.Close();
       
  3044 		pool.Close();
       
  3045 		__KHEAP_RESET;
       
  3046 
       
  3047 		}
       
  3048 
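       	// The same failure-injection pattern is then applied to a buffer allocated
       	// kernel-side by the test driver on pool P2: the returned handle is adopted
       	// with SetHandle() and the buffer is unmapped and remapped under heap failure
       	// before being released.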
       
  3049 	// Allocate kernel-side buffer on Pool 2
       
  3050 	TInt handle = 0;
       
  3051 	RShBuf kbuf;
       
  3052 	r = KErrNoMemory;
       
  3053 	for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
       
  3054 		{
       
  3055 		__KHEAP_FAILNEXT(i);
       
  3056 		r = Ldd.AllocateKernelBuffer(1, handle);
       
  3057 		__KHEAP_RESET;
       
  3058 		}
       
  3059 	test.Printf(_L("Allocate kernel buffer took %d tries\n"),i);
       
  3060 	test_KErrNone(r);
       
  3061      
       
  3062 	__KHEAP_FAILNEXT(1);
       
  3063 	kbuf.SetHandle(handle);
       
  3064 	__KHEAP_RESET;
       
  3065 
       
  3066 	r = KErrNoMemory;
       
  3067 	for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
       
  3068 		{
       
  3069 		r = kbuf.UnMap();
       
  3070 		__KHEAP_FAILNEXT(i);
       
  3071 		r = kbuf.Map();
       
  3072 		__KHEAP_RESET;
       
  3073 		}
       
  3074 	test.Printf(_L("Mapping kernel buffer took %d tries\n"),i);
       
  3075 	test_KErrNone(r);
       
  3076 
       
  3077 	__KHEAP_FAILNEXT(1);
       
  3078 	r = kbuf.UnMap();
       
  3079 	kbuf.Close();
       
  3080 	__KHEAP_RESET;
       
  3081 
       
  3082 
       
  3083 #else // _DEBUG
       
  3084 	test.Printf(_L("Debug builds only. Test skipped.\n"));
       
  3085 #endif // _DEBUG
       
  3086 	}
       
  3087 
       
  3088 /*
       
  3089 @SYMTestCaseID				22
       
  3090 @SYMTestCaseDesc			Stress testing
       
  3091 @SYMREQ
       
  3092 @SYMTestActions
       
  3093 	TBD
       
  3094 @SYMTestExpectedResults
       
  3095 @SYMTestPriority			Medium
       
  3096 */
       
  3097 
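       // Stress test helpers: StressThread1 repeatedly creates and closes pools, alternating
       // between page-aligned and non-page-aligned configurations; StressThread2 fills the
       // shared pools P1 and P2 up to their maximum buffer counts, writes to every buffer and
       // then frees them all. StressTesting() runs both threads concurrently for the
       // requested number of seconds before killing them and closing the pools.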
       
  3098 TInt StressThread1(TAny*)
       
  3099 	{
       
  3100 	TInt r;
       
  3101 	TInt pagesize;
       
  3102 	r = HAL::Get(HAL::EMemoryPageSize, pagesize);
       
  3103 	test_KErrNone(r);
       
  3104 
       
  3105 	TInt i = 0;
       
  3106 	FOREVER
       
  3107 		{
       
  3108 		RShPool pool;
       
  3109 		if (i % 2)
       
  3110 			{
       
  3111 			TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 1000, 512);
       
  3112 			r = pool.Create(inf,KDefaultPoolHandleFlags);
       
  3113 			if (r)
       
  3114 				{
       
  3115 				RDebug::Printf("Error %d line %d", r, __LINE__);
       
  3116 				break;
       
  3117 				}
       
  3118 
       
  3119 			r = pool.SetBufferWindow(-1, ETrue);
       
  3120 			test_KErrNone(r);
       
  3121 
       
  3122 			}
       
  3123 		else
       
  3124 			{
       
  3125 			TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10000, 200, 0);
       
  3126 			r = pool.Create(inf,KDefaultPoolHandleFlags);
       
  3127 			if (r)
       
  3128 				{
       
  3129 				RDebug::Printf("Error %d line %d", r, __LINE__);
       
  3130 				break;
       
  3131 				}
       
  3132 			}
       
  3133 		pool.Close();
       
  3134 		i++;
       
  3135 		if (i % 100 == 0)
       
  3136 			{
       
  3137 			RDebug::Printf("ST1 %d iterations", i);
       
  3138 			}
       
  3139 		}
       
  3140 	return r;
       
  3141 	}
       
  3142 
       
  3143 TInt StressThread2(TAny*)
       
  3144 	{
       
  3145 	TInt r = KErrUnknown;
       
  3146 	TShPoolInfo inf1;
       
  3147 	TShPoolInfo inf2;
       
  3148 	P1.GetInfo(inf1);
       
  3149 	P2.GetInfo(inf2);
       
  3150 	TInt j = 0;
       
  3151 	FOREVER
       
  3152 		{
       
  3153 		TUint i;
       
  3154 		RArray<RShBuf> bufarray1;
       
  3155 		RArray<RShBuf> bufarray2;
       
  3156 		for (i = 0; i < inf1.iMaxBufs; i++)
       
  3157 			{
       
  3158 			RShBuf buf;
       
  3159 			r = buf.Alloc(P1);
       
  3160 			if (r)
       
  3161 				{
       
  3162 				RDebug::Printf("Error %d line %d i=%d", r, __LINE__, i);
       
  3163 				break;
       
  3164 				}
       
  3165 			TPtr8(buf.Ptr(), buf.Size(),buf.Size()).Fill('1');
       
  3166 			r = bufarray1.Append(buf);
       
  3167 			if (r)
       
  3168 				{
       
  3169 				buf.Close();
       
  3170 				RDebug::Printf("Error %d line %d i=%d", r, __LINE__, i);
       
  3171 				break;
       
  3172 				}
       
  3173 			}
       
  3174 		for (i = 0; i < inf2.iMaxBufs; i++)
       
  3175 			{
       
  3176 			RShBuf buf;
       
  3177 			r = buf.Alloc(P2);
       
  3178 			if (r)
       
  3179 				{
       
  3180 				RDebug::Printf("Error %d line %d i=%d", r, __LINE__, i);
       
  3181 				break;
       
  3182 				}
       
  3183 			TPtr8(buf.Ptr(), buf.Size(),buf.Size()).Fill('2');
       
  3184 			bufarray2.Append(buf);
       
  3185 			}
       
  3186 		i = 0;
       
  3187 		while (bufarray1.Count())
       
  3188 			{
       
  3189 			bufarray1[0].Close();
       
  3190 			bufarray1.Remove(0);
       
  3191 			i++;
       
  3192 			}
       
  3193 
       
  3194 		while (bufarray2.Count())
       
  3195 			{
       
  3196 			bufarray2[0].Close();
       
  3197 			bufarray2.Remove(0);
       
  3198 			}
       
  3199 		bufarray1.Close();
       
  3200 		bufarray2.Close();
       
  3201 		if (r)
       
  3202 			{
       
  3203 			break;
       
  3204 			}
       
  3205 		j++;
       
  3206 		if (j % 10 == 0)
       
  3207 			{
       
  3208 			RDebug::Printf("ST2 %d iterations", j);
       
  3209 			}
       
  3210 		}
       
  3211 	return r;
       
  3212 	}
       
  3213 
       
  3214 void StressTesting(TInt aSecs)
       
  3215 	{
       
  3216 	test.Next(_L("Stress testing"));
       
  3217 	TInt r;
       
  3218 
       
  3219 	test.Start(_L("Create pools"));
       
  3220 	TShPoolCreateInfo inf1(TShPoolCreateInfo::ENonPageAlignedBuffer, 2000, 500, 11);
       
  3221 	r = P1.Create(inf1,KDefaultPoolHandleFlags);
       
  3222 	test_KErrNone(r);
       
  3223 	TInt handle;
       
  3224 	TShPoolCreateInfo inf2(TShPoolCreateInfo::EPageAlignedBuffer, 5000, 150);
       
  3225 	r = Ldd.OpenKernelPool(inf2, handle);
       
  3226 	test_KErrNone(r);
       
  3227 	P2.SetHandle(handle);
       
  3228 
       
  3229 	r = P2.SetBufferWindow(-1, ETrue);
       
  3230 	test_KErrNone(r);
       
  3231 
       
  3232 	test.Next(_L("Create threads"));
       
  3233 	RThread t1;
       
  3234 	r = t1.Create(_L("THREAD1"), StressThread1, KDefaultStackSize, KMinHeapSize, KMinHeapSize, NULL);
       
  3235 	test_KErrNone(r);
       
  3236 	RThread t2;
       
  3237 	r = t2.Create(_L("THREAD2"), StressThread2, KDefaultStackSize*2, KMinHeapSize, 1 << 20, NULL);
       
  3238 	test_KErrNone(r);
       
  3239 	test.Next(_L("Start threads"));
       
  3240 	test.Printf(_L("Wait for %d seconds\n"), aSecs);
       
  3241 	RThread().SetPriority(EPriorityMore);
       
  3242 	TRequestStatus t1rs;
       
  3243 	TRequestStatus t2rs;
       
  3244 	t1.Logon(t1rs);
       
  3245 	t2.Logon(t2rs);
       
  3246 	t1.Resume();
       
  3247 	t2.Resume();
       
  3248 	User::After(aSecs * 1000000);
       
  3249 
       
  3250 	test.Next(_L("Kill threads"));
       
  3251 	t1.Kill(KErrNone);
       
  3252 	t2.Kill(KErrNone);
       
  3253 
       
  3254 	// wait for threads to actually die
       
  3255 	User::WaitForRequest(t1rs);
       
  3256 	User::WaitForRequest(t2rs);
       
  3257 
       
  3258 	t1.Close();
       
  3259 	t2.Close();
       
  3260 	RThread().SetPriority(EPriorityNormal);
       
  3261 
       
  3262 	test.Next(_L("Close pools"));
       
  3263 	P1.Close();
       
  3264 	r = Ldd.CloseKernelPool();
       
  3265 	test_KErrNone(r);
       
  3266 	P2.Close();
       
  3267 	test.End();
       
  3268 	}
       
  3269 
       
  3270 /*
       
  3271 @SYMTestCaseID
       
  3272 @SYMTestCaseDesc
       
  3273 @SYMREQ
       
  3274 @SYMTestActions
       
  3275 @SYMTestExpectedResults
       
  3276 @SYMTestPriority
       
  3277 */
       
  3278 
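       // NoDeallocation: spawn a copy of this test process with the SLAVE argument so that
       // SlaveNoDeallocation() creates a pool and allocates a buffer but exits without
       // closing them; the parent waits behind a supervisor barrier and uses __KHEAP_MARKEND
       // to confirm that the kernel reclaimed everything when the slave process terminated.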
       
  3279 void NoDeallocation()
       
  3280 	{
       
  3281 	test.Next(_L("No deallocation"));
       
  3282 	TInt r;
       
  3283 	TBuf<10> command;
       
  3284 	command.Format(_L("%S %d"), &KTestSlave, ETestSlaveNoDeallocation);
       
  3285 	RProcess p;
       
  3286 	r = p.Create(RProcess().FileName(), command);
       
  3287 	test_KErrNone(r);
       
  3288 	TRequestStatus rs;
       
  3289 	p.Logon(rs);
       
  3290 	p.Resume();
       
  3291 	User::WaitForRequest(rs);
       
  3292 
       
  3293 	// wait for memory to be freed
       
  3294 	r = UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, (TAny*)5000, 0);
       
  3295 	test_KErrNone(r);
       
  3296 
       
  3297 	__KHEAP_MARKEND;
       
  3298 	test_KErrNone(rs.Int());
       
  3299 	test_Equal(EExitKill, p.ExitType());
       
  3300 	test_KErrNone(p.ExitReason());
       
  3301 	p.Close();
       
  3302 	}
       
  3303 
       
  3304 TInt SlaveNoDeallocation()
       
  3305 	{
       
  3306 	__KHEAP_MARK;
       
  3307 	TInt r;
       
  3308 	RShPool pool;
       
  3309 	TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *BufferSize, KTestPoolSizeInBufs);
       
  3310 	r = pool.Create(inf,KDefaultPoolHandleFlags);
       
  3311 	test_KErrNone(r);
       
  3312 
       
  3313 	r = pool.SetBufferWindow(-1, ETrue);
       
  3314 	test_KErrNone(r);
       
  3315 
       
  3316 	if (!r)
       
  3317 		{
       
  3318 		RShBuf buf;
       
  3319 		r = buf.Alloc(pool);
       
  3320 		}
       
  3321 	return r;
       
  3322 	}
       
  3323 
       
  3324 TInt E32Main()
       
  3325 	{
       
  3326 	__UHEAP_MARK;
       
  3327 
       
  3328 	// Parse command line for slave processes
       
  3329 	TInt r = KErrArgument;
       
  3330 	TBuf<KMaxFullName> cmd;
       
  3331 	User::CommandLine(cmd);
       
  3332 	TLex lex(cmd);
       
  3333 	if (lex.NextToken() == KTestSlave)
       
  3334 		{
       
  3335 		TInt function;
       
  3336 		TLex functionlex(lex.NextToken());
       
  3337 		functionlex.Val(function);
       
  3338 		switch (function)
       
  3339 			{
       
  3340 			case ETestSlaveNoDeallocation:
       
  3341 				r = SlaveNoDeallocation();
       
  3342 				break;
       
  3343 			}
       
  3344 		__UHEAP_MARKEND;
       
  3345 		return r;
       
  3346 		}
       
  3347 	// Test starts here
       
  3348 	test.Title();
       
  3349 
       
  3350 	test.Start(_L("Check for Shared Buffers availability"));
       
  3351 	RShPool pool;
       
  3352 	TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *BufferSize, KTestPoolSizeInBufs);
       
  3353 	r = pool.Create(inf,KDefaultPoolHandleFlags);
       
  3354 	if (r == KErrNotSupported)
       
  3355 		{
       
  3356 		test.Printf(_L("Not supported by this memory model.\n"));
       
  3357 		}
       
  3358 	else
       
  3359 		{
       
  3360 		test_KErrNone(r);
       
  3361 		pool.Close();
       
  3362 
       
  3363 		test.Next(_L("No device driver"));
       
  3364 		test.Start(_L("Start test loop"));
       
  3365 		for (PtrBufSize = BufferSize; *PtrBufSize != 0; PtrBufSize++)
       
  3366 			{
       
  3367 			TBuf<30> title;
       
  3368 			title.Format(_L("Buffer size = %d bytes"), *PtrBufSize);
       
  3369 			test.Next(title);
       
  3370 			test.Start(_L("New test iteration"));
       
  3371 			BufferAlignmentUser();
       
  3372 			BufferMapping();
       
  3373 			BufferWindow();
       
  3374 			GuardPages();
       
  3375 			PoolGrowingUser();
       
  3376 			SingleBufferPool();
       
  3377 			test.End();
       
  3378 			}
       
  3379 		test.End();
       
  3380 		test.Next(_L("Load Device Driver"));
       
  3381 		LoadDeviceDrivers();
       
  3382 
       
  3383 		#ifdef TEST_CLIENT_THREAD
       
  3384 		test.Next(_L("Device driver in client thread"));
       
  3385 		r = Ldd.Open(0);
       
  3386 		#else
       
  3387 		test.Next(_L("Device driver in own thread"));
       
  3388 		r = Ldd.Open(1);
       
  3389 		#endif
       
  3390 
       
  3391 		test_KErrNone(r);
       
  3392 
       
  3393 		test.Start(_L("Start test loop"));
       
  3394 		for (PtrBufSize = BufferSize; *PtrBufSize != 0; PtrBufSize++)
       
  3395 			{
       
  3396 			TBuf<30> title;
       
  3397 			title.Format(_L("Buffer size = %d bytes"), *PtrBufSize);
       
  3398 			test.Next(title);
       
  3399 			test.Start(_L("New test iteration"));
       
  3400 			CreateUserPool(ETestNonPageAligned);
       
  3401 			CreateKernelPool(ETestNonPageAligned);
       
  3402 			AllocateUserBuffer();
       
  3403 			AllocateKernelBuffer();
       
  3404 			AllocateUserMax(P1);
       
  3405 			AllocateUserMax(P2);
       
  3406 			AllocateKernelMax();
       
  3407 			BufferAlignmentKernel();
       
  3408 			CreateKernelPoolPhysAddr();
       
  3409 			NotificationRequests(P1);
       
  3410 			NotificationRequests(P2);
       
  3411 			CancelNotificationRequests(P1);
       
  3412 			CancelNotificationRequests(P2);
       
  3413 			ShBufPin();
       
  3414 			CloseKernelPool();
       
  3415 			CloseUserPool();
       
  3416 			ContiguousPoolKernel();
       
  3417 			CreateUserPool(ETestPageAligned);
       
  3418 			CreateKernelPool(ETestPageAligned);
       
  3419 			OutOfMemory();
       
  3420 			AllocateUserBuffer();
       
  3421 			AllocateKernelBuffer();
       
  3422 			AllocateUserMax(P1);
       
  3423 			AllocateUserMax(P2);
       
  3424 			AllocateKernelMax();
       
  3425 			NotificationRequests(P1);
       
  3426 			NotificationRequests(P2);
       
  3427 			CloseUserPool();
       
  3428 			CloseKernelPool();
       
  3429 			CreateUserPool(ETestPageAlignedGrowing);
       
  3430 			CreateKernelPool(ETestPageAlignedGrowing);
       
  3431 			OutOfMemory();
       
  3432 			AllocateKernelMax();
       
  3433 			AllocateUserMax(P1);
       
  3434 			AllocateUserMax(P2);
       
  3435 			CloseUserPool();
       
  3436 			CloseKernelPool();
       
  3437 			test.End();
       
  3438 			}
       
  3439 		NegativeTestsKernel();
       
  3440 		StressTesting(5);
       
  3441 		test.End();
       
  3442 		Ldd.Close();
       
  3443 
       
  3444 		NegativeTestsUser();
       
  3445 		NoDeallocation();
       
  3446 
       
  3447 		test.Next(_L("Unload Device Drivers"));
       
  3448 		FreeDeviceDrivers();
       
  3449 		}
       
  3450 	test.End();
       
  3451 	test.Close();
       
  3452 
       
  3453 	__UHEAP_MARKEND;
       
  3454 	return KErrNone;
       
  3455 	}