201019_01
author hgs
Mon, 10 May 2010 11:40:53 +0100
changeset 132 e4a7b1cbe40c
parent 131 e880629062dd
child 133 2a0ada0a1bf8
201019_01
bsptemplate/asspandvariant/template_variant/hal/config.hcf
halservices/hal/inc/hal_data.h
halservices/hal/rom/hal.hby
halservices/hal/src/userhal.cpp
halservices/hal/tsrc/t_newhal.cpp
kernel/eka/bmarm/ekernsmp.def
kernel/eka/bmarm/ekernu.def
kernel/eka/bwins/ekernu.def
kernel/eka/bx86/ekernsmp.def
kernel/eka/bx86/ekernu.def
kernel/eka/bx86gcc/ekernsmp.def
kernel/eka/bx86gcc/ekernu.def
kernel/eka/drivers/power/smppower/idlehelper.cia
kernel/eka/drivers/power/smppower/sample_idlehandler/smpidlehandler.cpp
kernel/eka/drivers/xyin/d_xyin.cpp
kernel/eka/eabi/ekernsmp.def
kernel/eka/eabi/ekernu.def
kernel/eka/include/d32usbdi_errors.h
kernel/eka/include/drivers/dma_v1.h
kernel/eka/include/drivers/dma_v2.h
kernel/eka/include/drivers/smppower/idlehelper.h
kernel/eka/include/drivers/smppower/sample_idlehandler/smpidlehandler.h
kernel/eka/include/drivers/xyin.h
kernel/eka/include/e32ver.h
kernel/eka/include/kernel/kbma.h
kernel/eka/include/kernel/kern_priv.h
kernel/eka/include/memmodel/epoc/mmubase/mmubase.h
kernel/eka/include/memmodel/epoc/mmubase/ramalloc.h
kernel/eka/include/u32hal.h
kernel/eka/klib/bma.cpp
kernel/eka/memmodel/epoc/flexible/mmu/mdefrag.cpp
kernel/eka/memmodel/epoc/flexible/mmu/mmanager.cpp
kernel/eka/memmodel/epoc/flexible/mmu/mmanager.h
kernel/eka/memmodel/epoc/flexible/mmu/mmu.cpp
kernel/eka/memmodel/epoc/flexible/mmu/mmu.h
kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp
kernel/eka/memmodel/epoc/flexible/mmu/mpager.h
kernel/eka/memmodel/epoc/flexible/mmu/mptalloc.cpp
kernel/eka/memmodel/epoc/mmubase/mmubase.cpp
kernel/eka/memmodel/epoc/mmubase/ramalloc.cpp
kernel/eka/memmodel/epoc/moving/mchunk.cpp
kernel/eka/memmodel/epoc/multiple/arm/xmmu.cpp
kernel/eka/memmodel/epoc/multiple/mchunk.cpp
kernel/eka/release.txt
kerneltest/e32test/buffer/t_tbma.cpp
kerneltest/e32test/buffer/t_tbma.h
kerneltest/e32test/defrag/t_ramdefrag.cpp
kerneltest/e32test/demandpaging/t_datapaging.cpp
kerneltest/e32test/device/t_usbapi.cpp
kerneltest/e32test/digitiser/digi.auto.bat
kerneltest/e32test/digitiser/digitiser.inf
kerneltest/e32test/digitiser/digitiser_tests.iby
kerneltest/e32test/digitiser/t_traweventdigitiser.cpp
kerneltest/e32test/digitiser/t_userdigitisertest.cpp
kerneltest/e32test/digitiser/tshell_digitisertests.oby
kerneltest/e32test/group/bld.inf
kerneltest/e32test/group/t_ramall.mmp
kerneltest/e32test/group/t_traweventdigitiser.mmp
kerneltest/e32test/group/t_userdigitisernocaps.mmp
kerneltest/e32test/group/t_userdigitisertest.mmp
kerneltest/e32test/mmu/t_cachechunk.cpp
kerneltest/e32test/mmu/t_ramall.cpp
kerneltest/e32test/prime/t_semutx.cpp
kerneltest/e32test/prime/t_semutx2.cpp
kerneltest/e32test/system/t_condvar.cpp
kerneltest/f32test/demandpaging/t_nandpaging.cpp
kerneltest/f32test/server/t_falsespace.cpp
userlibandfileserver/fileserver/etshell/ts_com.cpp
userlibandfileserver/fileserver/group/release.txt
userlibandfileserver/fileserver/inc/f32file.h
userlibandfileserver/fileserver/inc/f32ver.h
userlibandfileserver/fileserver/sfile/sf_file.cpp
userlibandfileserver/fileserver/sfile/sf_nbs.cpp
userlibandfileserver/fileserver/sfile/sf_std.h
userlibandfileserver/fileserver/sfile/sf_svr.cpp
userlibandfileserver/fileserver/sfile/sf_utl.cpp
userlibandfileserver/fileserver/sfsrv/cl_fman.cpp
userlibandfileserver/fileserver/shostmassstorage/client/hostmsclient.mmp
userlibandfileserver/fileserver/shostmassstorage/server/hostmsserver.mmp
--- a/bsptemplate/asspandvariant/template_variant/hal/config.hcf	Wed May 05 05:11:16 2010 +0100
+++ b/bsptemplate/asspandvariant/template_variant/hal/config.hcf	Mon May 10 11:40:53 2010 +0100
@@ -99,3 +99,4 @@
 ECustomResourceDrive : set = 0
 EDisplayNumberOfScreens=0
 ENumCpus=GetNumCpus
+EDigitiserOrientation : set = DigitiserOrientation
--- a/halservices/hal/inc/hal_data.h	Wed May 05 05:11:16 2010 +0100
+++ b/halservices/hal/inc/hal_data.h	Mon May 10 11:40:53 2010 +0100
@@ -1110,6 +1110,13 @@
 		*/
 		ENumCpus,
 
+		/**
+		The orientation of the digitiser. Usually mirrors the device orientation.
+
+		@see TDigitiserOrientation for allowed values
+		@capability WriteDeviceData Needed to set this attribute
+		*/
+		EDigitiserOrientation,
 
 		/*
 		 * NOTE:
@@ -1328,6 +1335,29 @@
 		EPowerBackupStatus_Good,
 		};
 	
+	
+	/**
+	Describes the orientation of the screen digitiser. This usually mirrors the
+	device orientation, not necessarily the display rotation, as the latter may
+	be limited to upright and left-90 only. The values, in degrees, measure the
+	anti-clockwise angle from the left edge of the digitiser relative to the
+	normal default position of the device.
+
+	User-side clients can use this attribute to inform the digitiser driver of
+	the digitiser orientation. The driver may then use this information to
+	adjust X,Y sampling depending on the input pointer type.
+
+	@see HALData::TAttribute
+	*/
+	enum TDigitiserOrientation
+		{
+		EDigitiserOrientation_default,	///< Driver uses built-in default
+		EDigitiserOrientation_000,		///< Device normal 'make-call' position
+		EDigitiserOrientation_090,		///< Device rotated left 90 degrees
+		EDigitiserOrientation_180,		///< Device rotated 180 degrees
+		EDigitiserOrientation_270		///< Device rotated right 90 degrees
+		};
+	
 	};
 
 #endif
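
A minimal user-side sketch of how a client might exercise the new attribute
through the published HAL API (hypothetical usage, not part of this changeset;
assumes the standard HAL::Get/HAL::Set overloads from hal.h):

	#include <hal.h>

	TInt RotateDigitiserLeft90()
		{
		TInt orientation = HALData::EDigitiserOrientation_default;
		// Reading the attribute needs no capability.
		TInt r = HAL::Get(HALData::EDigitiserOrientation, orientation);
		if (r != KErrNone)
			return r;
		// Setting it is policed against WriteDeviceData
		// (see the d_xyin.cpp handler below).
		return HAL::Set(HALData::EDigitiserOrientation,
						HALData::EDigitiserOrientation_090);
		}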
--- a/halservices/hal/rom/hal.hby	Wed May 05 05:11:16 2010 +0100
+++ b/halservices/hal/rom/hal.hby	Mon May 10 11:40:53 2010 +0100
@@ -1,4 +1,4 @@
-// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
+// Copyright (c) 2007-2010 Nokia Corporation and/or its subsidiary(-ies).
 // All rights reserved.
 // This component and the accompanying materials are made available
 // under the terms of the License "Eclipse Public License v1.0"
@@ -139,3 +139,5 @@
 #define ESerialNumber					117
 #define ECpuProfilingDefaultInterruptBase		118
 #define ENumCpus						119
+#define EDigitiserOrientation			120
+
--- a/halservices/hal/src/userhal.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/halservices/hal/src/userhal.cpp	Mon May 10 11:40:53 2010 +0100
@@ -1,4 +1,4 @@
-// Copyright (c) 1999-2009 Nokia Corporation and/or its subsidiary(-ies).
+// Copyright (c) 1999-2010 Nokia Corporation and/or its subsidiary(-ies).
 // All rights reserved.
 // This component and the accompanying materials are made available
 // under the terms of the License "Eclipse Public License v1.0"
@@ -41,7 +41,8 @@
 	EInvalidAttribKeybd=13,
 	EInvalidAttribPen=14,
 	EInvalidAttribMouse=15,
-	EInvalidAttrib3DPointer=16
+	EInvalidAttrib3DPointer=16,
+	EInvalidAttribDigitiserOrientation=17
 	};
 
 void Panic(THalUserHalPanic aPanic)
@@ -929,3 +930,27 @@
 	return KErrNone;
 	}
 
+// EDigitiserOrientation
+#if defined(_DEBUG)
+TInt DigitiserOrientation(TInt aDeviceNumber, TInt aAttrib, TBool aSet, TAny* aInOut)
+#else
+TInt DigitiserOrientation(TInt aDeviceNumber, TInt /*aAttrib*/, TBool aSet, TAny* aInOut)
+#endif
+	{
+	__ASSERT_DEBUG(aAttrib == HALData::EDigitiserOrientation, Panic(EInvalidAttribDigitiserOrientation));
+	__ASSERT_DEBUG(aDeviceNumber >= 0, Panic(EInvalidAttribDigitiserOrientation));	
+	
+	if (aSet)
+		{
+		//Set
+		if ( ((TInt)aInOut) < 0 || ((TInt)aInOut) > HALData::EDigitiserOrientation_270) 
+			return KErrArgument;
+		return UserSvr::HalFunction(EHalGroupDigitiser, EDigitiserOrientation, aInOut, (TAny*)ETrue, aDeviceNumber);
+		}
+		
+	//Get
+	__ASSERT_DEBUG(aInOut != 0, Panic(EInvalidAttribDigitiserOrientation));
+	return UserSvr::HalFunction(EHalGroupDigitiser, EDigitiserOrientation, aInOut, (TAny*)EFalse, aDeviceNumber);
+	}
+
+
--- a/halservices/hal/tsrc/t_newhal.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/halservices/hal/tsrc/t_newhal.cpp	Mon May 10 11:40:53 2010 +0100
@@ -144,7 +144,7 @@
 	_S("ESerialNumber"),
 	_S("ECpuProfilingDefaultInterruptBase"),
 	_S("ENumCpus"),
-
+	_S("EDigitiserOrientation")
 	};
 
 TInt MatchAbbrev(const TDesC& anInput, const TText** aList, TInt aListLen)
@@ -159,7 +159,7 @@
 		if (r>=0)
 			{
 			// substring matches
-			if (r==0 && list_entry.Length()==anInput.Length())
+			if (r==0 && list_entry.Length()==anInput.Length()) 
 				{
 				// exact match
 				return i;
--- a/kernel/eka/bmarm/ekernsmp.def	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/bmarm/ekernsmp.def	Mon May 10 11:40:53 2010 +0100
@@ -1053,4 +1053,6 @@
 	Stats__13KernCoreStatsPv @ 1052 NONAME R3UNUSED ; KernCoreStats::Stats(void *)
 	SetNumberOfActiveCpus__5NKerni @ 1053 NONAME
 	SetIdleHandler__3ArmPFPvUlPVv_vPv @ 1054 NONAME R3UNUSED ; Arm::SetIdleHandler(void (*)(void *, unsigned long, void volatile *), void *)
+	FreeRamZone__4EpocUi @ 1055 NONAME R3UNUSED ; Epoc::FreeRamZone(unsigned int)
+	SelectiveAlloc__16TBitMapAllocatorii @ 1056 NONAME R3UNUSED ; TBitMapAllocator::SelectiveAlloc(int, int)
 
--- a/kernel/eka/bmarm/ekernu.def	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/bmarm/ekernu.def	Mon May 10 11:40:53 2010 +0100
@@ -1050,3 +1050,5 @@
 	Retire__13KernCoreStatsii @ 1049 NONAME R3UNUSED ; KernCoreStats::Retire(int, int)
 	Stats__13KernCoreStatsPv @ 1050 NONAME R3UNUSED ; KernCoreStats::Stats(void *)
 	SetIdleHandler__3ArmPFPvUl_vPv @ 1051 NONAME R3UNUSED ; Arm::SetIdleHandler(void (*)(void *, unsigned long), void *)
+	FreeRamZone__4EpocUi @ 1052 NONAME R3UNUSED ; Epoc::FreeRamZone(unsigned int)
+	SelectiveAlloc__16TBitMapAllocatorii @ 1053 NONAME R3UNUSED ; TBitMapAllocator::SelectiveAlloc(int, int)
--- a/kernel/eka/bwins/ekernu.def	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/bwins/ekernu.def	Mon May 10 11:40:53 2010 +0100
@@ -934,4 +934,5 @@
 	?Engage@KernCoreStats@@SAHH@Z @ 933 NONAME	;  public: static int KernCoreStats::Engage(int)
 	?Retire@KernCoreStats@@SAHHH@Z @ 934 NONAME	;  public: static int KernCoreStats::Retire(int, int)
 	?Stats@KernCoreStats@@SAHPAX@Z @ 935 NONAME	;  public: static int KernCoreStats::Stats(void *)
+	?SelectiveAlloc@TBitMapAllocator@@QAEIHH@Z @ 936 NONAME ; public: unsigned int __thiscall TBitMapAllocator::SelectiveAlloc(int,int)
 
--- a/kernel/eka/bx86/ekernsmp.def	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/bx86/ekernsmp.def	Mon May 10 11:40:53 2010 +0100
@@ -1010,4 +1010,6 @@
 	?Retire@KernCoreStats@@SAHHH@Z @ 1009 NONAME	;  public: static int KernCoreStats::Retire(int, int)
 	?Stats@KernCoreStats@@SAHPAX@Z @ 1010 NONAME	;  public: static int KernCoreStats::Stats(void *)
 	?SetNumberOfActiveCpus@NKern@@SAXH@Z @ 1011 NONAME ; public: static void __cdecl NKern::SetNumberOfActiveCpus(int)
+	?FreeRamZone@Epoc@@SAHI@Z @ 1012 NONAME ; public: static int Epoc::FreeRamZone(unsigned int)
+	?SelectiveAlloc@TBitMapAllocator@@QAEIHH@Z @ 1013 NONAME ; public: unsigned int __thiscall TBitMapAllocator::SelectiveAlloc(int,int)
 
--- a/kernel/eka/bx86/ekernu.def	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/bx86/ekernu.def	Mon May 10 11:40:53 2010 +0100
@@ -955,4 +955,6 @@
 	?LeaveIdle@KernCoreStats@@SAXI@Z @ 954 NONAME	;  public: static void KernCoreStats::LeaveIdle(unsigned int)
 	?Retire@KernCoreStats@@SAHHH@Z @ 955 NONAME	;  public: static int KernCoreStats::Retire(int, int)
 	?Stats@KernCoreStats@@SAHPAX@Z @ 956 NONAME	;  public: static int KernCoreStats::Stats(void *)
+	?FreeRamZone@Epoc@@SAHI@Z @ 957 NONAME ; public: static int Epoc::FreeRamZone(unsigned int)
+	?SelectiveAlloc@TBitMapAllocator@@QAEIHH@Z @ 958 NONAME ; public: unsigned int __thiscall TBitMapAllocator::SelectiveAlloc(int,int)
 
--- a/kernel/eka/bx86gcc/ekernsmp.def	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/bx86gcc/ekernsmp.def	Mon May 10 11:40:53 2010 +0100
@@ -1098,4 +1098,6 @@
 	_ZN13KernCoreStats6RetireEii @ 1097 NONAME
 	_ZN13KernCoreStats9ConfigureEj @ 1098 NONAME
 	_ZN5NKern21SetNumberOfActiveCpusEi @ 1099 NONAME
+	_ZN4Epoc11FreeRamZoneEj @ 1100 NONAME
+	_ZN16TBitMapAllocator14SelectiveAllocEii @ 1101 NONAME
 
--- a/kernel/eka/bx86gcc/ekernu.def	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/bx86gcc/ekernu.def	Mon May 10 11:40:53 2010 +0100
@@ -1038,4 +1038,6 @@
 	_ZN13KernCoreStats9ConfigureEj @ 1037 NONAME
 	_ZN13KernCoreStats9EnterIdleEv @ 1038 NONAME
 	_ZN13KernCoreStats9LeaveIdleEj @ 1039 NONAME
+	_ZN4Epoc11FreeRamZoneEj @ 1040 NONAME
+	_ZN16TBitMapAllocator14SelectiveAllocEii @ 1041 NONAME
 
--- a/kernel/eka/drivers/power/smppower/idlehelper.cia	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/drivers/power/smppower/idlehelper.cia	Mon May 10 11:40:53 2010 +0100
@@ -85,12 +85,12 @@
 	LDREX(3,1);                                                       // r3 = iIdlingCpus
     asm("orr    r3,r0,r3");                                           // orr in mask for this CPU
     asm("cmp    r3,r2");                                              // compare to iAllEngagedCpusMask
-    asm("orreq  r3,r3,#%a0" : : "i" (TIdleSupport::KGlobalIdleFlag)); // if equal orr in KGlobalIdleFlag
+    asm("orreq  r3,r3,#%a0" : : "i" ((TUint32)TIdleSupport::KGlobalIdleFlag)); // if equal orr in KGlobalIdleFlag
     STREX(12,3,1);
     asm("cmp    r12, #0 ");                                              // 
 	asm("bne    1b ");                                                   // write didn't succeed try again
     __DATA_MEMORY_BARRIER__(r12);
-    asm("and    r0,r3,#%a0" : : "i" (TIdleSupport::KGlobalIdleFlag));
+    asm("and    r0,r3,#%a0" : : "i" ((TUint32)TIdleSupport::KGlobalIdleFlag));
 	__JUMP(,lr);
     asm("__iAllEngagedCpusMask:");
     asm(".word %a0" : : "i" ((TInt)&TIdleSupport::iAllEngagedCpusMask));//
@@ -125,7 +125,7 @@
     asm("stmfd sp!, {r4-r5,lr} ");	
     asm("add r0,r0,#%a0" : : "i"  _FOFF(TSyncPointBase, iStageAndCPUWaitingMask)); // skip vt
     asm("ldr r4,[r0,#4]"); 
-    asm("ldr r4,[r4]")
+    asm("ldr r4,[r4]");
    __DATA_MEMORY_BARRIER_Z__(r12);          // 
     asm("1: ");
 	LDREX(2,0);                             // r2 =  iStageAndCPUWaitingMask, r4 = iAllEnagedCpusMask
@@ -149,7 +149,7 @@
 #endif		
     asm("2: ");
     asm("cmp r3,r5");                       // all (old stage does not equal new stage)
-    asm("ldmfdne sp!, {r4-r5,pc}");         // yup return
+    asm("bne 3f");                            // branch to return
 #ifdef SYNCPOINT_WFE		
 	__DATA_MEMORY_BARRIER__(r12);        
 	ARM_WFE;
@@ -158,6 +158,8 @@
     __DATA_MEMORY_BARRIER__(r12);           // ensure read is observed
     asm("mov r3,r2,lsr #16");               // re-read new stage
     asm("b 2b");                            // loop back
+    asm("3: ");
+    asm("ldmfd sp!, {r4-r5,pc}");         // return
     }
 
 /** 
@@ -188,7 +190,7 @@
     asm("stmfd sp!, {r4,lr} ");	
     asm("add r0,r0,#%a0" : : "i"  _FOFF(TSyncPointBase, iStageAndCPUWaitingMask)); // skip vt
     asm("ldr r4,[r0,#4]");
-    asm("ldr r4,[r4]")
+    asm("ldr r4,[r4]");
     __DATA_MEMORY_BARRIER_Z__(r12);          // 
     asm("1: ");
 	LDREX(2,0);                             // r2 =  iStageAndCPUWaitingMask, r4 = iAllEnagedCpusMask
@@ -208,7 +210,7 @@
 #endif		
     asm("2: ");
     asm("ands r3,r2,#0x80000000");          // MSB set?	
-    asm("ldmfdne sp!, {r4,pc}");            // yup return
+    asm("bne 4f");                          // branch to return
 #ifdef SYNCPOINT_WFE		
 	__DATA_MEMORY_BARRIER__(r12);
 	ARM_WFE;
@@ -222,7 +224,8 @@
     __DATA_MEMORY_BARRIER__(r12);           // ensure that's written
 	ARM_SEV;
 #endif	
-    asm("ldmfd sp!, {r4,pc}");            // yup return
+    asm("4:");
+    asm("ldmfd sp!, {r4,pc}");            // return
     }
 	
 	
@@ -292,7 +295,7 @@
     }
 #endif
 
-__NAKED__  TInt TIdleSupport::IntPending()  
+__NAKED__  TUint32 TIdleSupport::IntPending()  
     {
 	asm("ldr    r1,__KCPUIFAddr");//r1 = address of iBaseIntIfAddress
 	asm("ldr	r1, [r1]");//r1 = address of Hw GIC CPU interrupt interface base address
--- a/kernel/eka/drivers/power/smppower/sample_idlehandler/smpidlehandler.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/drivers/power/smppower/sample_idlehandler/smpidlehandler.cpp	Mon May 10 11:40:53 2010 +0100
@@ -41,6 +41,11 @@
       }
 
 
+DSMPIdleHandler::~DSMPIdleHandler()
+	{
+	}
+
+
 /**
    To be called after construction in a thread context with interrupts enabled. Power extension entry point ideal
    @pre thread context ints enable no kernel locks or fast mutexes
--- a/kernel/eka/drivers/xyin/d_xyin.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/drivers/xyin/d_xyin.cpp	Mon May 10 11:40:53 2010 +0100
@@ -1,4 +1,4 @@
-// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
+// Copyright (c) 1998-2010 Nokia Corporation and/or its subsidiary(-ies).
 // All rights reserved.
 // This component and the accompanying materials are made available
 // under the terms of the License "Eclipse Public License v1.0"
@@ -19,6 +19,7 @@
 
 #include <drivers/xyin.h>
 #include <kernel/kern_priv.h>
+#include <hal_data.h>
 
 _LIT(KLitDigitiser,"Digitiser");
 
@@ -50,7 +51,8 @@
 	:	DPowerHandler(KLitDigitiser),
 		iMsgQ(rxMsg,this,NULL,1),
 		iSampleDfc(sampleDfc,this,5),
-		iPenUpDfc(penUpDfc,this,5)
+		iPenUpDfc(penUpDfc,this,5),
+		iOrientation(HALData::EDigitiserOrientation_default)
 	{
 //	iBufferIndex=0;
 //	iLastPos=TPoint(0,0);
@@ -358,6 +360,26 @@
 		case EDigitiserHalXYState:
 			kumemput32(a1, (TBool*)&iPointerOn, sizeof(TBool));
 			break;
+			
+		// a2 = TBool aSet (ETrue for setting, EFalse for retrieval) 
+		// a1 = TDigitiserOrientation (set)
+		// a1 = &TDigitiserOrientation (get)
+		case EDigitiserOrientation:	
+			if ((TBool)a2)
+				{
+				// Set the orientation attribute
+				// In case user thread, check it has WDD capability
+				if(!Kern::CurrentThreadHasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by Hal function EDigitiserOrientation")))
+					return KErrPermissionDenied;
+				iOrientation = (TInt)a1;
+				}
+			else
+				{
+				// Get the orientation attribute; copy it safely into user memory
+				kumemput32(a1, &iOrientation, sizeof(TInt));	
+				}
+			break; 
+			
 		default:
 			r=KErrNotSupported;
 			break;
--- a/kernel/eka/eabi/ekernsmp.def	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/eabi/ekernsmp.def	Mon May 10 11:40:53 2010 +0100
@@ -1187,4 +1187,6 @@
 	_ZN13KernCoreStats9ConfigureEj @ 1186 NONAME
 	_ZN5NKern21SetNumberOfActiveCpusEi @ 1187 NONAME
 	_ZN3Arm14SetIdleHandlerEPFvPvmPVvES0_ @ 1188 NONAME
+	_ZN4Epoc11FreeRamZoneEj @ 1189 NONAME
+	_ZN16TBitMapAllocator14SelectiveAllocEii @ 1190 NONAME
 
--- a/kernel/eka/eabi/ekernu.def	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/eabi/ekernu.def	Mon May 10 11:40:53 2010 +0100
@@ -1179,4 +1179,6 @@
 	_ZN13KernCoreStats9EnterIdleEv @ 1178 NONAME
 	_ZN13KernCoreStats9LeaveIdleEj @ 1179 NONAME
 	_ZN3Arm14SetIdleHandlerEPFvPvmES0_ @ 1180 NONAME
+	_ZN4Epoc11FreeRamZoneEj @ 1181 NONAME
+	_ZN16TBitMapAllocator14SelectiveAllocEii @ 1182 NONAME
 
--- a/kernel/eka/include/d32usbdi_errors.h	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/include/d32usbdi_errors.h	Mon May 10 11:40:53 2010 +0100
@@ -201,6 +201,7 @@
 		EUsbDevMonDeviceAttachDenied					= 41,
 		EUsbHubDriverZeroInterfaceTokenProduced			= 42,
 		EUsbInterfaceSuccessfulPipeOpenWithNoPipe		= 43,
+		EFailedToLockHostStackInWaitDeviceStateMutex    = 44,
 		};
 
 	_LIT(KUsbDescFaultCat, "USBDesc-Fault");
--- a/kernel/eka/include/drivers/dma_v1.h	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/include/drivers/dma_v1.h	Mon May 10 11:40:53 2010 +0100
@@ -349,27 +349,66 @@
 	virtual void DoUnlink(SDmaDesHdr& aHdr);
 	virtual void DoDfc(DDmaRequest& aCurReq, SDmaDesHdr*& aCompletedHdr) = 0;
 	/**
-	   This function allows the Platform Specific Layer (PSL) to control the
-	   power management of the channel or its controller by overriding the
-	   PIL's default implementation (which does nothing) and making appropriate
-	   use of the Power Resource Manager (PRM).
+		This function allows the Platform Specific Layer (PSL) to control the
+		power management of the channel or its controller by overriding the
+		PIL's default implementation (which does nothing) and making
+		appropriate use of the Power Resource Manager (PRM).
+
+		The function gets called by the PIL whenever the channel's queued
+		requests count has changed in a significant way, either before the
+		channel's Transfer() method is invoked for a request on a previously
+		empty request queue, or immediately after the request count has become
+		zero because of request cancellation or completion.
+
+		Depending on the current and previous observed values of
+		iQueuedRequests, the PSL may power down or power up the channel.
 
-	   The function gets called by the PIL whenever the channel's queued
-	   requests count has changed in a significant way, either before the
-	   channel's Transfer() method is invoked for a request on a previously
-	   empty request queue, or immediately after the request count has become
-	   zero because of request cancellation or completion.
+		Note that iQueuedRequests gets accessed and changed by different
+		threads, so the PSL needs to take the usual precautions when evaluating
+		the variable's value. Also, due to the multithreaded framework
+		architecture, there is no guarantee that the function calls always
+		arrive at the PSL level in the strict chronological order of
+		iQueuedRequests being incremented/decremented in the PIL, i.e. it might
+		happen that the PSL finds iQueuedRequests to have the same value in two
+		or more consecutive calls (that's why the previous observed value needs
+		to be locally available and taken into account). It is however promised
+		that before any actual transfer commences the PSL will find the request
+		count to be greater than zero and that after the last request has
+		finished it will be found to be zero.
+
+		None of the internal DMA framework mutexes is being held by the PIL
+		when calling this function.
 
-	   Depending on the current value of iQueuedRequests, the PSL may power
-	   down or power up the channel. Note that iQueuedRequests gets accessed
-	   and changed by different threads, so the PSL needs to take the usual
-	   precautions when evaluating the variable's value.
+		Here is an example implementation for a derived channel class:
+
+		@code
+
+		class TFooDmaChannel : public TDmaSgChannel
+			{
+			DMutex* iDmaMutex;
+			TInt iPrevQueuedRequests;
+			virtual void QueuedRequestCountChanged();
+			};
 
-	   None of the internal DMA framework mutexes is being held by the PIL when
-	   calling this function.
+		void TFooDmaChannel::QueuedRequestCountChanged()
+			{
+			Kern::MutexWait(*iDmaMutex);
+			if ((iQueuedRequests > 0) && (iPrevQueuedRequests == 0))
+				{
+				IncreasePowerCount(); // Base port specific
+				}
+			else if ((iQueuedRequests == 0) && (iPrevQueuedRequests > 0))
+				{
+				DecreasePowerCount(); // Base port specific
+				}
+			iPrevQueuedRequests = iQueuedRequests;
+			Kern::MutexSignal(*iDmaMutex);
+			}
 
-	   @see iQueuedRequests
-	 */
+		@endcode
+
+		@see iQueuedRequests
+	*/
 	virtual void QueuedRequestCountChanged();
 #if defined(__CPU_ARM) && !defined(__EABI__)
 	inline virtual ~TDmaChannel() {}	// kill really annoying warning
--- a/kernel/eka/include/drivers/dma_v2.h	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/include/drivers/dma_v2.h	Mon May 10 11:40:53 2010 +0100
@@ -1197,14 +1197,53 @@
 		empty request queue, or immediately after the request count has become
 		zero because of request cancellation or completion.
 
-		Depending on the current value of iQueuedRequests, the PSL may power
-		down or power up the channel. Note that iQueuedRequests gets accessed
-		and changed by different threads, so the PSL needs to take the usual
-		precautions when evaluating the variable's value.
+		Depending on the current and previous observed values of
+		iQueuedRequests, the PSL may power down or power up the channel.
+
+		Note that iQueuedRequests gets accessed and changed by different
+		threads, so the PSL needs to take the usual precautions when evaluating
+		the variable's value. Also, due to the multithreaded framework
+		architecture, there is no guarantee that the function calls always
+		arrive at the PSL level in the strict chronological order of
+		iQueuedRequests being incremented/decremented in the PIL, i.e. it might
+		happen that the PSL finds iQueuedRequests to have the same value in two
+		or more consecutive calls (that's why the previous observed value needs
+		to be locally available and taken into account). It is however promised
+		that before any actual transfer commences the PSL will find the request
+		count to be greater than zero and that after the last request has
+		finished it will be found to be zero.
 
 		None of the internal DMA framework mutexes is being held by the PIL
 		when calling this function.
 
+		Here is an example implementation for a derived channel class:
+
+		@code
+
+		class TFooDmaChannel : public TDmaSgChannel
+			{
+			DMutex* iDmaMutex;
+			TInt iPrevQueuedRequests;
+			virtual void QueuedRequestCountChanged();
+			};
+
+		void TFooDmaChannel::QueuedRequestCountChanged()
+			{
+			Kern::MutexWait(*iDmaMutex);
+			if ((iQueuedRequests > 0) && (iPrevQueuedRequests == 0))
+				{
+				IncreasePowerCount(); // Base port specific
+				}
+			else if ((iQueuedRequests == 0) && (iPrevQueuedRequests > 0))
+				{
+				DecreasePowerCount(); // Base port specific
+				}
+			iPrevQueuedRequests = iQueuedRequests;
+			Kern::MutexSignal(*iDmaMutex);
+			}
+
+		@endcode
+
 		@see iQueuedRequests
 	*/
 	virtual void QueuedRequestCountChanged();
--- a/kernel/eka/include/drivers/smppower/idlehelper.h	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/include/drivers/smppower/idlehelper.h	Mon May 10 11:40:53 2010 +0100
@@ -1,24 +1,26 @@
 /*
 * Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
 * All rights reserved.
-* This component and the accompanying materials are made available
-* under the terms of "Eclipse Public License v1.0"
-* which accompanies this distribution, and is available
-* at the URL "http://www.eclipse.org/legal/epl-v10.html".
+* This material, including documentation and any related computer
+* programs, is protected by copyright controlled by Nokia. All
+* rights are reserved. Copying, including reproducing, storing
+* adapting or translating, any or all of this material requires the
+* prior written consent of Nokia. This material also contains
+* confidential information which may not be disclosed to others
+* without the prior written consent of Nokia.
 *
 * Initial Contributors:
 * Nokia Corporation - initial contribution.
 *
 * Contributors:
 *
-* Description:  
+* Description: 
 * os\kernelhwsrv\kernel\eka\include\drivers\smpidlehelper.h
 * Helper classes required to implement CPU idle
 * functionality in a SMP BSP.
 *
 */
 
-
 /**
  @file
  @prototype
@@ -174,7 +176,7 @@
 	static void ClearIdleIPI();
 	static void DoWFI();//puts current CPU in wait for interrupt state
 	static TBool IsIntPending();
-	static TInt	IntPending();
+	static TUint32	IntPending();
 	static TUint32 GetTimerCount();//HW timer can be used for tracing
 	//Atomic checks used to synchronise cores going idle
 	static TBool ClearLocalAndCheckGlobalIdle(TUint32);
--- a/kernel/eka/include/drivers/smppower/sample_idlehandler/smpidlehandler.h	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/include/drivers/smppower/sample_idlehandler/smpidlehandler.h	Mon May 10 11:40:53 2010 +0100
@@ -1,24 +1,26 @@
 /*
 * Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
 * All rights reserved.
-* This component and the accompanying materials are made available
-* under the terms of "Eclipse Public License v1.0"
-* which accompanies this distribution, and is available
-* at the URL "http://www.eclipse.org/legal/epl-v10.html".
+* This material, including documentation and any related computer
+* programs, is protected by copyright controlled by Nokia. All
+* rights are reserved. Copying, including reproducing, storing
+* adapting or translating, any or all of this material requires the
+* prior written consent of Nokia. This material also contains
+* confidential information which may not be disclosed to others
+* without the prior written consent of Nokia.
 *
 * Initial Contributors:
 * Nokia Corporation - initial contribution.
 *
 * Contributors:
 *
-* Description:  
+* Description: 
 * os/kernelhwsrv/kernel/eka/include/drivers/smppower/sample_idlehandler/smpidlehandler.h
 * Example of a generic idle handler layer
 *
 */
 
 
-
 #ifndef __SMPIDLEHANDLER_H__
 #define __SMPIDLEHANDLER_H__
 
@@ -41,6 +43,8 @@
     
 
     DSMPIdleHandler();
+	virtual ~DSMPIdleHandler();
+
     /*
       called to init and bind the idle handler. After this call idle will be directed to idle handler
       @pre thread context, no locks no fast mutexes, interrupt on
--- a/kernel/eka/include/drivers/xyin.h	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/include/drivers/xyin.h	Mon May 10 11:40:53 2010 +0100
@@ -1,4 +1,4 @@
-// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
+// Copyright (c) 1998-2010 Nokia Corporation and/or its subsidiary(-ies).
 // All rights reserved.
 // This component and the accompanying materials are made available
 // under the terms of the License "Eclipse Public License v1.0"
@@ -27,6 +27,7 @@
 #include <platform.h>
 #include <e32hal.h>
 
+
 #ifdef _DEBUG
 //#define __DIGITISER_DEBUG1__
 //#define __DIGITISER_DEBUG2__
@@ -180,6 +181,7 @@
 	TState iState;
 	TInt iCount;
 	TUint8 iPointerOn;
+	TInt iOrientation;	 			// HALData::TDigitiserOrientation
 	};
 
 
--- a/kernel/eka/include/e32ver.h	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/include/e32ver.h	Mon May 10 11:40:53 2010 +0100
@@ -28,7 +28,7 @@
 
 const TInt KE32MajorVersionNumber=2;
 const TInt KE32MinorVersionNumber=0;
-const TInt KE32BuildVersionNumber=3076;
+const TInt KE32BuildVersionNumber=3079;
 
 const TInt KMachineConfigurationMajorVersionNumber=1;
 const TInt KMachineConfigurationMinorVersionNumber=0;
--- a/kernel/eka/include/kernel/kbma.h	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/include/kernel/kbma.h	Mon May 10 11:40:53 2010 +0100
@@ -39,6 +39,7 @@
 	IMPORT_C void Free(TInt aPos);
 	IMPORT_C void Alloc(TInt aStart, TInt aLength);
 	IMPORT_C void Free(TInt aStart, TInt aLength);
+	IMPORT_C TUint SelectiveAlloc(TInt aStart, TInt aLength);
 	IMPORT_C void SelectiveFree(TInt aStart, TInt aLength);
 	IMPORT_C TBool NotFree(TInt aStart, TInt aLength) const;
 	IMPORT_C TBool NotAllocated(TInt aStart, TInt aLength) const;
--- a/kernel/eka/include/kernel/kern_priv.h	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/include/kernel/kern_priv.h	Mon May 10 11:40:53 2010 +0100
@@ -2787,6 +2787,7 @@
 	static TUint NumberOfFreeDpPages();
 	static TUint NumberOfDirtyDpPages();
 	static TInt MovePage(TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest);
+	static TInt MoveAndAllocPage(TPhysAddr aAddr, TZonePageType aPageType);
 	static TInt DiscardPage(TPhysAddr aAddr, TUint aBlockZoneId, TBool aBlockRest);
 	static void RamZoneClaimed(SZone* aZone);
 	static TInt RamDefragFault(TAny* aExceptionInfo);
--- a/kernel/eka/include/memmodel/epoc/mmubase/mmubase.h	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/include/memmodel/epoc/mmubase/mmubase.h	Mon May 10 11:40:53 2010 +0100
@@ -410,6 +410,7 @@
 	TInt ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList);
 	TInt FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize);
 	TInt FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList);
+	TInt FreeRamZone(TUint aZoneId, TPhysAddr& aZoneBase, TUint& aZoneBytes);
 	TInt ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize);
 	TInt GetPageTableId(TPhysAddr aPtPhys);
 	void MapRamPage(TLinAddr aAddr, TPhysAddr aPage, TPte aPtePerm);
@@ -525,8 +526,8 @@
 public:
 	TInt AllocRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType, TUint aBlockedZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
 	TInt ZoneAllocRamPages(TUint* aZoneIdList, TUint aZoneIdCount, TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType);
-	TInt AllocContiguousRam(TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign, TUint aBlockedZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
-	TInt ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign);
+	TInt AllocContiguousRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign);
+	TInt ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign);
 
 public:
 	TInt iPageSize;				// page size in bytes
--- a/kernel/eka/include/memmodel/epoc/mmubase/ramalloc.h	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/include/memmodel/epoc/mmubase/ramalloc.h	Mon May 10 11:40:53 2010 +0100
@@ -142,8 +142,14 @@
 	void FreeRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType);
 	TInt AllocRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType, TUint aBlockedZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
 	TInt ZoneAllocRamPages(TUint* aZoneIdList, TUint aZoneIdCount, TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType);
-	TInt AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign=0, TUint aBlockZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
-	TInt ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign);
+	TInt AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TInt aAlign=0);
+#if !defined(__MEMMODEL_MULTIPLE__) && !defined(__MEMMODEL_MOVING__)
+	void BlockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages);
+	void UnblockSetAllocRuns(TUint& aOffset1, TUint& aOffset2, TUint aRunLength1, TUint aRunLength2, TUint& aAllocLength, TUint& aAllocStart);
+	void UnblockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages);
+	TBool ClearContiguousRegion(TPhysAddr aAddrBase, TPhysAddr aZoneBase, TUint aNumPages, TInt& aOffset);
+#endif
+	TInt ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign);
 #ifdef _DEBUG
 	void DebugDump();
 #endif
@@ -153,11 +159,12 @@
 	TInt GetZonePageCount(TUint aId, SRamZonePageCount& aPageData);
 	void ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldType, TZonePageType aNewType);
 #ifdef BTRACE_RAM_ALLOCATOR
-	void SendInitialBtraceLogs(void);
+	void DoBTracePrime(void);
 #endif
 	TInt GetZoneAddress(TUint aZoneId, TPhysAddr& aPhysBase, TUint& aNumPages);
 	TInt HalFunction(TInt aFunction, TAny* a1, TAny* a2);
 	TInt NextAllocatedPage(SZone* aZone, TUint& aOffset, TZonePageType aType) const;
+	TInt NextAllocatedRun(SZone* aZone, TUint& aOffset, TUint aEndOffset, TZonePageType aType) const;
 	TUint GenDefragFreePages(TZonePageType aType) const;
 	SZone* GeneralDefragStart0(TGenDefragStage& aStage, TUint& aRequiredToBeDiscarded);
 	SZone* GeneralDefragNextZone0();
@@ -205,9 +212,7 @@
 	SDblQueLink* iZoneGeneralPrefLink;	/**< Link to the current RAM zone being defragged*/
 	SDblQueLink* iZoneGeneralTmpLink;	/**< Link to the current RAM zone being defragged*/
 	TUint iZoneGeneralStage;			/**< The current stage of any general defrag operation*/
-#ifdef _DEBUG
-	TBool iAllowBmaVerify;
-#endif
+	TUint iContiguousReserved;			/**< The count of the number of separate contiguous allocations that have reserved pages*/
 	};
 
 #endif
--- a/kernel/eka/include/u32hal.h	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/include/u32hal.h	Mon May 10 11:40:53 2010 +0100
@@ -1,4 +1,4 @@
-// Copyright (c) 1995-2009 Nokia Corporation and/or its subsidiary(-ies).
+// Copyright (c) 1995-2010 Nokia Corporation and/or its subsidiary(-ies).
 // All rights reserved.
 // This component and the accompanying materials are made available
 // under the terms of the License "Eclipse Public License v1.0"
@@ -2015,7 +2015,23 @@
     @see TPckgBuf
     @see TDigitiserInfoV02
     */
-	EDigitiserHal3DInfo
+	EDigitiserHal3DInfo,
+
+	/**
+	Gets or sets the digitiser driver's current orientation property.
+	Requirements of the HAL function handler's 2nd, 3rd and 4th parameters:
+
+	- TInt aFunction : This enum value.
+	- TAny* a1 : If setting, a TDigitiserOrientation value the driver should now use.
+			   : If getting, a pointer to a TDigitiserOrientation; the HAL
+			   : function needs to set it to the current value used in the driver.
+	- TAny* a2 : Cast to a TInt. Should be assigned the value
+			   : EFalse - get the property; ETrue - set the property.
+
+	@see HALData::TDigitiserOrientation
+	@capability WriteDeviceData To set the property; None to read
+	*/
+	EDigitiserOrientation
 
 	};
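
A hedged sketch of the raw calls the documented handler services, mirroring the
dispatch issued by the DigitiserOrientation() handler added in userhal.cpp above
(device number 0 assumed for illustration; requires e32svr.h and u32hal.h):

	TInt orientation;
	// Get: a1 points at the result, a2 is EFalse.
	TInt r = UserSvr::HalFunction(EHalGroupDigitiser, EDigitiserOrientation,
	                              &orientation, (TAny*)EFalse, 0);
	// Set: a1 carries the value itself, a2 is ETrue.
	r = UserSvr::HalFunction(EHalGroupDigitiser, EDigitiserOrientation,
	                         (TAny*)HALData::EDigitiserOrientation_180,
	                         (TAny*)ETrue, 0);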
 
--- a/kernel/eka/klib/bma.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/klib/bma.cpp	Mon May 10 11:40:53 2010 +0100
@@ -762,6 +762,49 @@
 	}
 
 
+/**	Allocates a specific range of bit positions.
+	
+	The specified range must lie within the total range for this allocator but it is
+	not necessary that all the positions are currently free.
+
+	@param	aStart	First position to allocate.
+	@param	aLength	Number of consecutive positions to allocate, must be >0.
+	@return The number of previously free positions that were allocated.
+ */
+EXPORT_C TUint TBitMapAllocator::SelectiveAlloc(TInt aStart, TInt aLength)
+	{
+	__ASSERT_ALWAYS(TUint(aStart) < TUint(iSize), TBMA_FAULT());
+	__ASSERT_ALWAYS(TUint(aStart + aLength) >= TUint(aStart), TBMA_FAULT());
+	__ASSERT_ALWAYS(TUint(aStart + aLength) <= TUint(iSize), TBMA_FAULT());
+	TInt wix = aStart >> 5;
+	TInt sbit = aStart & 31;
+	TUint32* pW = iMap + wix;
+	iAvail -= aLength;	// update free count assuming no positions already allocated
+	TInt ebit = sbit + aLength;
+	if (ebit < 32)
+		{
+		TUint32 b = ((0xffffffffu >> aLength) >> sbit) | ~(0xffffffffu >> sbit);
+		TUint32 w = *pW;
+		*pW = w & b;	// mark all positions allocated
+		TUint allocated = __e32_bit_count_32(~w & ~b);
+		iAvail += allocated;	// increase free count by number of positions already allocated
+		return aLength - allocated;
+		}
+	TUint32 b = ~(0xffffffffu >> sbit);
+	while (ebit > 0)
+		{
+		TUint32 w = *pW;
+		*pW++ = w & b;		// mark all positions allocated
+		TUint allocated = __e32_bit_count_32(~w & ~b);
+		iAvail += allocated;	// increase free count by number of positions already allocated
+		aLength -= allocated;
+		ebit -= 32;
+		b = (ebit >= 32)? 0 : 0xffffffff >> ebit;
+		}
+	return aLength;
+	}
+
+
 /** Copies a range from another allocator, mark remainder as occupied.
 
 	Values of bit positions from aFirst to aFirst+aLen-1 inclusive in allocator
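
A short usage sketch for the new SelectiveAlloc() API (illustrative values only;
TBitMapAllocator::New() and Alloc() are the existing kbma.h APIs):

	// 64 bit positions, all initially free.
	TBitMapAllocator* bma = TBitMapAllocator::New(64, ETrue);
	bma->Alloc(10, 4);                   // positions 10..13 now allocated
	TUint n = bma->SelectiveAlloc(8, 8); // allocate 8..15, tolerating overlap
	// n == 4: only positions 8, 9, 14 and 15 were previously free.
	delete bma;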
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mdefrag.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mdefrag.cpp	Mon May 10 11:40:53 2010 +0100
@@ -124,7 +124,8 @@
 
 TInt M::MovePage(TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
 	{
-	TInt r;
+	// Default return value: the page is a real RAM page but is not paged, managed or free.
+	TInt r = KErrNotSupported;
 
 	// get memory object corresponding to the page...
 	DMemoryObject* memory = 0;
@@ -136,30 +137,76 @@
 			{// The page is paged so let the pager handle it.
 			return ThePager.DiscardPage(pi, aBlockZoneId, aBlockRest);
 			}
-		if (pi->Type()==SPageInfo::EManaged)
-			memory = pi->Owner();
-		}
-	MmuLock::Unlock();
-
-	// Note, whilst we hold the RamAllocLock the page can't change it's use
-	// and we can safely assume that it still belongs to the memory object
-	// at a fixed page index.
-	// Also, as memory objects can't be destroyed whilst they still own pages
-	// we can safely access this object without taking an explicit referernce,
-	// i.e. we don't need to Open() the memory object.
-	if (!pi)
-		{// page info for aOld not found so aOld is not a RAM page...
-		r = KErrArgument;
-		}
-	else if(!memory)
-		{
-		// page does not have a memory manager, so we can't move it...
-		r = KErrNotSupported;
+		switch (pi->Type())
+			{
+			case SPageInfo::EManaged:
+				memory = pi->Owner();
+				// Note, whilst we hold the RamAllocLock the page can't change its use
+				// and we can safely assume that it still belongs to the memory object
+				// at a fixed page index.
+				// Also, as memory objects can't be destroyed whilst they still own pages
+				// we can safely access this object without taking an explicit reference,
+				// i.e. we don't need to Open() the memory object.
+				MmuLock::Unlock();
+				// move page...
+				r = memory->iManager->MovePage(memory, pi, aNew, aBlockZoneId, aBlockRest);
+				break;
+			case SPageInfo::EUnused:
+				r = KErrNotFound;	// This page is free so nothing to do.
+				// Fall through..
+			default:
+				MmuLock::Unlock();
+			}
 		}
 	else
-		{
-		// move page...
-		r = memory->iManager->MovePage(memory, pi, aNew, aBlockZoneId, aBlockRest);
+		{// page info for aOld not found so aOld is not a RAM page...
+		MmuLock::Unlock();
+		r = KErrArgument;
 		}
 	return r;
 	}
+
+
+TInt M::MoveAndAllocPage(TPhysAddr aAddr, TZonePageType aPageType)
+	{
+	// Default return value: the page is a real RAM page but is not paged, managed or free.
+	TInt r = KErrNotSupported;
+
+	// get memory object corresponding to the page...
+	DMemoryObject* memory = 0;
+	MmuLock::Lock();
+	SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aAddr & ~KPageMask);
+	if(pi)
+		{
+		if (pi->PagedState() != SPageInfo::EUnpaged)
+			{// The page is paged so let the pager handle it.
+			return ThePager.DiscardAndAllocPage(pi, aPageType);
+			}
+		switch (pi->Type())
+			{
+			case SPageInfo::EManaged:
+				memory = pi->Owner();
+				// Note, whilst we hold the RamAllocLock the page can't change its use
+				// and we can safely assume that it still belongs to the memory object
+				// at a fixed page index.
+				// Also, as memory objects can't be destroyed whilst they still own pages
+				// we can safely access this object without taking an explicit reference,
+				// i.e. we don't need to Open() the memory object.
+				MmuLock::Unlock();
+				// move page...
+				r = memory->iManager->MoveAndAllocPage(memory, pi, aPageType);
+				break;
+			case SPageInfo::EUnused:
+				r = KErrNone;	// This page is free so nothing to do.
+				// Fall through..
+			default:
+				MmuLock::Unlock();
+			}
+		}
+	else
+		{// page info for aAddr not found so aAddr is not a RAM page...
+		MmuLock::Unlock();
+		r = KErrArgument;
+		}
+	return r;
+	}
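
A hedged sketch of how a caller might interpret the return codes assigned by the
two routines above (hypothetical caller; the codes are those set in M::MovePage()):

	TInt r = M::MovePage(oldAddr, newAddr, KRamZoneInvalidId, EFalse);
	switch (r)
		{
		case KErrNone:         break; // page moved, newAddr is valid
		case KErrNotFound:     break; // page was already free
		case KErrNotSupported: break; // real RAM page, but not movable or paged
		case KErrArgument:     break; // not a RAM page at all
		default:               break; // codes propagated from the pager
		}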
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mmanager.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mmanager.cpp	Mon May 10 11:40:53 2010 +0100
@@ -66,7 +66,7 @@
 	}
 
 
-TInt DMemoryManager::AddPages(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/, TPhysAddr* /*aPages*/)
+TInt DMemoryManager::AddPages(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/, const TPhysAddr* /*aPages*/)
 	{
 	return KErrNotSupported;
 	}
@@ -134,6 +134,13 @@
 	return KErrNotSupported;
 	}
 
+
+TInt DMemoryManager::MoveAndAllocPage(DMemoryObject*, SPageInfo*, TZonePageType)
+	{
+	return KErrNotSupported;
+	}
+
+
 TZonePageType DMemoryManager::PageType()
 	{// This should not be invoked on memory managers that do not use the methods
 	// AllocPages() and FreePages().
@@ -721,6 +728,7 @@
 public:
 	// from DMemoryManager...
 	virtual TInt MovePage(DMemoryObject* aMemory, SPageInfo* aOldPageInfo, TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest);
+	virtual TInt MoveAndAllocPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TZonePageType aPageType);
 	virtual TInt HandleFault(	DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping, 
 								TUint aMapInstanceCount, TUint aAccessPermissions);
 	virtual TZonePageType PageType();
@@ -887,6 +895,18 @@
 	}
 
 
+TInt DMovableMemoryManager::MoveAndAllocPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TZonePageType aPageType)
+	{
+	TPhysAddr newPage;
+	TInt r = MovePage(aMemory, aPageInfo, newPage, KRamZoneInvalidId, EFalse);
+	if (r == KErrNone)
+		{
+		TheMmu.MarkPageAllocated(aPageInfo->PhysAddr(), aPageType);
+		}
+	return r;
+	}
+
+
 TInt DMovableMemoryManager::HandleFault(DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping, 
 										TUint aMapInstanceCount, TUint aAccessPermissions)
 	{
@@ -1161,7 +1181,7 @@
 public:
 	// from DMemoryManager...
 	virtual void Destruct(DMemoryObject* aMemory);
-	virtual TInt AddPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages);
+	virtual TInt AddPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, const TPhysAddr* aPages);
 	virtual TInt AddContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr aPhysAddr);
 	virtual TInt RemovePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages);
 	virtual TInt Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);
@@ -1226,14 +1246,14 @@
 	}
 
 
-TInt DHardwareMemoryManager::AddPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages)
+TInt DHardwareMemoryManager::AddPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, const TPhysAddr* aPages)
 	{
 	TRACE2(("DHardwareMemoryManager::AddPages(0x%08x,0x%x,0x%x,?)",aMemory, aIndex, aCount));
 	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
 
 	// validate arguments...
-	TPhysAddr* pages = aPages;
-	TPhysAddr* pagesEnd = aPages+aCount;
+	const TPhysAddr* pages = aPages;
+	const TPhysAddr* pagesEnd = aPages+aCount;
 	TPhysAddr checkMask = 0;
 	do checkMask |= *pages++;
 	while(pages<pagesEnd);
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mmanager.h	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mmanager.h	Mon May 10 11:40:53 2010 +0100
@@ -153,7 +153,7 @@
 			KErrNotSupported if the manager doesn't support this function,
 			otherwise one of the system wide error codes.
 	*/
-	virtual TInt AddPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages);
+	virtual TInt AddPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, const TPhysAddr* aPages);
 
 	/**
 	Add a contiguous range of physical memory pages to a region of a memory object.
@@ -359,11 +359,37 @@
 	virtual void Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs) =0;
 
 	/**
-	@todo
+	Attempt to move the page specified to a new physical location.  The new physical
+	location for the page to be moved to is allocated by this method.  However,
+	aBlockZoneId and aBlockRest can be used to control which RAM zone the new
+	location is in.
+
+	@param aMemory		The memory object that owns the page.
+	@param aOldPageInfo	The page info for the physical page to move.
+	@param aNewPage 	On success this will hold the physical address of the new 
+						location for the page.
+	@param aBlockZoneId The ID of a RAM zone not to allocate the new page into.
+	@param aBlockRest 	When set to ETrue the search for a new page will stop if it 
+						ever needs to look at aBlockZoneId.
+	@return KErrNone on success, KErrInUse if the page couldn't be moved, 
+			or KErrNoMemory if it wasn't possible to allocate a new page.
 	*/
 	virtual TInt MovePage(DMemoryObject* aMemory, SPageInfo* aOldPageInfo, TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest);
 
 	/**
+	Move the page specified to a new physical location and mark the page as 
+	allocated as type aPageType.
+	
+	@param aMemory		The memory object that owns the page.
+	@param aPageInfo	The page info for the physical page to move.
+	@param aPageType	The type of the page to allocate into the original physical
+						location of the page to move.
+	@return KErrNone on success, KErrInUse if the page couldn't be moved, 
+			or KErrNoMemory if it wasn't possible to allocate a new page.
+	*/
+	virtual TInt MoveAndAllocPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TZonePageType aPageType);
+
+	/**
 	Return the TZonePageType of the pages that the memory manager can allocate and free.
 	*/
 	virtual TZonePageType PageType();
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mmu.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mmu.cpp	Mon May 10 11:40:53 2010 +0100
@@ -312,7 +312,8 @@
 	}
 
 
-#if 0
+#ifdef FMM_VERIFY_RAM
+// Attempt to write to each unused RAM page and verify the contents.
 void Mmu::VerifyRam()
 	{
 	Kern::Printf("Mmu::VerifyRam() pass 1");
@@ -474,6 +475,10 @@
 					__ASSERT_ALWAYS(r==KErrNone || r==KErrAlreadyExists, Panic(EBadMappedPageAfterBoot));
 					if(pi->Type()==SPageInfo::EUnused)
 						pi->SetFixed();
+#ifdef BTRACE_KERNEL_MEMORY
+					if(r == KErrNone)
+						++Epoc::KernelMiscPages;
+#endif
 					}
 				}
 			}
@@ -500,21 +505,24 @@
 	r = K::MutexCreate(iPhysMemSyncMutex, KLitPhysMemSync, NULL, EFalse, KMutexOrdSyncPhysMem);
 	if(r!=KErrNone)
 		Panic(EPhysMemSyncMutexCreateFailed);
-//	VerifyRam();
+
+#ifdef FMM_VERIFY_RAM
+	VerifyRam();
+#endif
 	}
 
 
 void Mmu::Init2FinalCommon()
 	{
 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init2FinalCommon"));
-	// hack, reduce free memory to <2GB...
+	// Reduce free memory to <2GB...
 	while(FreeRamInPages()>=0x80000000/KPageSize)
 		{
 		TPhysAddr dummyPage;
 		TInt r = iRamPageAllocator->AllocRamPages(&dummyPage,1, EPageFixed);
 		__NK_ASSERT_ALWAYS(r==KErrNone);
 		}
-	// hack, reduce total RAM to <2GB...
+	// Reduce total RAM to <2GB...
 	if(TheSuperPage().iTotalRamSize<0)
 		TheSuperPage().iTotalRamSize = 0x80000000-KPageSize;
 
@@ -540,6 +548,27 @@
 	iDefrag->Init3(TheMmu.iRamPageAllocator);
 	}
 
+
+void Mmu::BTracePrime(TUint aCategory)
+	{
+	(void)aCategory;
+
+#ifdef BTRACE_RAM_ALLOCATOR
+	// Must check for -1 as that is the default value of aCategory for
+	// BTrace::Prime() which is intended to prime all categories that are 
+	// currently enabled via a single invocation of BTrace::Prime().
+	if(aCategory==BTrace::ERamAllocator || (TInt)aCategory == -1)
+		{
+		NKern::ThreadEnterCS();
+		RamAllocLock::Lock();
+		iRamPageAllocator->DoBTracePrime();
+		RamAllocLock::Unlock();
+		NKern::ThreadLeaveCS();
+		}
+#endif
+	}
+
+
 //
 // Utils
 //
@@ -593,7 +622,7 @@
 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?,%d)", aZoneIdCount, aBytes, aPhysAddr, aAlign));
 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 
-	TInt r = iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aBytes, aPhysAddr, EPageFixed, aAlign);
+	TInt r = iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aBytes, aPhysAddr, aAlign);
 	if(r!=KErrNone)
 		iRamAllocFailed = ETrue;
 	else
@@ -619,18 +648,7 @@
 		PagesAllocated(aPageList, aNumPages, (Mmu::TRamAllocFlags)EMemAttStronglyOrdered);
 
 		// update page infos...
-		TUint flash = 0;
-		TPhysAddr* pageEnd = aPageList + aNumPages;
-		MmuLock::Lock();
-		TPhysAddr* page = aPageList;
-		while (page < pageEnd)
-			{
-			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
-			TPhysAddr pagePhys = *page++;
-			__NK_ASSERT_DEBUG(pagePhys != KPhysAddrInvalid);
-			SPageInfo::FromPhysAddr(pagePhys)->SetPhysAlloc();
-			}
-		MmuLock::Unlock();
+		SetAllocPhysRam(aPageList, aNumPages);
 		}
 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam returns %d",r));
 	return r;
@@ -853,6 +871,22 @@
 	}
 
 
+/**
+Mark a page as being allocated to a particular page type.
+
+NOTE - This page should not be used until PagesAllocated() has been invoked on it.
+
+@param aPhysAddr		The physical address of the page to mark as allocated.
+@param aZonePageType	The type of the page to mark as allocated.
+*/
+void Mmu::MarkPageAllocated(TPhysAddr aPhysAddr, TZonePageType aZonePageType)
+	{
+	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::MarkPageAllocated(0x%x, %d)", aPhysAddr, aZonePageType));
+	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
+	iRamPageAllocator->MarkPageAllocated(aPhysAddr, aZonePageType);
+	}
+
+
 void Mmu::FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType)
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeRam(?,%d)",aCount));
@@ -872,20 +906,31 @@
 		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
 		PageFreed(pi);
 
-		// If this is an old page of a page being moved that was previously pinned
-		// then make sure it is freed as discardable otherwise despite DPager::DonatePages()
-		// having marked it as discardable it would be freed as movable.
-		__NK_ASSERT_DEBUG(pi->PagedState() != SPageInfo::EPagedPinnedMoved || aCount == 1);
-		if (pi->PagedState() == SPageInfo::EPagedPinnedMoved)
-			aZonePageType = EPageDiscard;
-
-		if(ThePager.PageFreed(pi)==KErrNone)
-			--aCount; // pager has dealt with this page, so one less for us
-		else
+		switch (ThePager.PageFreed(pi))
 			{
-			// All paged pages should have been dealt with by the pager above.
-			__NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged);
-			*pagesOut++ = pagePhys; // store page address for freeing later
+			case KErrNone: 
+				--aCount; // pager has dealt with this page, so one less for us
+				break;
+			case KErrCompletion:
+				// This was a pager controlled page but it is no longer required.
+				__NK_ASSERT_DEBUG(aZonePageType == EPageMovable || aZonePageType == EPageDiscard);
+				__NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged);
+				if (aZonePageType == EPageMovable)
+					{// This page was donated to the pager so have to free it here
+					// as aZonePageType is incorrect for this page but aPages may 
+					// contain a mixture of movable and discardable pages.
+					MmuLock::Unlock();
+					iRamPageAllocator->FreeRamPages(&pagePhys, 1, EPageDiscard);
+					aCount--; // We've freed this page here so one less to free later
+					flash = 0;	// reset flash count as we released the mmulock.
+					MmuLock::Lock();
+					break;
+					}
+				// fall through..
+			default:
+				// Free this page..
+				__NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged);
+				*pagesOut++ = pagePhys; // store page address for freeing later
 			}
 		}
 	MmuLock::Unlock();
@@ -904,21 +949,15 @@
 		__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns simulated OOM %d",KErrNoMemory));
 		return KErrNoMemory;
 		}
-	// Only the page sets EAllocNoPagerReclaim and it shouldn't allocate contiguous ram.
+	// Only the pager sets EAllocNoPagerReclaim and it shouldn't allocate contiguous ram.
 	__NK_ASSERT_DEBUG(!(aFlags&EAllocNoPagerReclaim));
 #endif
-	TInt r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift);
-	if(r==KErrNoMemory && aCount > KMaxFreeableContiguousPages)
-		{
-		// flush paging cache and retry...
-		ThePager.FlushAll();
-		r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift);
-		}
+	TInt r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, aAlign+KPageShift);
 	if(r!=KErrNone)
 		iRamAllocFailed = ETrue;
 	else
 		PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
-	__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguouseRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr));
+	__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr));
 	return r;
 	}
 
@@ -963,19 +1002,7 @@
 		return r;
 
 	// update page infos...
-	TPhysAddr* pages = aPages;
-	TPhysAddr* pagesEnd = pages+aCount;
-	MmuLock::Lock();
-	TUint flash = 0;
-	while(pages<pagesEnd)
-		{
-		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
-		TPhysAddr pagePhys = *pages++;
-		__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
-		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
-		pi->SetPhysAlloc();
-		}
-	MmuLock::Unlock();
+	SetAllocPhysRam(aPages, aCount);
 
 	return KErrNone;
 	}
@@ -1004,6 +1031,19 @@
 	MmuLock::Unlock();
 
 	iRamPageAllocator->FreeRamPages(aPages,aCount, EPageFixed);
+
+#ifdef BTRACE_KERNEL_MEMORY
+	if (BTrace::CheckFilter(BTrace::EKernelMemory))
+		{// Only loop round each page if EKernelMemory tracing is enabled
+		pages = aPages;
+		pagesEnd = aPages + aCount;
+		while (pages < pagesEnd)
+			{
+			BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, KPageSize, *pages++);
+			Epoc::DriverAllocdPhysRam -= KPageSize;
+			}
+		}
+#endif
 	}
 
 
@@ -1015,17 +1055,7 @@
 		return r;
 
 	// update page infos...
-	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
-	SPageInfo* piEnd = pi+aCount;
-	TUint flash = 0;
-	MmuLock::Lock();
-	while(pi<piEnd)
-		{
-		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
-		pi->SetPhysAlloc();
-		++pi;
-		}
-	MmuLock::Unlock();
+	SetAllocPhysRam(aPhysAddr, aCount);
 
 	return KErrNone;
 	}
@@ -1050,7 +1080,25 @@
 		}
 	MmuLock::Unlock();
 
-	iRamPageAllocator->FreePhysicalRam(aPhysAddr, aCount << KPageShift);
+	TUint bytes = aCount << KPageShift;
+	iRamPageAllocator->FreePhysicalRam(aPhysAddr, bytes);
+
+#ifdef BTRACE_KERNEL_MEMORY
+	BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, bytes, aPhysAddr);
+	Epoc::DriverAllocdPhysRam -= bytes;
+#endif
+	}
+
+
+TInt Mmu::FreeRamZone(TUint aZoneId)
+	{
+	TPhysAddr zoneBase;
+	TUint zonePages;
+	TInt r = iRamPageAllocator->GetZoneAddress(aZoneId, zoneBase, zonePages);
+	if (r != KErrNone)
+		return r;
+	FreePhysicalRam(zoneBase, zonePages);
+	return KErrNone;
 	}
 
 
@@ -1058,25 +1106,11 @@
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ClaimPhysicalRam(0x%08x,0x%x,0x%08x)",aPhysAddr,aCount,aFlags));
 	aPhysAddr &= ~KPageMask;
-	TInt r = iRamPageAllocator->ClaimPhysicalRam(aPhysAddr,(aCount << KPageShift));
-	if(r!=KErrNone)
+	TInt r = iRamPageAllocator->ClaimPhysicalRam(aPhysAddr, aCount << KPageShift);
+	if(r != KErrNone)
 		return r;
 
-	PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
-
-	// update page infos...
-	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
-	SPageInfo* piEnd = pi+aCount;
-	TUint flash = 0;
-	MmuLock::Lock();
-	while(pi<piEnd)
-		{
-		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
-		pi->SetPhysAlloc();
-		++pi;
-		}
-	MmuLock::Unlock();
-
+	AllocatedPhysicalRam(aPhysAddr, aCount, aFlags);
 	return KErrNone;
 	}
 
@@ -1088,17 +1122,59 @@
 	PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
 
 	// update page infos...
+	SetAllocPhysRam(aPhysAddr, aCount);
+	}
+
+
+void Mmu::SetAllocPhysRam(TPhysAddr aPhysAddr, TUint aCount)
+	{
 	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
 	SPageInfo* piEnd = pi+aCount;
 	TUint flash = 0;
 	MmuLock::Lock();
 	while(pi<piEnd)
 		{
-		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
+		MmuLock::Flash(flash, KMaxPageInfoUpdatesInOneGo);
 		pi->SetPhysAlloc();
 		++pi;
 		}
 	MmuLock::Unlock();
+
+#ifdef BTRACE_KERNEL_MEMORY
+	TUint bytes = aCount << KPageShift;
+	BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, bytes, aPhysAddr);
+	Epoc::DriverAllocdPhysRam += bytes;
+#endif
+	}
+
+
+void Mmu::SetAllocPhysRam(TPhysAddr* aPageList, TUint aNumPages)
+	{
+	TPhysAddr* page = aPageList;
+	TPhysAddr* pageEnd = aPageList + aNumPages;
+	TUint flash = 0;
+	MmuLock::Lock();
+	while (page < pageEnd)
+		{
+		MmuLock::Flash(flash, KMaxPageInfoUpdatesInOneGo / 2);
+		TPhysAddr pagePhys = *page++;
+		__NK_ASSERT_DEBUG(pagePhys != KPhysAddrInvalid);
+		SPageInfo::FromPhysAddr(pagePhys)->SetPhysAlloc();
+		}
+	MmuLock::Unlock();
+
+#ifdef BTRACE_KERNEL_MEMORY
+	if (BTrace::CheckFilter(BTrace::EKernelMemory))
+		{// Only loop round each page if EKernelMemory tracing is enabled
+		TPhysAddr* pAddr = aPageList;
+		TPhysAddr* pAddrEnd = aPageList + aNumPages;
+		while (pAddr < pAddrEnd)
+			{
+			BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, KPageSize, *pAddr++);
+			Epoc::DriverAllocdPhysRam += KPageSize;
+			}
+		}
+#endif
 	}
 
 
@@ -1187,20 +1263,10 @@
 	__NK_ASSERT_DEBUG(iSize>=1);
 	__NK_ASSERT_DEBUG(iCount==0);
 
-	TUint colour = aColour&KPageColourMask;
-	TLinAddr addr = iLinAddr+(colour<<KPageShift);
-	TPte* pPte = iPtePtr+colour;
-	iColour = colour;
-
-	__ASSERT_DEBUG(*pPte==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse));
-	*pPte = (aPage&~KPageMask) | iBlankPte;
-	CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
-	InvalidateTLBForPage(addr|KKernelOsAsid);
-
-	iCount = 1;
-	return addr;
+	return Map(aPage, aColour, iBlankPte);
 	}
 
+
 /**
 Map a single physical page into this temporary mapping using the given page table entry (PTE) value.
 
@@ -1215,16 +1281,17 @@
 	{
 	__NK_ASSERT_DEBUG(iSize>=1);
 	__NK_ASSERT_DEBUG(iCount==0);
-
-	TUint colour = aColour&KPageColourMask;
-	TLinAddr addr = iLinAddr+(colour<<KPageShift);
-	TPte* pPte = iPtePtr+colour;
+	__NK_ASSERT_DEBUG(!(aBlankPte & ~KPageMask));
+
+	TUint colour = aColour & KPageColourMask;
+	TLinAddr addr = iLinAddr + (colour << KPageShift);
+	TPte* pPte = iPtePtr + colour;
 	iColour = colour;
 
-	__ASSERT_DEBUG(*pPte==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse));
-	*pPte = (aPage&~KPageMask) | aBlankPte;
+	__ASSERT_DEBUG(*pPte == KPteUnallocatedEntry, MM::Panic(MM::ETempMappingAlreadyInUse));
+	*pPte = (aPage & ~KPageMask) | aBlankPte;
 	CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
-	InvalidateTLBForPage(addr|KKernelOsAsid);
+	InvalidateTLBForPage(addr | KKernelOsAsid);
 
 	iCount = 1;
 	return addr;
@@ -1290,19 +1357,16 @@
 	TUint colour = iColour;
 	TLinAddr addr = iLinAddr+(colour<<KPageShift);
 	TPte* pPte = iPtePtr+colour;
-	TUint count = iCount;
-
-	while(count)
+
+	while(iCount)
 		{
 		*pPte = KPteUnallocatedEntry;
 		CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
 		InvalidateTLBForPage(addr|KKernelOsAsid);
 		addr += KPageSize;
 		++pPte;
-		--count;
+		--iCount;
 		}
-
-	iCount = 0;
 	}
 
 #ifdef __SMP__
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mmu.h	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mmu.h	Mon May 10 11:40:53 2010 +0100
@@ -84,7 +84,8 @@
 		Page is in an indeterminate state.
 
 		A page is placed into this state by Mmu::PagesAllocated when it is
-		allocated (ceases to be #EUnused). Once the page
+		allocated (ceases to be #EUnused). Once the page has been assigned to 
+		its new use, its type will be updated.
 		*/
 		EUnknown,
 
@@ -218,7 +219,7 @@
 		{
 		/**
 		The memory object which owns this page.
-		Used for always set for #EManaged pages and can be set for #PhysAlloc pages.
+		Always set for #EManaged pages and can be set for #PhysAlloc pages.
 		*/
 		DMemoryObject* iOwner;
 
@@ -297,7 +298,7 @@
 	*/
 	FORCE_INLINE static SPageInfo* FromLink(SDblQueLink* aLink)
 		{
-		return (SPageInfo*)((TInt)aLink-_FOFF(SPageInfo,iLink));
+		return _LOFF(aLink, SPageInfo, iLink);
 		}
 
 	//
@@ -483,7 +484,7 @@
 		}
 
 	/**
-	Reutrns a pointer to the SPageInfo of the page that this page is shadowing.
+	Returns a pointer to the SPageInfo of the page that this page is shadowing.
 
 	@return	A pointer to the SPageInfo that this page is shadowing
 
@@ -570,7 +571,7 @@
 		}
 
 	/**
-	The the pages #iModifier value.
+	Set the page's #iModifier value.
 
 	#iModifier is cleared to zero whenever the usage or paging state of the page
 	changes. So if a thread sets this to a suitable unique value (e.g. the address
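
In practice the idiom reads: publish a unique token while holding the MmuLock, drop the lock to do slow work, then re-take the lock and ask whether anything touched the page in between. A minimal sketch, assuming the SPageInfo::SetModifier()/CheckModified() accessors and hypothetical slow work:

	TBool TrySlowOperationOnPage(SPageInfo* aPageInfo)
		{
		TUint token;					// address is unique to this attempt
		MmuLock::Lock();
		aPageInfo->SetModifier(&token);
		MmuLock::Unlock();				// page state may now change under us

		DoSlowWorkWithoutLock();		// hypothetical

		MmuLock::Lock();
		// True if the page's usage or paging state changed since
		// SetModifier() - the caller must then retry or abort.
		TBool interfered = aPageInfo->CheckModified(&token);
		MmuLock::Unlock();
		return !interfered;
		}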
@@ -1433,7 +1434,7 @@
 	*/
 	FORCE_INLINE static SPageTableInfo* FromFreeLink(SDblQueLink* aLink)
 		{
-		return (SPageTableInfo*)((TInt)aLink-_FOFF(SPageTableInfo,iUnused));
+		return _LOFF(aLink, SPageTableInfo, iUnused);
 		}
 
 	/**
@@ -1710,9 +1711,9 @@
 	*/
 	static FORCE_INLINE void UnlockGuardStart()
 		{
-		#ifdef _DEBUG
-			++UnlockGuardNest;
-		#endif
+#ifdef _DEBUG
+		++UnlockGuardNest;
+#endif
 		}
 
 	/**
@@ -1721,18 +1722,18 @@
 
 	@see UnlockGuardStart
 
-	@return True if the MmuLock was released between a previous #UnlockGuardStart
+	@return EFalse if the MmuLock was released between a previous #UnlockGuardStart
 			and the call to this function.
 	*/
 	static FORCE_INLINE TBool UnlockGuardEnd()
 		{
-		#ifdef _DEBUG
-			__NK_ASSERT_DEBUG(UnlockGuardNest);
-			--UnlockGuardNest;
-			return UnlockGuardFail==0;
-		#else
-			return true;
-		#endif
+#ifdef _DEBUG
+		__NK_ASSERT_DEBUG(UnlockGuardNest);
+		--UnlockGuardNest;
+		return UnlockGuardFail==0;
+#else
+		return ETrue;
+#endif
 		}
 
 private:
@@ -1742,10 +1743,10 @@
 	*/
 	static FORCE_INLINE void UnlockGuardCheck()
 		{
-		#ifdef _DEBUG
-			if(UnlockGuardNest)
-				UnlockGuardFail = true;
-		#endif
+#ifdef _DEBUG
+		if(UnlockGuardNest)
+			UnlockGuardFail = ETrue;
+#endif
 		}
 
 public:
@@ -1950,6 +1951,8 @@
 	void Init2FinalCommon();
 	void Init3();
 
+	void BTracePrime(TUint aCategory);
+
 	static void Panic(TPanic aPanic);
 
 	static TInt HandlePageFault(TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions, TAny* aExceptionInfo);
@@ -1959,6 +1962,7 @@
 
 	TInt AllocRam(	TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags, TZonePageType aZonePageType, 
 					TUint aBlockZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
+	void MarkPageAllocated(TPhysAddr aPhysAddr, TZonePageType aZonePageType);
 	void FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType);
 	TInt AllocContiguousRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags);
 	void FreeContiguousRam(TPhysAddr aPhysAddr, TUint aCount);
@@ -1971,6 +1975,7 @@
 	TInt ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList);
 	TInt RamHalFunction(TInt aFunction, TAny* a1, TAny* a2);	
 	void ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldPageType, TZonePageType aNewPageType);
+	TInt FreeRamZone(TUint aZoneId);
 
 	TInt AllocPhysicalRam(TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags);
 	void FreePhysicalRam(TPhysAddr* aPages, TUint aCount);
@@ -1978,7 +1983,11 @@
 	void FreePhysicalRam(TPhysAddr aPhysAddr, TUint aCount);
 	TInt ClaimPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags);
 	void AllocatedPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags);
+private:
+	void SetAllocPhysRam(TPhysAddr aPhysAddr, TUint aCount);
+	void SetAllocPhysRam(TPhysAddr* aPageList, TUint aNumPages);
 
+public:
 	TLinAddr MapTemp(TPhysAddr aPage, TUint aColour, TUint aSlot=0);
 	void UnmapTemp(TUint aSlot=0);
 	void RemoveAliasesForPageTable(TPhysAddr aPageTable);
@@ -2131,8 +2140,11 @@
 	TUint iRamAllocInitialFreePages;
 
 	friend class RamAllocLock;
+
+#ifdef FMM_VERIFY_RAM
 private:
 	void VerifyRam();
+#endif
 	};
 
 /**
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp	Mon May 10 11:40:53 2010 +0100
@@ -307,6 +307,7 @@
 
 TInt DPager::PageFreed(SPageInfo* aPageInfo)
 	{
+	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 	__NK_ASSERT_DEBUG(CheckLists());
 
@@ -351,7 +352,7 @@
 		// This page was pinned when it was moved but it has not been returned 
 		// to the free pool yet so make sure it is...
 		aPageInfo->SetPagedState(SPageInfo::EUnpaged);	// Must be unpaged before returned to free pool.
-		return KErrNotFound;
+		return KErrCompletion;
 
 	default:
 		__NK_ASSERT_DEBUG(0);
@@ -365,6 +366,14 @@
 		SetClean(*aPageInfo);
 		}
 
+	if (iNumberOfFreePages > 0)
+		{// The paging cache is not at the minimum size so safe to let the 
+		// ram allocator free this page.
+		iNumberOfFreePages--;
+		aPageInfo->SetPagedState(SPageInfo::EUnpaged);
+		return KErrCompletion;
+		}
+	// Need to hold onto this page as have reached the page cache limit.
 	// add as oldest page...
 	aPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
 	iOldestCleanList.Add(&aPageInfo->iLink);
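
The reading implied by these changes (not spelled out in the hunk itself) is that PageFreed() now distinguishes two outcomes: KErrCompletion means the pager has relinquished the page and the RAM allocator may free it, while KErrNone means the pager kept it on the live list. A caller-side sketch under that assumption, with a hypothetical free routine:

	// Caller already holds the RamAllocLock and MmuLock, as the new
	// asserts in PageFreed() require.
	TInt r = ThePager.PageFreed(pi);
	if (r == KErrCompletion)
		FreePhysicalPage(pi->PhysAddr());	// hypothetical: page is ours to free
	// r == KErrNone: the pager retained the page - do not free it.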
@@ -413,8 +422,8 @@
 #ifdef _DEBUG
 		if (!IsPageTableUnpagedRemoveAllowed(aPageInfo))
 			__NK_ASSERT_DEBUG(0);
+#endif
 		break;
-#endif
 	default:
 		__NK_ASSERT_DEBUG(0);
 		return;
@@ -803,6 +812,20 @@
 	}
 
 
+TInt DPager::DiscardAndAllocPage(SPageInfo* aPageInfo, TZonePageType aPageType)
+	{
+	TInt r = DiscardPage(aPageInfo, KRamZoneInvalidId, EFalse);
+	if (r == KErrNone)
+		{
+		TheMmu.MarkPageAllocated(aPageInfo->PhysAddr(), aPageType);
+		}
+	// Flash the ram alloc lock as we may have had to write a page out to swap.
+	RamAllocLock::Unlock();
+	RamAllocLock::Lock();
+	return r;
+	}
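
DiscardAndAllocPage() packages the discard-then-reallocate step the new contiguous allocator needs: the discardable page's contents are dropped (or written to swap) and the same physical page is immediately re-marked with the caller's page type. A minimal caller sketch, assuming the RamAllocLock is already held:

	TInt StealAsFixed(SPageInfo* aPageInfo)
		{
		// May write the page to swap and flashes the RamAllocLock, so no
		// state guarded by that lock can be cached across this call.
		TInt r = ThePager.DiscardAndAllocPage(aPageInfo, EPageFixed);
		// On KErrNone the physical page is now a fixed allocation owned
		// by the caller; its previous paged contents are gone.
		return r;
		}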
+
+
 static TBool DiscardCanStealPage(SPageInfo* aOldPageInfo, TBool aBlockRest)
 	{
  	// If the page is pinned or if the page is dirty and a general defrag is being performed then
@@ -1002,6 +1025,9 @@
 	__NK_ASSERT_DEBUG(iNumberOfFreePages>0);
 	--iNumberOfFreePages;
 
+	// The page must be unpaged, otherwise it wasn't successfully removed 
+	// from the live list.
+	__NK_ASSERT_DEBUG(aPageInfo.PagedState() == SPageInfo::EUnpaged);
 	MmuLock::Unlock();
 
 	TPhysAddr pagePhys = aPageInfo.PhysAddr();
@@ -2068,6 +2094,7 @@
 	}
 
 
+// WARNING THIS METHOD MAY HOLD THE RAM ALLOC LOCK FOR EXCESSIVE PERIODS.  DON'T USE THIS IN ANY PRODUCTION CODE.
 void DPager::FlushAll()
 	{
 	NKern::ThreadEnterCS();
@@ -2108,7 +2135,9 @@
 					}
 				++pi;
 				if(((TUint)pi&(0xf<<KPageInfoShift))==0)
+					{
 					MmuLock::Flash(); // every 16 page infos
+					}
 				}
 			while(pi<piEnd);
 			}
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mpager.h	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mpager.h	Mon May 10 11:40:53 2010 +0100
@@ -353,6 +353,15 @@
 	*/
 	TInt DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest);
 
+	/**
+	Attempt to discard the specified page and then allocate a page of type aPageType
+	in its place.
+	
+	@param aPageInfo	The page info of the page to discard.
+	@param aPageType 	The new page type to allocate into aPageInfo's physical address.
+
+	@return KErrNone if the page was successfully discarded and reallocated,
+	otherwise one of the system-wide error codes.
+	*/
+	TInt DiscardAndAllocPage(SPageInfo* aPageInfo, TZonePageType aPageType);
+
 
 	/**
 	Update any live list links to replace the old page with the new page.
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mptalloc.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mptalloc.cpp	Mon May 10 11:40:53 2010 +0100
@@ -54,10 +54,6 @@
 uses memory from #ThePager which will reclaim paged memory if necessary.
 Providing the live list always has #DPager::iMinYoungPages, this guarantees that
 handling page faults can never fail by running out of memory.
-
-TODO: In really pathological situations page table allocation can fail due to
-being out of virtual address space to map the table, this needs to be prevented
-from happening when handling demand paging faults.
 */
 
 
@@ -177,6 +173,14 @@
 	else
 		{// Allocate fixed pages as page tables aren't movable.
 		r = TheMmu.AllocRam(&pagePhys, 1, aMemory->RamAllocFlags(), EPageFixed);
+
+#ifdef BTRACE_KERNEL_MEMORY
+		if (r == KErrNone)
+			{
+			BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, KPageSize);
+			++Epoc::KernelMiscPages;
+			}
+#endif
 		}
 	RamAllocLock::Unlock();
 
@@ -241,7 +245,14 @@
 		if(aDemandPaged)
 			ThePager.PageInFreePages(&pagePhys,1);
 		else
+			{
 			TheMmu.FreeRam(&pagePhys, 1, EPageFixed);
+
+#ifdef BTRACE_KERNEL_MEMORY
+			BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, KPageSize);
+			--Epoc::KernelMiscPages;
+#endif
+			}
 		r = 1;
 		}
 
@@ -362,11 +373,8 @@
 	{
 	iReserveCount = aReserveCount;
 	iDemandPaged = aDemandPaged;
-	while(iFreeCount<aReserveCount)
-		if(!aAllocator->AllocReserve(*this))
-			{
-			__NK_ASSERT_ALWAYS(0);
-			}
+	while(iFreeCount < aReserveCount)
+		__NK_ASSERT_ALWAYS(aAllocator->AllocReserve(*this));
 	}
 
 
--- a/kernel/eka/memmodel/epoc/mmubase/mmubase.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/memmodel/epoc/mmubase/mmubase.cpp	Mon May 10 11:40:53 2010 +0100
@@ -483,7 +483,7 @@
 TInt MmuBase::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam() size=%x align=%d",aSize,aAlign));
-	TInt r=AllocContiguousRam(aSize, aPhysAddr, EPageFixed, aAlign);
+	TInt r=AllocContiguousRam(aSize, aPhysAddr, aAlign);
 	if (r!=KErrNone)
 		{
 		iAllocFailed=ETrue;
@@ -516,7 +516,7 @@
 TInt MmuBase::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam() size=0x%x align=%d", aSize, aAlign));
-	TInt r = ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, EPageFixed, aAlign);
+	TInt r = ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aAlign);
 	if (r!=KErrNone)
 		{
 		iAllocFailed=ETrue;
@@ -725,20 +725,19 @@
 	}
 
 
-TInt MmuBase::AllocContiguousRam(TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign, TUint aBlockedZoneId, TBool aBlockRest)
+TInt MmuBase::AllocContiguousRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
 	{
 #ifdef _DEBUG
 	if(K::CheckForSimulatedAllocFail())
 		return KErrNoMemory;
 #endif
-	__NK_ASSERT_DEBUG(aPageType == EPageFixed);
 	TUint contigPages = (aSize + KPageSize - 1) >> KPageShift;
-	TInt r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aPageType, aAlign, aBlockedZoneId, aBlockRest);
+	TInt r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aAlign);
 	if (r == KErrNoMemory && contigPages > KMaxFreeableContiguousPages)
 		{// Allocation failed; as this is a large allocation, flush the RAM cache 
 		// and retry, since a large allocation wouldn't have discarded pages.
 		iRamCache->FlushAll();
-		r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aPageType, aAlign, aBlockedZoneId, aBlockRest);
+		r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aAlign);
 		}
 	return r;
 	}
@@ -750,16 +749,15 @@
 @param aZoneIdCount	The number of IDs listed in aZoneIdList
 @param aSize		The number of bytes to allocate
 @param aPhysAddr 	Will receive the physical base address of the allocated RAM
-@param aPageType 	The type of the pages being allocated
 @param aAlign 		The log base 2 alignment required
 */
-TInt MmuBase::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign)
+TInt MmuBase::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
 	{
 #ifdef _DEBUG
 	if(K::CheckForSimulatedAllocFail())
 		return KErrNoMemory;
 #endif
-	return iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aPageType, aAlign);
+	return iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aAlign);
 	}
 
 SPageInfo* SPageInfo::SafeFromPhysAddr(TPhysAddr aAddress)
--- a/kernel/eka/memmodel/epoc/mmubase/ramalloc.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/memmodel/epoc/mmubase/ramalloc.cpp	Mon May 10 11:40:53 2010 +0100
@@ -354,13 +354,13 @@
 					aZone->iAllocPages[EPageFixed],	aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
 		Panic(EZonesCountErr);
 		}
-	__ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
 	__KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));
 	__KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], 
 					aZone->iAllocPages[EPageFixed],	aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
 
-	if (iAllowBmaVerify)
+	if (!iContiguousReserved)
 		{
+		__ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
 		TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
 		TUint allocPages;
 		if (aType == EPageFixed || aType == EPageUnknown)
@@ -495,13 +495,13 @@
 					aZone->iAllocPages[EPageFixed],	aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
 		Panic(EZonesCountErr);
 		}
-	__ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
 	__KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));
 	__KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], 
 					aZone->iAllocPages[EPageFixed],	aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
 
-	if (iAllowBmaVerify)
+	if (!iContiguousReserved)
 		{
+		__ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
 		TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
 		TUint allocPages;
 		if (aType == EPageFixed || aType == EPageUnknown)
@@ -968,7 +968,7 @@
 	// Temporarily fill preference list so SetPhysicalRamState can succeed
 #ifdef _DEBUG
 	// Block bma verifications as bma and alloc counts aren't consistent yet.
-	iAllowBmaVerify = EFalse;
+	iContiguousReserved = 1;
 #endif
 	const SZone* const lastZone = iZones + iNumZones;
 	zone = iZones;
@@ -984,7 +984,7 @@
 		}
 #ifdef _DEBUG
 	// Only now is it safe to enable bma verifications
-	iAllowBmaVerify = ETrue;
+	iContiguousReserved = 0;
 #endif
 
 	///////////////////////////////////////////////////////////////////////////
@@ -1135,6 +1135,7 @@
 		}
 	}
 
+
 TInt DRamAllocator::MarkPageAllocated(TPhysAddr aAddr, TZonePageType aType)
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("DRamAllocator::MarkPageAllocated %08x",aAddr));
@@ -1160,7 +1161,12 @@
 		return KErrAlreadyExists;			// page is already allocated
 		}
 	bmaAll.Alloc(n,1);
-	bmaType.Alloc(n,1);
+	if (bmaType.NotAllocated(n,1))
+		bmaType.Alloc(n,1);
+#ifdef _DEBUG
+	else // Allow this page to already be reserved in bmaType as AllocContiguousRam() may have done this.
+		__NK_ASSERT_DEBUG(aType == EPageFixed);
+#endif
 	--iTotalFreeRamPages;
 	ZoneAllocPages(z, 1, aType);
 	__KTRACE_OPT(KMMU,Kern::Printf("Total free RAM pages now = %d",iTotalFreeRamPages));
@@ -1171,6 +1177,7 @@
 	return KErrNone;
 	}
 
+
 TInt DRamAllocator::FreeRamPage(TPhysAddr aAddr, TZonePageType aType)
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("FreeRamPage %08x",aAddr));
@@ -1201,17 +1208,27 @@
 	__KTRACE_OPT(KMMU2,Kern::Printf("Zone index %d page index %04x",z-iZones,n));
 	TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]);
 	TBitMapAllocator& bmaType = *(z->iBma[aType]);
-	bmaAll.Free(n);
+
 	bmaType.Free(n);
-	++iTotalFreeRamPages;
-	ZoneFreePages(z, 1, aType);
-
+	if (iContiguousReserved && aType != EPageFixed && z->iBma[EPageFixed]->NotFree(n, 1))
+		{// This page has been reserved by AllocContiguousRam() so don't free it
+		// but allocate it as fixed.
+		ZoneFreePages(z, 1, aType);
+		ZoneAllocPages(z, 1, EPageFixed);
+		}
+	else
+		{
+		bmaAll.Free(n);
+		++iTotalFreeRamPages;
+		ZoneFreePages(z, 1, aType);	
+		}
 #ifdef BTRACE_RAM_ALLOCATOR
 	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocFreePage, aType, aAddr);
 #endif
 	return KErrNone;
 	}
 
+
 void DRamAllocator::FreeRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType)
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("FreeRamPages count=%08x",aNumPages));
@@ -1259,11 +1276,37 @@
 			pa += KPageSize;
 			}
 		__KTRACE_OPT(KMMU2,Kern::Printf("%d consecutive pages, zp_rem=%x, %d remaining pages",n,zp_rem,aNumPages));
-		bmaAll.Free(ix,n);
 		TBitMapAllocator& bmaType = *(z->iBma[aType]);
 		bmaType.Free(ix,n);
-		iTotalFreeRamPages += n;
-		ZoneFreePages(z, n, aType);
+
+		if (iContiguousReserved && aType != EPageFixed)
+			{// See if a page has been reserved by AllocContiguousRam() in this range.
+			TUint pagesFreed = 0;
+			TUint allocStart = ix;
+			TUint freeOffset = ix;
+			TUint endOffset = ix + n - 1;
+			while (freeOffset <= endOffset)
+				{
+				TUint runLength = NextAllocatedRun(z, allocStart, endOffset, EPageFixed);
+				if (allocStart > freeOffset)
+					{
+					TUint freed = allocStart - freeOffset;
+					bmaAll.Free(freeOffset, freed);
+					pagesFreed += freed;
+					}
+				allocStart += runLength;
+				freeOffset = allocStart;
+				}
+			iTotalFreeRamPages += pagesFreed;
+			ZoneFreePages(z, n, aType);
+			ZoneAllocPages(z, n - pagesFreed, EPageFixed);
+			}
+		else
+			{
+			bmaAll.Free(ix,n);
+			iTotalFreeRamPages += n;
+			ZoneFreePages(z, n, aType);
+			}
 #ifdef BTRACE_RAM_ALLOCATOR
 		BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocFreePages, aType, n, first_pa);
 #endif
@@ -1273,6 +1316,7 @@
 #endif
 	}
 
+
 /**
 	Attempt to clear up to the required number of discardable or movable pages
 	from the RAM zone.
@@ -1464,7 +1508,7 @@
 			{// Allocating as part of a general defragmentation and
 			// can't allocate without using a RAM zone less preferable than
 			// the current least prefeable RAM zone with movable and/or 
-			//discardable.
+			// discardable.
 			__NK_ASSERT_DEBUG(numMissing);
 			goto exit;
 			}
@@ -1679,29 +1723,173 @@
 	return r;
 	}
 
+
+#if !defined(__MEMMODEL_MULTIPLE__) && !defined(__MEMMODEL_MOVING__)
+void DRamAllocator::BlockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages)
+	{
+	// Shouldn't be asked to block zero pages, addrEndPage would be wrong if we did.
+	__NK_ASSERT_DEBUG(aNumPages);
+	TPhysAddr addr = aAddrBase;
+	TPhysAddr addrEndPage = aAddrBase + ((aNumPages - 1) << KPageShift);
+	TInt tmpOffset;
+	SZone* endZone = GetZoneAndOffset(addrEndPage, tmpOffset);
+	SZone* tmpZone;
+	do
+		{
+		tmpZone = GetZoneAndOffset(addr, tmpOffset);
+		__NK_ASSERT_DEBUG(tmpZone != NULL);
+		TUint runLength = 	(addrEndPage < tmpZone->iPhysEnd)? 
+							((addrEndPage - addr) >> KPageShift) + 1: 
+							tmpZone->iPhysPages - tmpOffset;
+		TUint reserved = tmpZone->iBma[KBmaAllPages]->SelectiveAlloc(tmpOffset, runLength);
+		if (reserved)
+			{
+#ifdef _DEBUG
+			TUint runEnd = tmpOffset + runLength;
+			TUint free = 0;
+			for (TUint i = tmpOffset; i < runEnd; i++)
+				if (tmpZone->iBma[EPageMovable]->NotAllocated(i,1) && tmpZone->iBma[EPageDiscard]->NotAllocated(i,1))
+					free++;
+			__NK_ASSERT_DEBUG(free == reserved);
+#endif
+			ZoneAllocPages(tmpZone, reserved, EPageFixed);
+			iTotalFreeRamPages -= reserved;
+			}
+		tmpZone->iBma[EPageFixed]->Alloc(tmpOffset, runLength);
+		addr = tmpZone->iPhysEnd + 1;
+		}
+	while (tmpZone != endZone);
+	}
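
BlockContiguousRegion() relies on the new TBitMapAllocator::SelectiveAlloc(), declared for test export in t_tbma.h later in this patch: it allocates only those bits in the range that are currently free and returns how many it newly allocated, leaving already-allocated bits untouched. An illustrative fragment of those semantics (mirroring what TestSelectiveAlloc() below verifies):

	TBitMapAllocator* bma = TBitMapAllocator::New(8, ETrue);	// 8 bits, all free
	bma->Alloc(2, 2);							// bits 2 and 3 now allocated
	TUint taken = bma->SelectiveAlloc(0, 8);	// takes only the 6 free bits
	__NK_ASSERT_DEBUG(taken == 6);
	__NK_ASSERT_DEBUG(bma->Avail() == 0);		// everything now allocated
	delete bma;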
+
+
+FORCE_INLINE void DRamAllocator::UnblockSetAllocRuns(	TUint& aOffset1, TUint& aOffset2, 
+														TUint aRunLength1, TUint aRunLength2, 
+														TUint& aAllocLength, TUint& aAllocStart)
+	{
+	aAllocStart = aOffset1;
+	aAllocLength = aRunLength1;
+	aOffset1 += aAllocLength;
+	if (aOffset1 == aOffset2)
+		{
+		aAllocLength += aRunLength2;
+		aOffset2 += aRunLength2;
+		aOffset1 = aOffset2;
+		}
+	} 	
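
A worked trace makes UnblockSetAllocRuns() clearer. Illustrative numbers only:

	// A discardable run starts at offset 4, length 2; a movable run
	// starts at offset 6, length 4 - i.e. the runs abut:
	//   in:  aOffset1=4, aRunLength1=2, aOffset2=6, aRunLength2=4
	//   aAllocStart=4, aAllocLength=2, aOffset1 advances to 6
	//   aOffset1 == aOffset2, so the two runs merge:
	//   aAllocLength=6, aOffset2=10, aOffset1=10
	// Result: one allocated span [4,10) with both cursors moved past it,
	// so the caller in UnblockContiguousRegion() frees nothing inside it.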
+
+
+void DRamAllocator::UnblockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages)
+	{
+	// Shouldn't be asked to unblock zero pages, addrEndPage would be wrong if we did.
+	__NK_ASSERT_DEBUG(aNumPages);
+	TPhysAddr addr = aAddrBase;
+	TPhysAddr addrEndPage = aAddrBase + ((aNumPages - 1) << KPageShift);
+	TInt tmpOffset;
+	SZone* endZone = GetZoneAndOffset(addrEndPage, tmpOffset);
+	SZone* tmpZone;
+	do
+		{
+		tmpZone = GetZoneAndOffset(addr, tmpOffset);
+		__NK_ASSERT_DEBUG(tmpZone != NULL);
+		TUint runLength = 	(addrEndPage < tmpZone->iPhysEnd)? 
+							((addrEndPage - addr) >> KPageShift) + 1: 
+							tmpZone->iPhysPages - tmpOffset;
+		TUint unreserved = 0;
+		TUint runEnd = tmpOffset + runLength - 1;
+		TUint freeOffset = tmpOffset;
+		TUint discardOffset = freeOffset;
+		TUint movableOffset = freeOffset;
+		__KTRACE_OPT(KMMU2, Kern::Printf("freeOff %d, runEnd %d", freeOffset, runEnd));
+		while (freeOffset <= runEnd)
+			{
+			TUint discardRun;
+			TUint movableRun;
+			discardRun = NextAllocatedRun(tmpZone, discardOffset, runEnd, EPageDiscard);
+			movableRun = NextAllocatedRun(tmpZone, movableOffset, runEnd, EPageMovable);
+			TUint allocLength;
+			TUint allocStart;
+			__KTRACE_OPT(KMMU2, Kern::Printf("disOff %d len %d movOff %d len %d", discardOffset, discardRun, movableOffset, movableRun));
+			if (discardOffset < movableOffset)
+				UnblockSetAllocRuns(discardOffset, movableOffset, discardRun, movableRun, allocLength, allocStart);
+			else
+				UnblockSetAllocRuns(movableOffset, discardOffset, movableRun, discardRun, allocLength, allocStart);
+
+			if (allocStart > freeOffset)
+				{
+				unreserved += allocStart - freeOffset;
+				tmpZone->iBma[KBmaAllPages]->Free(freeOffset, allocStart - freeOffset);
+				__NK_ASSERT_DEBUG(	!tmpZone->iBma[EPageMovable]->NotFree(freeOffset, allocStart - freeOffset) && 
+									!tmpZone->iBma[EPageDiscard]->NotFree(freeOffset, allocStart - freeOffset));
+				}
+			__KTRACE_OPT(KMMU2, Kern::Printf("disOff %d len %d movOff %d len %d start %d len %d", discardOffset, discardRun, movableOffset, movableRun, allocStart, allocLength));
+			freeOffset = allocStart + allocLength;
+			__KTRACE_OPT(KMMU2, Kern::Printf("freeOff %d", freeOffset));
+			}
+		tmpZone->iBma[EPageFixed]->Free(tmpOffset, runLength);
+		ZoneFreePages(tmpZone, unreserved, EPageFixed);
+		iTotalFreeRamPages += unreserved;
+		addr = tmpZone->iPhysEnd + 1;
+		}
+	while (tmpZone != endZone);
+	}
+
+
+TBool DRamAllocator::ClearContiguousRegion(TPhysAddr aAddrBase, TPhysAddr aZoneBase, TUint aNumPages, TInt& aOffset)
+	{
+	TPhysAddr addr = aAddrBase;
+	TPhysAddr addrEnd = aAddrBase + (aNumPages << KPageShift);
+	TInt contigOffset = 0;
+	SZone* contigZone = GetZoneAndOffset(addr, contigOffset);
+	for (; addr != addrEnd; addr += KPageSize, contigOffset++)
+		{
+		if (contigZone->iPhysEnd < addr)
+			{
+			contigZone = GetZoneAndOffset(addr, contigOffset);
+			__NK_ASSERT_DEBUG(contigZone != NULL);
+			}
+
+		__NK_ASSERT_DEBUG(contigZone != NULL);
+		__NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotFree(contigOffset, 1));
+		__NK_ASSERT_DEBUG(SPageInfo::SafeFromPhysAddr(addr) != NULL);
+
+		// WARNING - This may flash the ram alloc mutex.
+		TInt exRet = M::MoveAndAllocPage(addr, EPageFixed);
+		if (exRet != KErrNone)
+			{// This page couldn't be moved or discarded so 
+			// restart the search at the page after this one.
+			__KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail contigOffset 0x%x exRet %d", contigOffset, exRet));
+			aOffset = (addr < aZoneBase)? 0 : contigOffset + 1;
+			break;
+			}
+		}
+	return addr == addrEnd;
+	}
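
These three helpers combine into the reserve-clear-rollback sequence that the rewritten AllocContiguousRam() below follows. A condensed restatement of that flow (pseudocode of the code that follows, not new behaviour):

	iContiguousReserved++;						// free paths now divert (see FreeRamPage)
	BlockContiguousRegion(addrBase, aNumPages);	// reserve the range as fixed
	if (ClearContiguousRegion(addrBase, zone->iPhysBase, aNumPages, offset))
		{
		iContiguousReserved--;					// every page moved or discarded:
		return KErrNone;						// the range now belongs to the caller
		}
	UnblockContiguousRegion(addrBase, aNumPages);	// roll the reservation back
	iContiguousReserved--;						// and continue searching from 'offset'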
+
+
 /**
 Search through the zones for the requested contiguous RAM, first in preference 
 order then, if that fails, in address order.
 
+No support for non-fixed pages as this will discard and move pages if required.
+
 @param aNumPages The number of contiguous pages to find
 @param aPhysAddr Will contain the base address of any contiguous run if found
-@param aType The page type of the memory to be allocated
 @param aAlign Alignment specified as the alignment shift
-@param aBlockedZoneId The ID of a zone that can't be allocated into, by default this has no effect
-@param aBlockRest Set to ETrue to stop allocation as soon as aBlockedZoneId is reached 
-in preference ordering.  EFalse otherwise.
 
 @return KErrNone on success, KErrNoMemory otherwise
 */	
-TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign, TUint aBlockedZoneId, TBool aBlockRest)
+TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TInt aAlign)
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam size %08x align %d",aNumPages,aAlign));
 
 	M::RamAllocIsLocked();
 
-	// No support for non-fixed pages as this will discard and move 
-	// pages if required.
-	__NK_ASSERT_DEBUG(aType == EPageFixed);
+	if ((TUint)aNumPages > iTotalFreeRamPages + M::NumberOfFreeDpPages())
+		{// Not enough free space and not enough freeable pages.
+		return KErrNoMemory;
+		}
+
 	TInt alignWrtPage = Max(aAlign - KPageShift, 0);
 	TUint32 alignmask = (1u << alignWrtPage) - 1;
 
@@ -1716,7 +1904,124 @@
 	TInt offset = 0;
 	iZoneTmpAddrIndex = -1;
 	iZoneTmpPrefLink = iZonePrefList.First();
-	while (NextAllocZone(zone, searchState, aType, aBlockedZoneId, aBlockRest))
+	while (NextAllocZone(zone, searchState, EPageFixed, KRamZoneInvalidId, EFalse))
+		{
+		// Be sure to start from scratch if zone not contiguous with previous zone
+		if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd))
+			{
+			carryAll = 0;
+			carryImmov = 0;
+			}
+		prevZone = zone;
+		TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
+		base = TInt(zone->iPhysBase >> KPageShift);
+		TInt runLength;
+		__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryAll=%08x offset=%08x", base, carryAll, offset));
+		offset = bmaAll.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryAll, runLength);
+		__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
+
+		if (offset >= 0)
+			{
+			// Have found enough contiguous pages so return address of physical page
+			// at the start of the region
+			aPhysAddr = TPhysAddr((base + offset - carryAll + alignmask) & ~alignmask) << KPageShift;
+			MarkPagesAllocated(aPhysAddr, aNumPages, EPageFixed);
+
+			__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
+#ifdef BTRACE_RAM_ALLOCATOR
+			BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr);
+#endif
+			return KErrNone;
+			}
+		// No run found when looking in just the free pages so see if this
+		// RAM zone could be used if pages were moved or discarded.
+		TBitMapAllocator& bmaImmov = *(zone->iBma[EPageFixed]);
+		offset = 0;	// Clear so searches whole of fixed BMA on the first pass.
+		do
+			{
+			__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryImmov=%08x offset=%08x", base, carryImmov, offset));
+			offset = bmaImmov.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryImmov, runLength, offset);
+			__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
+			if (offset >= 0)
+				{// Have found a run in immovable page bma so attempt to clear
+				// it for the allocation.
+				TPhysAddr addrBase = TPhysAddr((base + offset - carryImmov + alignmask) & ~alignmask) << KPageShift;
+				__KTRACE_OPT(KMMU2, Kern::Printf(">AllocContig fix run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread));
+				
+				// Block the contiguous region from being allocated.
+				iContiguousReserved++;
+				BlockContiguousRegion(addrBase, aNumPages);
+				if (ClearContiguousRegion(addrBase, zone->iPhysBase, aNumPages, offset))
+					{// Cleared all the required pages.
+					// Return address of physical page at the start of the region.
+					iContiguousReserved--;
+					aPhysAddr = addrBase;
+					__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
+#ifdef BTRACE_RAM_ALLOCATOR
+					BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr);
+#endif
+					__KTRACE_OPT(KMMU2, Kern::Printf("<AllocContig suc run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread));
+					return KErrNone;
+					}
+				else
+					{
+					// Unblock the contiguous region.
+					UnblockContiguousRegion(addrBase, aNumPages);
+					iContiguousReserved--;
+					__KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail offset 0x%x carryImmov %x", 
+														offset, carryImmov));
+					// Can't rely on RAM zone preference ordering being
+					// the same so clear carrys and restart search from
+					// within the current RAM zone or skip onto the next 
+					// one if at the end of this one.
+					carryImmov = 0;
+					carryAll = 0;
+					__KTRACE_OPT(KMMU2, Kern::Printf("<AllocContigfail run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread));
+					}
+				}
+			}
+		// Keep searching immovable page bma of the current RAM zone until 
+		// gone past end of RAM zone or no run can be found.
+		while (offset >= 0 && (TUint)offset < zone->iPhysPages);
+		}
+	return KErrNoMemory;
+	}
+
+#else
+
+/**
+Search through the zones for the requested contiguous RAM, first in preference 
+order then, if that fails, in address order.
+
+No support for non-fixed pages as this will discard and move pages if required.
+
+@param aNumPages The number of contiguous pages to find
+@param aPhysAddr Will contain the base address of any contiguous run if found
+@param aAlign Alignment specified as the alignment shift
+
+@return KErrNone on success, KErrNoMemory otherwise
+*/	
+TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TInt aAlign)
+	{
+	__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam size %08x align %d",aNumPages,aAlign));
+
+	M::RamAllocIsLocked();
+
+	TInt alignWrtPage = Max(aAlign - KPageShift, 0);
+	TUint32 alignmask = (1u << alignWrtPage) - 1;
+
+	// Attempt to find enough pages searching in preference order first then
+	// in address order
+	TZoneSearchState searchState = EZoneSearchPref;
+	SZone* zone;
+	SZone* prevZone = NULL;
+	TInt carryAll = 0;		// Carry for all pages bma, clear to start new run.
+	TInt carryImmov = 0;	// Carry for immovable pages bma, clear to start new run.
+	TInt base = 0;
+	TInt offset = 0;
+	iZoneTmpAddrIndex = -1;
+	iZoneTmpPrefLink = iZonePrefList.First();
+	while (NextAllocZone(zone, searchState, EPageFixed, KRamZoneInvalidId, EFalse))
 		{
 		// Be sure to start from scratch if zone not contiguous with previous zone
 		if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd))
@@ -1736,11 +2041,11 @@
 			{// Have found enough contiguous pages so return address of physical page
 			 // at the start of the region
 			aPhysAddr = TPhysAddr((base + offset - carryAll + alignmask) & ~alignmask) << KPageShift;
-			MarkPagesAllocated(aPhysAddr, aNumPages, aType);
+			MarkPagesAllocated(aPhysAddr, aNumPages, EPageFixed);
 
 			__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
 #ifdef BTRACE_RAM_ALLOCATOR
-			BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, aType, aNumPages, aPhysAddr);
+			BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr);
 #endif
 			return KErrNone;
 			}
@@ -1788,12 +2093,11 @@
 							contigZone = GetZoneAndOffset(addr, contigOffset);
 							__NK_ASSERT_DEBUG(contigZone != NULL);
 							}
-#ifdef _DEBUG			// This page shouldn't be allocated as fixed, only movable or discardable.
+						// This page shouldn't be allocated as fixed, only movable or discardable.
 						__NK_ASSERT_DEBUG(contigZone != NULL);
 						__NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotAllocated(contigOffset, 1));
-						SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(addr);
-						__NK_ASSERT_DEBUG(pageInfo != NULL);
-#endif
+						__NK_ASSERT_DEBUG(SPageInfo::SafeFromPhysAddr(addr) != NULL);
+
 						TPhysAddr newAddr;
 						TInt moveRet = M::MovePage(addr, newAddr, contigZone->iId, EFalse);
 						if (moveRet != KErrNone && moveRet != KErrNotFound)
@@ -1827,11 +2131,11 @@
 						{// Cleared all the required pages so allocate them.
 						// Return address of physical page at the start of the region.
 						aPhysAddr = addrBase;
-						MarkPagesAllocated(aPhysAddr, aNumPages, aType);
+						MarkPagesAllocated(aPhysAddr, aNumPages, EPageFixed);
 
 						__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
 #ifdef BTRACE_RAM_ALLOCATOR
-						BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, aType, aNumPages, aPhysAddr);
+						BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr);
 #endif
 						return KErrNone;
 						}
@@ -1844,6 +2148,7 @@
 		}
 	return KErrNoMemory;
 	}
+#endif // !defined(__MEMMODEL_MULTIPLE__) && !defined(__MEMMODEL_MOVING__)
 
 
 /**
@@ -1858,19 +2163,17 @@
 @param aZoneIdCount	The number of the IDs listed by aZoneIdList.
 @param aSize 		The number of contiguous bytes to find
 @param aPhysAddr 	Will contain the base address of the contiguous run if found
-@param aType 		The page type of the memory to be allocated
 @param aAlign 		Alignment specified as the alignment shift
 
 @return KErrNone on success, KErrNoMemory if allocation couldn't succeed or 
 the RAM zone has the KRamZoneFlagNoAlloc flag set.  KErrArgument if a zone of
 aZoneIdList doesn't exist or if aSize is larger than the size of the zone.
 */	
-TInt DRamAllocator::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign)
+TInt DRamAllocator::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam zones 0x%x size 0x%08x align %d",aZoneIdCount, aSize, aAlign));
 
 	M::RamAllocIsLocked();
-	__NK_ASSERT_DEBUG(aType == EPageFixed);
 
 
 	TUint numPages = (aSize + KPageSize - 1) >> KPageShift;
@@ -1930,11 +2233,11 @@
 	// Have found enough contiguous pages so mark the pages allocated and 
 	// return address of physical page at the start of the region.
 	aPhysAddr = TPhysAddr((base + offset - carry + alignmask) & ~alignmask) << KPageShift;
-	MarkPagesAllocated(aPhysAddr, numPages, aType);
+	MarkPagesAllocated(aPhysAddr, numPages, EPageFixed);
 
 	__KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam returns %08x",aPhysAddr));
 #ifdef BTRACE_RAM_ALLOCATOR
-	BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocZoneContiguousRam, aType, numPages, aPhysAddr);
+	BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocZoneContiguousRam, EPageFixed, numPages, aPhysAddr);
 #endif
 	return KErrNone;
 	}
@@ -2106,34 +2409,34 @@
 	// Makes things simpler for bma selection.
 	__NK_ASSERT_DEBUG(aType != EPageUnknown);
 
-	if (aOffset >= aZone->iPhysPages)
+	TUint zoneEndOffset = aZone->iPhysPages - 1;
+	if (aOffset > zoneEndOffset)
 		{// Starting point is outside the zone
 		return KErrArgument;
 		}
 
-	TUint offset = aOffset;
-	TUint endOffset = aZone->iPhysPages;
-	TUint endOffsetAligned = endOffset & KWordAlignMask;
+	TUint wordIndex = aOffset >> 5;
+	TUint endWordIndex = zoneEndOffset >> 5;
 
 	// Select the BMA to search, 
 	TUint bmaIndex = (aType == EPageTypes)? KBmaAllPages : aType;
-	TUint32* map = &(aZone->iBma[bmaIndex]->iMap[offset >> 5]);
-	TUint32 bits = *map++;
+	TUint32* map = &(aZone->iBma[bmaIndex]->iMap[wordIndex]);
+	TUint32* mapEnd = &(aZone->iBma[bmaIndex]->iMap[endWordIndex]);
+	TUint32 bits = *map;
 
 	// Set bits for pages before 'offset' (i.e. ones we want to ignore)...
-	bits |= ~(KMaxTUint32 >> (offset & ~KWordAlignMask));
+	bits |= ~(KMaxTUint32 >> (aOffset & ~KWordAlignMask));
 
 	// Find the first bit map word from aOffset in aZone with allocated pages
-	while (bits == KMaxTUint32 && offset < endOffsetAligned)
+	while (bits == KMaxTUint32 && map < mapEnd)
 		{
-		bits = *map++;
-		offset = (offset + 32) & KWordAlignMask;
+		bits = *++map;
 		}
 
-	if (offset >= endOffsetAligned && endOffset != endOffsetAligned)
+	if (map == mapEnd)
 		{// Have reached the last bit mask word so set the bits that are
 		//  outside of the zone so that they are ignored.
-		bits |= KMaxTUint32 >> (endOffset - endOffsetAligned);
+		bits |= (KMaxTUint32 >> (zoneEndOffset & ~KWordAlignMask)) >> 1;
 		}
 
 	if (bits == KMaxTUint32)
@@ -2143,25 +2446,104 @@
 
 	// Now we have bits with allocated pages in it so determine the exact 
 	// offset of the next allocated page
-	TUint32 mask = 0x80000000 >> (offset & ~KWordAlignMask);
-	while (bits & mask)
-		{
-		mask >>= 1;
-		offset++;
-		}
-
-	if (offset >= endOffset)
-		{// Reached the end of the zone without finding an allocated page after aOffset
-		return KErrNotFound;
-		}
-
-	// Should definitely have found an allocated page within aZone's pages
-	__NK_ASSERT_DEBUG(mask != 0 && !(bits & mask) && offset < aZone->iPhysPages);
-
-	aOffset = offset;
+	TInt msOne = __e32_find_ms1_32(~bits);
+	__NK_ASSERT_DEBUG(msOne >= 0);	// Must have at least one allocated page in the word.
+	TUint msOneOffset = 31 - msOne;
+	aOffset = ((map - aZone->iBma[bmaIndex]->iMap) << 5) + msOneOffset;
 	return KErrNone;
 	}
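
The arithmetic above depends on TBitMapAllocator's MSB-first layout: within each 32-bit map word, bit 31 represents the lowest page offset and a set bit means the page is free. A worked example with illustrative values:

	// Map word covering offsets 0..31, offsets 0-1 free, offset 2 allocated:
	//   bits   = 1100 0000 ... (binary, bit 31 first)
	//   ~bits  = 0011 1111 ...
	//   __e32_find_ms1_32(~bits) == 29	// most significant set bit
	//   msOneOffset = 31 - 29 == 2		// i.e. the page at offset 2
	// so aOffset resolves to (word index << 5) + 2.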
 
+
+/**
+Get the next run of pages in this zone that are allocated after aOffset.
+
+@param aZone	The zone to find the next allocated page in.
+@param aOffset	On entry this is the offset from which the next allocated
+				page in the zone should be found, on return it will be the offset 
+				of the next allocated page.
+@param aEndOffset The last offset within this RAM zone to check for allocated runs.
+@param aType	The type of the pages to find.
+@return The length of the allocated run found, or 0 if no pages of aType in
+the range [aOffset, aEndOffset] are allocated.
+*/
+TInt DRamAllocator::NextAllocatedRun(SZone* aZone, TUint& aOffset, TUint aEndOffset, TZonePageType aType) const
+	{
+	const TUint KWordAlignMask = KMaxTUint32 << 5;
+
+	M::RamAllocIsLocked();
+
+	__NK_ASSERT_DEBUG(aZone - iZones < (TInt)iNumZones);
+	// Makes things simpler for bma selection.
+	__NK_ASSERT_DEBUG(aType != EPageUnknown);
+
+	if (aOffset > aEndOffset)
+		{// UnblockContiguousRegion() has already searched the whole range for this page type.
+		return 0;
+		}
+
+	TUint wordIndex = aOffset >> 5;
+	TUint endWordIndex = aEndOffset >> 5;
+
+	// Select the BMA to search, 
+	TUint bmaIndex = (aType == EPageTypes)? KBmaAllPages : aType;
+	TUint32* map = &(aZone->iBma[bmaIndex]->iMap[wordIndex]);
+	TUint32* mapEnd = &(aZone->iBma[bmaIndex]->iMap[endWordIndex]);
+	TUint32 bits = *map;
+
+	// Set bits for pages before 'offset' (i.e. ones we want to ignore)...
+	bits |= ~(KMaxTUint32 >> (aOffset & ~KWordAlignMask));
+
+	// Find the first bit map word from aOffset in aZone with allocated pages
+	while (bits == KMaxTUint32 && map < mapEnd)
+		{
+		bits = *++map;
+		}
+
+	if (map == mapEnd)
+		{// Have reached the last bit mask word so set the bits that are
+		//  outside of the range so that they are ignored.
+		bits |= (KMaxTUint32 >> (aEndOffset & ~KWordAlignMask)) >> 1;
+		}
+
+	if (bits == KMaxTUint32)
+		{// No allocated pages found in the range.
+		aOffset = aEndOffset + 1;
+		return 0;
+		}
+
+	// Now we have bits with allocated pages in it so determine the exact 
+	// offset of the next allocated page
+	TInt msOne = __e32_find_ms1_32(~bits);
+	__NK_ASSERT_DEBUG(msOne >= 0);	// Must have at least one allocated page in the word.
+	TUint msOneOffset = 31 - msOne;
+	aOffset = ((map - aZone->iBma[bmaIndex]->iMap) << 5) + msOneOffset;
+	TUint32* runWord = map;
+
+	if (map < mapEnd && __e32_bit_count_32(~bits) == msOne + 1)
+		{// The whole of the region in this word is allocated.
+		// Find the next word which isn't completely allocated within the range.
+		do
+			{
+			bits = *++map;
+			}
+		while (!bits && map < mapEnd);
+		}
+
+	// Clear any bits before the run so the next free bit can be found with __e32_find_ms1_32().
+	if (runWord == map)
+		bits &= KMaxTUint32 >> (aOffset & ~KWordAlignMask);
+	TInt msFree = __e32_find_ms1_32(bits);
+	__NK_ASSERT_DEBUG(msFree >= 0 || map == mapEnd);
+	TUint msFreeOffset = (msFree >= 0)? 31 - msFree : 32;
+	TUint endIndex = map - aZone->iBma[bmaIndex]->iMap;
+	TUint runEnd = (endIndex << 5) + msFreeOffset;
+	if (runEnd > aEndOffset + 1)	// Ensure we don't go past the range.
+		runEnd = aEndOffset + 1;
+	__NK_ASSERT_DEBUG(runEnd > aOffset);
+
+	return runEnd - aOffset;
+	}
+
+
 /**
 See if any of the least preferable RAM zones can be emptied.  If they can then 
 initialise the allocator for a general defragmentation operation.
@@ -2852,7 +3234,7 @@
 It outputs the zone configuration and the base addresses of any contiguous block
 of allocated pages.
 */
-void DRamAllocator::SendInitialBtraceLogs(void)
+void DRamAllocator::DoBTracePrime(void)
 	{
 	M::RamAllocIsLocked();
-	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::SendInitialBtraceLogs");
+	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::DoBTracePrime");
--- a/kernel/eka/memmodel/epoc/moving/mchunk.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/memmodel/epoc/moving/mchunk.cpp	Mon May 10 11:40:53 2010 +0100
@@ -467,7 +467,8 @@
 		{
 		// Allocate a block of contiguous RAM from the free pool
 		TInt numPages=(endOffset-offset)>>m.iPageShift;
-		r=m.AllocContiguousRam(numPages<<m.iPageShift, nextPage, GetPageType(), 0);
+		__NK_ASSERT_DEBUG(EPageFixed == GetPageType());
+		r=m.AllocContiguousRam(numPages<<m.iPageShift, nextPage, 0);
 		if (r!=KErrNone)
 			return r;
 		if(clearRam)
--- a/kernel/eka/memmodel/epoc/multiple/arm/xmmu.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/memmodel/epoc/multiple/arm/xmmu.cpp	Mon May 10 11:40:53 2010 +0100
@@ -1617,7 +1617,7 @@
 	if (aNumPages>1)
 		{
 		TInt align=aSeparateGlobal ? KPageDirectoryShift : KPageDirectoryShift-1;
-		r=AllocContiguousRam(aNumPages<<KPageShift, aPhysAddr, EPageFixed, align);
+		r=AllocContiguousRam(aNumPages<<KPageShift, aPhysAddr, align);
 		}
 	else
 		r=AllocRamPages(&aPhysAddr,1, EPageFixed);
--- a/kernel/eka/memmodel/epoc/multiple/mchunk.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/memmodel/epoc/multiple/mchunk.cpp	Mon May 10 11:40:53 2010 +0100
@@ -417,7 +417,8 @@
 		{
 		// Allocate a block of contiguous RAM from the free pool
 		TInt numPages=(endOffset-offset)>>m.iPageShift;
-		r=m.AllocContiguousRam(numPages<<m.iPageShift, nextPage, GetPageType(), 0);
+		__NK_ASSERT_DEBUG(EPageFixed == GetPageType());
+		r=m.AllocContiguousRam(numPages<<m.iPageShift, nextPage, 0);
 		if (r!=KErrNone)
 			return r;
 		if(clearRam)
--- a/kernel/eka/release.txt	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/release.txt	Mon May 10 11:40:53 2010 +0100
@@ -1,3 +1,40 @@
+Version 2.00.3079
+=================
+(Made by vfebvre 07/05/2010)
+
+1.	mipetzol
+	1.	MINOR_CHANGE: Updated comment in dma_v1.h
+	2.	MINOR_CHANGE: Updated comment in dma_v2.h
+
+
+Version 2.00.3078
+=================
+(Made by vfebvre 05/05/2010)
+
+1.	cnotton
+	1.	ou1cimx1#345137 idlehelper_lib & Power DLL don't build for ARM4SMP (kernelhwsrv)
+
+2.	jimmzhou
+	1.	ou1cimx1#366912 [kernelmcl][Internal] t_usbapi test is not ready to run on production phone environment Vasco/hw79
+
+3.	ricoles
+	1.	PDEF144020 SMP known test failures
+
+4.	y153liu
+	1.	ou1cimx1#355904 MCL_Music playback is not correct after resuming the playback from the USB headset AD-83 after long pause
+
+
+Version 2.00.3077
+=================
+(Made by vfebvre 04/05/2010)
+
+1.	martai
+	1.	ou1cimx1#337920 WDP:Potential system impact of flushing the entire page cache is unacceptable
+
+2.	shubmurt
+	1.	PDEF145383 E32TEST T_DATAPAGING occasionally reports KERN-EXEC 17 on SMPDATAPAGE configs
+
+
 Version 2.00.3076
 =================
 (Made by vfebvre 30/04/2010)
--- a/kerneltest/e32test/buffer/t_tbma.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kerneltest/e32test/buffer/t_tbma.cpp	Mon May 10 11:40:53 2010 +0100
@@ -682,6 +682,62 @@
 	delete pA;
 	}
 
+
+void TestSelectiveAlloc(TInt aSize)
+	{
+	test.Printf(_L("TestSelectiveAlloc %d\n"),aSize);
+	TBitMapAllocator* pA=TBitMapAllocator::New(aSize, ETrue);
+	test(pA!=NULL);
+	test(pA->Avail()==aSize);
+	// Allocate whole free bma
+	test_Equal(aSize, pA->SelectiveAlloc(0, aSize));
+	test_Equal(0,pA->Avail());
+	// Allocate whole full bma
+	test_Equal(0, pA->SelectiveAlloc(0, aSize));
+	test_Equal(0,pA->Avail());
+	TInt i;
+	TInt j;
+	TInt l;
+	for (i=2; i<8; ++i)
+		{
+		for (l=1; l<=aSize; ++l)
+			{
+			new (pA) TBitMapAllocator(aSize, ETrue);
+			for (j=0; j<aSize; j+=i)
+				pA->Alloc(j,1);
+			TInt orig=pA->Avail();
+			test_Equal(aSize-(aSize+i-1)/i, orig);
+			TUint newAllocs = pA->SelectiveAlloc(0,l);
+			TInt allocated = orig - pA->Avail();
+			test_Equal(allocated, newAllocs);
+			test_Equal(l - (l+i-1)/i, allocated);
+			Check(*pA);
+			}
+		}
+	for (i=0; i<=Min(32,aSize-1); ++i)
+		{
+		for (l=1; l<=aSize-i; ++l)
+			{
+			for (j=1; j<=aSize; ++j)
+				{
+				new (pA) TBitMapAllocator(aSize, ETrue);
+				pA->Alloc(i,l);
+				test_Equal(aSize-l, pA->Avail());
+				TUint newAllocs = pA->SelectiveAlloc(0,j);
+				TUint allocated = j - Max(0,Min(i+l,j)-i);
+				test_Equal(allocated, newAllocs);
+				test_Equal(pA->Avail(), aSize-l-allocated);
+				test(!pA->NotAllocated(0,j));
+				if (j>=i && j<i+l)
+					test(!pA->NotAllocated(0,j+1));
+				Check(*pA);
+				}
+			}
+		}
+	delete pA;
+	}
+
+
 TBitMapAllocator* DoSetupBMA(TInt aSize, VA_LIST aList)
 	{
 	TBitMapAllocator* pA=TBitMapAllocator::New(aSize, EFalse);
@@ -1246,6 +1302,11 @@
 	TestSelectiveFree(128);
 	TestSelectiveFree(149);
 
+	TestSelectiveAlloc(3);
+	TestSelectiveAlloc(31);
+	TestSelectiveAlloc(128);
+	TestSelectiveAlloc(149);
+
 	TestAllocConsecutive();
 
 	TestChain();
--- a/kerneltest/e32test/buffer/t_tbma.h	Wed May 05 05:11:16 2010 +0100
+++ b/kerneltest/e32test/buffer/t_tbma.h	Mon May 10 11:40:53 2010 +0100
@@ -39,6 +39,7 @@
 	IMPORT_C void Free(TInt aPos);
 	IMPORT_C void Alloc(TInt aStart, TInt aLength);
 	IMPORT_C void Free(TInt aStart, TInt aLength);
+	IMPORT_C TUint SelectiveAlloc(TInt aStart, TInt aLength);
 	IMPORT_C void SelectiveFree(TInt aStart, TInt aLength);
 	IMPORT_C TBool NotFree(TInt aStart, TInt aLength) const;
 	IMPORT_C TBool NotAllocated(TInt aStart, TInt aLength) const;
--- a/kerneltest/e32test/defrag/t_ramdefrag.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kerneltest/e32test/defrag/t_ramdefrag.cpp	Mon May 10 11:40:53 2010 +0100
@@ -33,6 +33,7 @@
 #include <e32math.h>
 #include <hal.h>
 #include "testdefs.h"
+#include "..\mmu\mmudetect.h"
 
 
 #include <dptest.h>
@@ -128,6 +129,7 @@
 LOCAL_D TInt* gCandList1;											// Array of zones that have the same preference and the same
 LOCAL_D TInt* gCandList2;											// amount of free pages
 const TInt KInvalidCandIndex = -1;
+LOCAL_D TUint gMemModel;
 
 //
 // GetDrive
@@ -489,6 +491,8 @@
 									currentCacheSize >> gPageShift));
 		}
 
+	// Get the memory model of the kernel that this test is running on.
+	gMemModel = MemModelType();
 	return KErrNone;
 	}
 
@@ -1536,7 +1540,7 @@
 		}
 
 	if (totalMorePrefInUse > requiredMovDis)
-		{// There enough allocatable pages in the RAM zones below the currently 
+		{// There are enough allocatable pages in the RAM zones below the currently 
 		// least preferable RAM in use.
 		test.Printf(_L("Memory is spread out totalMorePref 0x%x required 0x%x\n"), totalMorePrefInUse, requiredMovDis);
 		if (verifySpread)
@@ -7073,16 +7077,20 @@
 //! @SYMPREQ					PREQ308
 //! @SYMTestPriority			High
 //! @SYMTestActions				
-//! 	1.	Allocate fixed pages and call function to free all fixed pages allocated.  
+//! 	1.	Allocate fixed pages and call function to free all fixed pages allocated.
+//!		2.	Claim a RAM zone and then free it via Epoc::FreeRamZone().
+//!		3.	Invoke Epoc::FreeRamZone() with an invalid RAM zone ID.
 //! 
 //! @SYMTestExpectedResults
 //! 	1.	KErrNone
+//!		2.	KErrNone
+//!		3.	KErrArgument
 //---------------------------------------------------------------------------------------------------------------------
 TInt TestFreeZone()
 	{
 	TInt r = 0;
 	TUint zoneID = 0;
-	test.Start(_L("Test1: Free allocated pages"));	
+	test.Start(_L("Test1: Freeing allocated pages"));	
 	TestStart();	
 	
 	TInt pages = 50;
@@ -7128,7 +7136,58 @@
 			}
 		}
 	TestEnd();
-
+	test.End();
+
+	test.Start(_L("Test2: Epoc::FreeRamZone() on a claimed RAM zone"));
+	TestStart();
+	GetAllPageInfo();
+	TUint zoneIndex = 0;
+	while (zoneIndex < gZoneCount)
+		{
+		if (gZoneUtilArray[zoneIndex].iFreePages == gZoneUtilArray[zoneIndex].iPhysPages)
+			break;
+		zoneIndex++;
+		}
+	if (zoneIndex >= gZoneCount)
+		{
+		test.Printf(_L("Cannot find zone to perform test, Skipping test step...\n"));
+		goto Test2End;
+		}
+	zoneID = gZoneConfigArray[zoneIndex].iZoneId;
+	r = Ldd.CallDefrag(DEFRAG_TYPE_CLAIM, DEFRAG_VER_SYNC, zoneID);
+	if (r != KErrNone)
+		{
+		test.Printf(_L("Fail: r = %d, expected = %d\n"), r, KErrNone);
+		TEST_FAIL;
+		}
+	GetAllPageInfo();
+	if (gZoneUtilArray[zoneIndex].iPhysPages != gZoneUtilArray[zoneIndex].iAllocFixed)
+		{
+		test.Printf(_L("Fail: RAM zone ID %d not claimed successfully"), zoneID);
+		TEST_FAIL;
+		}
+	r = Ldd.FreeZoneId(zoneID);
+	GetAllPageInfo();
+	if (r != KErrNone ||
+		gZoneUtilArray[zoneIndex].iPhysPages != gZoneUtilArray[zoneIndex].iFreePages)
+		{
+		test.Printf(_L("Fail: RAM zone ID %d not freed successfully r=%d"), zoneID, r);
+		TEST_FAIL;
+		}
+Test2End:
+	TestEnd();
+	test.End();
+
+	test.Start(_L("Test3: Epoc::FreeRamZone() on an invalid RAM zone"));
+	TestStart();
+	r = Ldd.FreeZoneId(KInvalidZoneID);
+	if (r != KErrArgument)
+		{
+		test.Printf(_L("Fail: Error RAM zone ID %d r=%d"), KInvalidZoneID, r);
+		TEST_FAIL;
+		}
+	
+	TestEnd();
 	test.End();
 	return KErrNone;
 	}
@@ -9384,6 +9443,14 @@
 
 	test.Next(_L("Test5: Filling the FS Cache and allocating more than 16 contiguous fixed pages"));	
 	TestStart();
+
+	if (gMemModel >= EMemModelTypeFlexible)
+		{// The flexible memory model won't flush the whole paging cache for 
+		// contiguous allocations >16 pages so skip the next test.
+		test.Printf(_L("This memory model won't flush the cache - Skipping...\n"));
+		goto SkipTest5;
+		}
+
 	// TestEnd() will have reduced any cache pages to minimum so just get current 
 	// count of discardable pages.
 	GetAllPageInfo();
--- a/kerneltest/e32test/demandpaging/t_datapaging.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kerneltest/e32test/demandpaging/t_datapaging.cpp	Mon May 10 11:40:53 2010 +0100
@@ -236,7 +236,7 @@
 		CLOSE_AND_WAIT(thread);
 		}
 	CLOSE_AND_WAIT(gChunk);
-	User::After(1000000);
+	UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);
 	__KHEAP_MARKEND;
 	}
 
@@ -783,6 +783,9 @@
 	test_KErrNone(timeoutStatus.Int());
 	
 	CLOSE_AND_WAIT(gChunk);
+	
+	UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);
+	
 	__KHEAP_MARKEND;
 	}
 
--- a/kerneltest/e32test/device/t_usbapi.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kerneltest/e32test/device/t_usbapi.cpp	Mon May 10 11:40:53 2010 +0100
@@ -1849,6 +1849,29 @@
 	{
 	test.Start(_L("Test Endpoint Stall Status"));
 
+#ifdef BSW_USB_DRC
+	// The BSW_USB_DRC macro comes from the ncp adaptation and indicates that OTG is built in.
+	// Newly added code for defect ou1cimx1#267421: when OTG is built in and the device is not
+	// in the peripheral role, the ncp adaptation will return a dummy endpoint for the stall
+	// operation. The solution is to check whether the device is in peripheral mode and, if
+	// not, skip the stall operation. The problem is that there is currently no good way to
+	// determine the device's current role. For the test environment it is acceptable to use
+	// the USB device state as confirmation, and the test results show this works. When a more
+	// accurate method is found, the confirmation logic will be changed.
+	TInt ret = KErrNone;
+
+	TUsbcDeviceState devstate = EUsbcDeviceStateUndefined;
+	ret = gPort.DeviceStatus(devstate);
+	test(ret == KErrNone);
+
+	if( EUsbcDeviceStateUndefined==devstate )
+		{
+		test.Printf( _L("Device not connected, state EUsbcDeviceStateUndefined.\n")  );
+		test.Printf( _L("Skipping endpoint stall status tests.\n") );
+		test.End();
+		return;
+		}
+#endif
+
 	if (!SupportsEndpointStall())
 		{
 		test.Printf(_L("*** Not supported - skipping endpoint stall status tests\n"));
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kerneltest/e32test/digitiser/digi.auto.bat	Mon May 10 11:40:53 2010 +0100
@@ -0,0 +1,19 @@
+rem
+rem Copyright (c) 2010 Nokia Corporation and/or its subsidiary(-ies).
+rem All rights reserved.
+rem This component and the accompanying materials are made available
+rem under the terms of the License "Eclipse Public License v1.0"
+rem which accompanies this distribution, and is available
+rem at the URL "http://www.eclipse.org/legal/epl-v10.html".
+rem
+rem Initial Contributors:
+rem Nokia Corporation - initial contribution.
+rem
+rem Contributors:
+rem
+rem Description:
+rem
+
+t_userdigitisertest
+t_userdigitisernocaps
+t_ldddigitisertest
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kerneltest/e32test/digitiser/digitiser.inf	Mon May 10 11:40:53 2010 +0100
@@ -0,0 +1,44 @@
+// Copyright (c) 2010 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// Digitiser Driver Tests
+// Convenience INF file for building just the Digitiser tests: "bldmake -f digitiser.inf bldfiles"
+// The test MMPs are also part of the official ../group/bld.inf e32test component.
+//
+
+/**
+@file
+
+@SYMPurpose Kernel and User library test code
+*/
+
+PRJ_PLATFORMS
+
+BASEDEFAULT
+
+PRJ_TESTEXPORTS
+
+digitiser_tests.iby			/epoc32/rom/include/digitiser_tests.iby
+tshell_digitisertests.oby	../../../kernel/eka/rombuild/tshell_digitisertests.oby
+
+digi.auto.bat				/epoc32/rom/include/digi.auto.bat
+
+PRJ_TESTMMPFILES
+
+../group/d_ldddigitisertest			support
+
+#ifndef SMP
+../group/t_userdigitisertest
+../group/t_userdigitisernocaps
+../group/t_ldddigitisertest
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kerneltest/e32test/digitiser/digitiser_tests.iby	Mon May 10 11:40:53 2010 +0100
@@ -0,0 +1,27 @@
+// Copyright (c) 2010 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// Digitiser Driver Test Application
+//
+
+#ifndef __DIGITISER_TEST_IBY__
+#define __DIGITISER_TEST_IBY__
+
+device[VARID]=\Epoc32\Release\##KMAIN##\##BUILD##\d_ldddigitisertest.ldd	\sys\bin\d_ldddigitisertest.ldd
+file=\Epoc32\Release\##MAIN##\##BUILD##\t_ldddigitisertest.exe				\sys\bin\t_ldddigitisertest.exe
+file=\Epoc32\Release\##MAIN##\##BUILD##\t_userdigitisertest.exe				\sys\bin\t_userdigitisertest.exe
+file=\Epoc32\Release\##MAIN##\##BUILD##\t_userdigitisernocaps.exe			\sys\bin\t_userdigitisernocaps.exe
+
+data=\epoc32\rom\include\digi.auto.bat										\digi.auto.bat
+
+#endif // __DIGITISER_TEST_IBY__
--- a/kerneltest/e32test/digitiser/t_traweventdigitiser.cpp	Wed May 05 05:11:16 2010 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,143 +0,0 @@
-// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
-// All rights reserved.
-// This component and the accompanying materials are made available
-// under the terms of the License "Eclipse Public License v1.0"
-// which accompanies this distribution, and is available
-// at the URL "http://www.eclipse.org/legal/epl-v10.html".
-//
-// Initial Contributors:
-// Nokia Corporation - initial contribution.
-//
-// Contributors:
-//
-// Description:
-// e32test\digitiser\t_traweventdigitiser.cpp
-// Overview:
-// Test the TRawEvent APIS and events associated with the Digitiser and also verify the BTRACEs (manually)
-// API Information:
-// UserSvr
-// Details:
-// - Test the following 6  Events types 
-// 1.	EPointerMove
-// 2.	EPointer3DInRange,
-// 3.	EPointer3DOutOfRange,
-// 4.	EPointer3DTilt,
-// 5.	EPointer3DRotation,
-// 6.	EPointer3DTiltAndMove,
-// Platforms/Drives/Compatibility:
-// All.
-// Assumptions/Requirement/Pre-requisites: 
-// Failures and causes:
-// 
-//
-
-#include <e32test.h>
-#include <e32svr.h>
-#include <e32cmn.h>
-#include <e32cmn_private.h>
-
-LOCAL_D RTest test(_L("t_TRawEventDigitiser"));
-
-class TestTRawDigitiserEvent
-	{
-public:
-	TestTRawDigitiserEvent(TRawEvent::TType aType,TInt aX,TInt aY,TInt aZ,TInt aScanCode,TInt aPhi,TInt aTheta,TInt aAlpha,TUint8 aPointerNumber,TUint8 iTip);
-	void TestEvents();	
-private:	
-	TRawEvent::TType iType;
-	TInt iX;
-    TInt iY;
-	TInt iZ;
-	TInt iScanCode;
-	TInt iPhi;
-	TInt iTheta;
-	TInt iAlpha;
-	TUint8 iPointerNumber;
-	TUint8 iTip;
-	TRawEvent iDigitiser3DEvent;
-	};
-
-
-TestTRawDigitiserEvent::TestTRawDigitiserEvent(TRawEvent::TType aType,TInt aX,TInt aY,TInt aZ,TInt aScanCode,TInt aPhi,TInt aTheta,TInt aAlpha,TUint8 aPointerNumber,TUint8 aTip):iType(aType),iX(aX),iY(aY),iZ(aZ),iScanCode(aScanCode),iPhi(aPhi),iTheta(aTheta),iAlpha(aAlpha),iPointerNumber(aPointerNumber),iTip(aTip)
-	{}
-
-
-void TestTRawDigitiserEvent::TestEvents()
-	{
-	
-	test(iDigitiser3DEvent.Type()==0);
-	iDigitiser3DEvent.Set(iType);
-	test(iDigitiser3DEvent.Type()==iType);
-	iDigitiser3DEvent.SetPointerNumber(iPointerNumber);
-	test(iPointerNumber == iDigitiser3DEvent.PointerNumber());
-	iDigitiser3DEvent.Set(iType,iScanCode);
-	//Set the Type temporarily to get through the assertion 
-	iDigitiser3DEvent.Set(TRawEvent::EKeyDown);
-    test(iScanCode==iDigitiser3DEvent.ScanCode());
-	iDigitiser3DEvent.Set(iType,iX,iY);
-	//Set the Type temporarily to get through the assertion
-	iDigitiser3DEvent.Set(TRawEvent::EPointerMove);
-	test(TPoint(iX,iY)==iDigitiser3DEvent.Pos());
-	iDigitiser3DEvent.Set(iType,iX,iY,iZ);
-	//Set the Type temporarily to get through the assertion
-	iDigitiser3DEvent.Set(TRawEvent::EPointerMove);
-	test(TPoint3D(iX,iY,iZ)==iDigitiser3DEvent.Pos3D());
-	iDigitiser3DEvent.SetTip(iTip);
-	test(TBool(iTip) == iDigitiser3DEvent.IsTip());
-	iDigitiser3DEvent.SetTilt(iType,iPhi,iTheta);
-	//Set the Type temporarily to get through the assertion
-	iDigitiser3DEvent.Set(TRawEvent::EPointer3DTilt);
-	TAngle3D rawEventAnge3D=iDigitiser3DEvent.Tilt();
-	test((rawEventAnge3D.iPhi==iPhi) && (rawEventAnge3D.iTheta==iTheta)) ;
-	
-
-	iDigitiser3DEvent.SetRotation(iType,iAlpha);
-	//Set the Type temporarily to get through the assertion
-	iDigitiser3DEvent.Set(TRawEvent::EPointer3DRotation);
-	test(iAlpha == iDigitiser3DEvent.Rotation());
-	iDigitiser3DEvent.Set(iType,iX+1,iY+1,iZ+1,iPhi+1,iTheta+1,iAlpha+1);
-	//Set the Type temporarily to get through the assertion
-	iDigitiser3DEvent.Set(TRawEvent::EPointer3DTiltAndMove);
-	test(TPoint3D(iX+1,iY+1,iZ+1)==iDigitiser3DEvent.Pos3D());
-    rawEventAnge3D=iDigitiser3DEvent.Tilt();
-	test((rawEventAnge3D.iPhi==iPhi+1) &&(rawEventAnge3D.iTheta==iTheta+1));	
-	test((iAlpha+1) == iDigitiser3DEvent.Rotation());   
-	iDigitiser3DEvent.Set(iType,iX+2,iY+2,iZ+2,static_cast<TUint8>(iPointerNumber+1));
-	//Set the Type temporarily to get through the assertion
-	iDigitiser3DEvent.Set(TRawEvent::EPointer3DTiltAndMove);
-  	test(TPoint3D(iX+2,iY+2,iZ+2)==iDigitiser3DEvent.Pos3D());
-	test((iPointerNumber+1) == iDigitiser3DEvent.PointerNumber());
-
-	UserSvr::AddEvent(iDigitiser3DEvent);
-	}
-
-
-GLDEF_C TInt E32Main()
-//
-//
-    {
-
- 	test.Title();
-	test.Start(_L("Testing Digitiser Events"));
-	
-    TestTRawDigitiserEvent digitiserEvent1(TRawEvent::EPointerMove, -890,-123, -823,455,2563,156,62,3,1);
-	TestTRawDigitiserEvent digitiserEvent2(TRawEvent::EPointer3DInRange, 23,45,23,1,2,6,4,2,1);
-	TestTRawDigitiserEvent digitiserEvent3(TRawEvent::EPointer3DOutOfRange, 23,45,23,1,2,6,4,2,0);
-	TestTRawDigitiserEvent digitiserEvent4(TRawEvent::EPointer3DTilt, 23,45,23,1,2,6,4,2,1);
-	TestTRawDigitiserEvent digitiserEvent5(TRawEvent::EPointer3DRotation, 23,45,23,1,2,6,4,2,1);
-	TestTRawDigitiserEvent digitiserEvent6(TRawEvent::EPointer3DTiltAndMove, 23,45,23,1,2,6,4,2,0);
-
-    digitiserEvent1.TestEvents();
-	digitiserEvent2.TestEvents();
-	digitiserEvent3.TestEvents();
-	digitiserEvent4.TestEvents();
-	digitiserEvent5.TestEvents();
-	digitiserEvent6.TestEvents();    
-	test.Printf(_L("T_TRAWEVENTDIGITISER: TEST Successfully Completed\n"));
-	test.End();
-	test.Close();
-
-    return KErrNone;
-
-    }
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kerneltest/e32test/digitiser/t_userdigitisertest.cpp	Mon May 10 11:40:53 2010 +0100
@@ -0,0 +1,267 @@
+// Copyright (c) 2010 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32test\digitiser\t_userdigitisertest.cpp
+// Overview:
+// Test the TRawEvent APIs and events associated with the Digitiser and also verify the BTRACEs (manually)
+// Test HAL digitiser orientation attribute
+// API Information:
+// UserSvr
+// Details:
+// - Test the following 6 event types:
+// 1.	EPointerMove
+// 2.	EPointer3DInRange,
+// 3.	EPointer3DOutOfRange,
+// 4.	EPointer3DTilt,
+// 5.	EPointer3DRotation,
+// 6.	EPointer3DTiltAndMove,
+// Platforms/Drives/Compatibility:
+// All.
+// Assumptions/Requirement/Pre-requisites: 
+// Failures and causes:
+// 
+//
+
+#define __E32TEST_EXTENSION__
+#include <e32test.h>
+#include <e32svr.h>
+#include <e32cmn.h>
+#include <e32cmn_private.h>
+#include <hal.h>
+
+#ifndef E32TEST_NOCAPS
+LOCAL_D RTest test(_L("T_UserDigitiserTest"));
+#else
+LOCAL_D RTest test(_L("T_UserDigitiserNoCaps"));
+#endif
+
+class TestTRawDigitiserEvent
+	{
+public:
+	TestTRawDigitiserEvent(TRawEvent::TType aType,TInt aX,TInt aY,TInt aZ,TInt aScanCode,TInt aPhi,TInt aTheta,TInt aAlpha,TUint8 aPointerNumber,TUint8 aTip);
+	void TestEvents();	
+private:	
+	TRawEvent::TType iType;
+	TInt iX;
+    TInt iY;
+	TInt iZ;
+	TInt iScanCode;
+	TInt iPhi;
+	TInt iTheta;
+	TInt iAlpha;
+	TUint8 iPointerNumber;
+	TUint8 iTip;
+	TRawEvent iDigitiser3DEvent;
+	};
+
+
+TestTRawDigitiserEvent::TestTRawDigitiserEvent(TRawEvent::TType aType,TInt aX,TInt aY,TInt aZ,TInt aScanCode,TInt aPhi,TInt aTheta,TInt aAlpha,TUint8 aPointerNumber,TUint8 aTip):iType(aType),iX(aX),iY(aY),iZ(aZ),iScanCode(aScanCode),iPhi(aPhi),iTheta(aTheta),iAlpha(aAlpha),iPointerNumber(aPointerNumber),iTip(aTip)
+	{}
+
+
+void TestTRawDigitiserEvent::TestEvents()
+	{
+	static TInt count = 0;
+	count++;
+	test.Printf(_L("TestTRawDigitiserEvent test case %2d\n"), count);
+	
+	test(iDigitiser3DEvent.Type()==0);
+	iDigitiser3DEvent.Set(iType);
+	test(iDigitiser3DEvent.Type()==iType);
+	iDigitiser3DEvent.SetPointerNumber(iPointerNumber);
+	test(iPointerNumber == iDigitiser3DEvent.PointerNumber());
+	iDigitiser3DEvent.Set(iType,iScanCode);
+	//Set the Type temporarily to get through the assertion 
+	iDigitiser3DEvent.Set(TRawEvent::EKeyDown);
+    test(iScanCode==iDigitiser3DEvent.ScanCode());
+	iDigitiser3DEvent.Set(iType,iX,iY);
+	//Set the Type temporarily to get through the assertion
+	iDigitiser3DEvent.Set(TRawEvent::EPointerMove);
+	test(TPoint(iX,iY)==iDigitiser3DEvent.Pos());
+	iDigitiser3DEvent.Set(iType,iX,iY,iZ);
+	//Set the Type temporarily to get through the assertion
+	iDigitiser3DEvent.Set(TRawEvent::EPointerMove);
+	test(TPoint3D(iX,iY,iZ)==iDigitiser3DEvent.Pos3D());
+	iDigitiser3DEvent.SetTip(iTip);
+	test(TBool(iTip) == iDigitiser3DEvent.IsTip());
+	iDigitiser3DEvent.SetTilt(iType,iPhi,iTheta);
+	//Set the Type temporarily to get through the assertion
+	iDigitiser3DEvent.Set(TRawEvent::EPointer3DTilt);
+	TAngle3D rawEventAngle3D=iDigitiser3DEvent.Tilt();
+	test((rawEventAngle3D.iPhi==iPhi) && (rawEventAngle3D.iTheta==iTheta));
+	
+
+	iDigitiser3DEvent.SetRotation(iType,iAlpha);
+	//Set the Type temporarily to get through the assertion
+	iDigitiser3DEvent.Set(TRawEvent::EPointer3DRotation);
+	test(iAlpha == iDigitiser3DEvent.Rotation());
+	iDigitiser3DEvent.Set(iType,iX+1,iY+1,iZ+1,iPhi+1,iTheta+1,iAlpha+1);
+	//Set the Type temporarily to get through the assertion
+	iDigitiser3DEvent.Set(TRawEvent::EPointer3DTiltAndMove);
+	test(TPoint3D(iX+1,iY+1,iZ+1)==iDigitiser3DEvent.Pos3D());
+    rawEventAngle3D=iDigitiser3DEvent.Tilt();
+	test((rawEventAngle3D.iPhi==iPhi+1) && (rawEventAngle3D.iTheta==iTheta+1));
+	test((iAlpha+1) == iDigitiser3DEvent.Rotation());   
+	iDigitiser3DEvent.Set(iType,iX+2,iY+2,iZ+2,static_cast<TUint8>(iPointerNumber+1));
+	//Set the Type temporarily to get through the assertion
+	iDigitiser3DEvent.Set(TRawEvent::EPointer3DTiltAndMove);
+  	test(TPoint3D(iX+2,iY+2,iZ+2)==iDigitiser3DEvent.Pos3D());
+	test((iPointerNumber+1) == iDigitiser3DEvent.PointerNumber());
+
+	UserSvr::AddEvent(iDigitiser3DEvent);
+	}
+	
+	
+struct HalAttribute_TestCase
+	{
+	HALData::TAttribute iAttr;
+	TInt				iValueIn;
+	TInt				iSetRC;		// Set to KMaxTInt to skip set test case
+	TInt				iGetRC;		// Set to KMaxTInt to skip get test case
+
+	};
+	
+static HalAttribute_TestCase gHalAttributeTests[] =
+	{
+#ifndef E32TEST_NOCAPS
+	// Normal all pass tests
+	{ HALData::EDigitiserOrientation, HALData::EDigitiserOrientation_000,		KErrNone, KErrNone},
+	{ HALData::EDigitiserOrientation, HALData::EDigitiserOrientation_090,		KErrNone, KErrNone},	
+	{ HALData::EDigitiserOrientation, HALData::EDigitiserOrientation_180,		KErrNone, KErrNone},
+	{ HALData::EDigitiserOrientation, HALData::EDigitiserOrientation_270,		KErrNone, KErrNone},
+	{ HALData::EDigitiserOrientation, HALData::EDigitiserOrientation_default,	KErrNone, KErrNone},
+				
+	// Negative tests
+	{ HALData::EDigitiserOrientation, -1,		KErrArgument, KMaxTInt},
+	{ HALData::EDigitiserOrientation, 100,		KErrArgument, KMaxTInt},
+		
+#else
+	// Platsec tests for no capabilities executable.
+	{ HALData::EDigitiserOrientation, HALData::EDigitiserOrientation_default,		KMaxTInt, KErrNone},			 // Get, No caps needed
+	{ HALData::EDigitiserOrientation, HALData::EDigitiserOrientation_default,		KErrPermissionDenied, KMaxTInt}, // Set WDD cap needed
+#endif
+	};
+	
+static TInt gNumHalAttributeTests = sizeof(gHalAttributeTests)/sizeof(HalAttribute_TestCase);
+
+void DoTestDigitiserHalAttributes()
+	{
+	__UHEAP_MARK;
+#ifndef E32TEST_NOCAPS
+	test.Start(_L("DoTestDigitiserHalAttributes tests"));
+#else
+	test.Start(_L("DoTestDigitiserHalAttributes NO CAPS tests"));
+	
+	// Skip the No Caps test for the WDD capability when platform security enforcement
+	// is not enabled, i.e. when an emulator's epoc.ini is missing.
+	if (!PlatSec::IsCapabilityEnforced(ECapabilityWriteDeviceData))
+		{
+		test.Printf(_L("Platform security enforcement off, skipping\n"));
+		test.End();
+		__UHEAP_MARKEND;
+		return;
+		}
+#endif
+
+	TInt i = 0;
+	TInt origValue = -1;
+	TInt r = HAL::Get(HALData::EDigitiserOrientation, origValue);
+	if (r == KErrNotSupported)
+		{
+		test.Printf(_L("Platform doesn't support EDigitiserOrientation, skipping\n"));
+		test.End();
+		__UHEAP_MARKEND;
+		return;
+		}
+	test_KErrNone(r);
+	
+	// Attribute supported on platform, proceed with test.
+	TInt value = -1;
+	for (i=0; i < gNumHalAttributeTests; i++)
+		{
+		test.Printf(_L("DoTestDigitiserHalAttributes - step/row %2d\n"), i+1);
+		
+		if (gHalAttributeTests[i].iSetRC != KMaxTInt) // Skip set test?
+			{
+			r = HAL::Set(gHalAttributeTests[i].iAttr,  gHalAttributeTests[i].iValueIn);
+			test_Equal( gHalAttributeTests[i].iSetRC, r);	
+			}
+			
+		if (gHalAttributeTests[i].iGetRC != KMaxTInt) // Skip get test?
+			{
+			r = HAL::Get(gHalAttributeTests[i].iAttr,  value);
+			test_Equal(gHalAttributeTests[i].iGetRC, r);
+			test_Equal(gHalAttributeTests[i].iValueIn, value);
+			}
+		}
+		
+#ifndef E32TEST_NOCAPS
+	// Restore the system state to what it was before the test
+	r = HAL::Set(HALData::EDigitiserOrientation, origValue);
+	test_KErrNone(r);
+#endif
+		
+	test.Printf(_L("DoTestDigitiserHalAttributes - complete\n"));
+	test.End();
+	__UHEAP_MARKEND;
+	}
+
+#ifndef E32TEST_NOCAPS
+void DoTestRawDigitiserEvent()
+	{
+	__UHEAP_MARK;
+	test.Start(_L("DoTestRawDigitiserEvent tests"));
+
+    TestTRawDigitiserEvent digitiserEvent1(TRawEvent::EPointerMove, -890,-123, -823,455,2563,156,62,3,1);
+	TestTRawDigitiserEvent digitiserEvent2(TRawEvent::EPointer3DInRange, 23,45,23,1,2,6,4,2,1);
+	TestTRawDigitiserEvent digitiserEvent3(TRawEvent::EPointer3DOutOfRange, 23,45,23,1,2,6,4,2,0);
+	TestTRawDigitiserEvent digitiserEvent4(TRawEvent::EPointer3DTilt, 23,45,23,1,2,6,4,2,1);
+	TestTRawDigitiserEvent digitiserEvent5(TRawEvent::EPointer3DRotation, 23,45,23,1,2,6,4,2,1);
+	TestTRawDigitiserEvent digitiserEvent6(TRawEvent::EPointer3DTiltAndMove, 23,45,23,1,2,6,4,2,0);
+
+    digitiserEvent1.TestEvents();
+	digitiserEvent2.TestEvents();
+	digitiserEvent3.TestEvents();
+	digitiserEvent4.TestEvents();
+	digitiserEvent5.TestEvents();
+	digitiserEvent6.TestEvents();    
+	
+	test.End();
+	__UHEAP_MARKEND;
+	}
+#endif
+
+
+GLDEF_C TInt E32Main()
+//
+//
+    {
+	__UHEAP_MARK;
+	
+ 	test.Title();
+	test.Start(_L("User-side Digitiser Testing Events/HAL"));
+	
+	DoTestDigitiserHalAttributes();
+	
+#ifndef E32TEST_NOCAPS
+	DoTestRawDigitiserEvent();
+#endif
+
+	test.Printf(_L("\n"));
+	test.End();
+	test.Close();
+
+	__UHEAP_MARKEND;
+    return KErrNone;
+    }
+
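For reference, the HAL attribute exercised by gHalAttributeTests can also be driven directly. A minimal sketch using only HAL::Get()/HAL::Set() and the EDigitiserOrientation values from the table above; HAL::Set() needs the WriteDeviceData capability, which is exactly what the E32TEST_NOCAPS build checks:

#include <hal.h>

// Read, change and restore the digitiser orientation attribute.
TInt RotateDigitiserBy90()
	{
	TInt orig;
	TInt r = HAL::Get(HALData::EDigitiserOrientation, orig);
	if (r != KErrNone)
		return r;	// KErrNotSupported if the platform lacks the attribute
	r = HAL::Set(HALData::EDigitiserOrientation, HALData::EDigitiserOrientation_090);
	if (r == KErrNone)
		r = HAL::Set(HALData::EDigitiserOrientation, orig);	// restore
	return r;
	}
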
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kerneltest/e32test/digitiser/tshell_digitisertests.oby	Mon May 10 11:40:53 2010 +0100
@@ -0,0 +1,29 @@
+/*
+* Copyright (c) 2010 Nokia Corporation and/or its subsidiary(-ies).
+* All rights reserved.
+* This component and the accompanying materials are made available
+* under the terms of the License "Eclipse Public License v1.0"
+* which accompanies this distribution, and is available
+* at the URL "http://www.eclipse.org/legal/epl-v10.html".
+*
+* Initial Contributors:
+* Nokia Corporation - initial contribution.
+*
+* Contributors:
+*
+* Description:
+*
+*/
+
+#define BASE_ROM
+#include <rom\##VARIANT##\header.iby>
+
+
+files=
+
+#include <rom\##VARIANT##\kernel.iby>
+#include "user.iby"
+#include <rom\hal\hal.iby>
+#include <rom\f32\f32.iby>
+
+#include <rom\include\digitiser_tests.iby>
--- a/kerneltest/e32test/group/bld.inf	Wed May 05 05:11:16 2010 +0100
+++ b/kerneltest/e32test/group/bld.inf	Mon May 10 11:40:53 2010 +0100
@@ -1,4 +1,4 @@
-// Copyright (c) 1999-2009 Nokia Corporation and/or its subsidiary(-ies).
+// Copyright (c) 1999-2010 Nokia Corporation and/or its subsidiary(-ies).
 // All rights reserved.
 // This component and the accompanying materials are made available
 // under the terms of the License "Eclipse Public License v1.0"
@@ -387,7 +387,8 @@
 t_newldd
 t_lddpowerseqtest
 t_ldddigitisertest
-t_traweventdigitiser
+t_userdigitisertest
+t_userdigitisernocaps
 t_persistrestart	manual
 halsettings	support
 
@@ -506,6 +507,11 @@
 t_heap
 t_heap2
 t_heapdb
+t_heapdl
+t_heapslab
+t_heapstress		manual
+t_heapcheck
+t_heappagealloc
 t_kheap
 
 // Secure RNG tests
@@ -867,7 +873,7 @@
 t_logtofile         manual
 t_eventtracker      manual
 t_traceredirect     support
-t_heapcorruption	support
+t_heapcorruption    support
 t_btrace
 t_perflogger
 
--- a/kerneltest/e32test/group/t_ramall.mmp	Wed May 05 05:11:16 2010 +0100
+++ b/kerneltest/e32test/group/t_ramall.mmp	Mon May 10 11:40:53 2010 +0100
@@ -19,7 +19,7 @@
 targettype		exe
 sourcepath		../mmu
 source			t_ramall.cpp
-library			euser.lib
+library			euser.lib  dptest.lib
 OS_LAYER_SYSTEMINCLUDE_SYMBIAN
 
 
--- a/kerneltest/e32test/group/t_traweventdigitiser.mmp	Wed May 05 05:11:16 2010 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-/*
-* Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
-* All rights reserved.
-* This component and the accompanying materials are made available
-* under the terms of the License "Eclipse Public License v1.0"
-* which accompanies this distribution, and is available
-* at the URL "http://www.eclipse.org/legal/epl-v10.html".
-*
-* Initial Contributors:
-* Nokia Corporation - initial contribution.
-*
-* Contributors:
-*
-* Description:
-*
-*/
-
-TARGET         t_traweventdigitiser.exe        
-TARGETTYPE     EXE
-SOURCEPATH	../digitiser
-SOURCE         t_traweventdigitiser.cpp
-LIBRARY        euser.lib
-OS_LAYER_SYSTEMINCLUDE_SYMBIAN
-
-
-capability		all
-
-VENDORID 0x70000001
-
-SMPSAFE
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kerneltest/e32test/group/t_userdigitisernocaps.mmp	Mon May 10 11:40:53 2010 +0100
@@ -0,0 +1,36 @@
+/*
+* Copyright (c) 2010 Nokia Corporation and/or its subsidiary(-ies).
+* All rights reserved.
+* This component and the accompanying materials are made available
+* under the terms of the License "Eclipse Public License v1.0"
+* which accompanies this distribution, and is available
+* at the URL "http://www.eclipse.org/legal/epl-v10.html".
+*
+* Initial Contributors:
+* Nokia Corporation - initial contribution.
+*
+* Contributors:
+*
+* Description:
+*
+*/
+
+TARGET		t_userdigitisernocaps.exe
+TARGETTYPE	EXE
+
+CAPABILITY	None
+MACRO		E32TEST_NOCAPS
+
+VENDORID	0x70000001
+SMPSAFE
+
+OS_LAYER_SYSTEMINCLUDE_SYMBIAN
+
+SOURCEPATH	../digitiser
+SOURCE		t_userdigitisertest.cpp
+
+LIBRARY		euser.lib
+LIBRARY		hal.lib
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kerneltest/e32test/group/t_userdigitisertest.mmp	Mon May 10 11:40:53 2010 +0100
@@ -0,0 +1,34 @@
+/*
+* Copyright (c) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).
+* All rights reserved.
+* This component and the accompanying materials are made available
+* under the terms of the License "Eclipse Public License v1.0"
+* which accompanies this distribution, and is available
+* at the URL "http://www.eclipse.org/legal/epl-v10.html".
+*
+* Initial Contributors:
+* Nokia Corporation - initial contribution.
+*
+* Contributors:
+*
+* Description:
+*
+*/
+
+TARGET		t_userdigitisertest.exe
+TARGETTYPE	EXE
+
+CAPABILITY	SwEvent WriteDeviceData
+VENDORID	0x70000001
+SMPSAFE
+
+OS_LAYER_SYSTEMINCLUDE_SYMBIAN
+
+SOURCEPATH	../digitiser
+SOURCE		t_userdigitisertest.cpp
+
+LIBRARY		euser.lib
+LIBRARY		hal.lib
+
+
+
--- a/kerneltest/e32test/mmu/t_cachechunk.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kerneltest/e32test/mmu/t_cachechunk.cpp	Mon May 10 11:40:53 2010 +0100
@@ -98,7 +98,7 @@
 TInt PageSize;
 TInt NoFreeRam;
 RTimer Timer;
-
+TBool gFmm;
 
 
 void FillPage(TUint aOffset)
@@ -308,18 +308,44 @@
 	test_KErrNone(r);
 
 	test.Next(_L("Check Decommit on unlocked pages"));
+	// Get original page cache size
+	TUint minCache = 0;
+	TUint maxCache = 0;
+	TUint oldCache = 0;
+	TUint newCache = 0;
+	if (gFmm)
+		{
+		r = DPTest::CacheSize(minCache, maxCache, oldCache);
+		test_KErrNone(r);
+		}
 	r = TestChunk.Unlock(aOffset,PageSize*4);
 	test_KErrNone(r);
+
+	TUint spareCache = maxCache - oldCache;
+	if (gFmm && spareCache)
+		{// Cache wasn't at maximum so should have grown when unlocked pages were added.
+		r = DPTest::CacheSize(minCache, maxCache, newCache);
+		test_KErrNone(r);
+		TUint extraCache = (spareCache > (TUint)PageSize*4)? PageSize*4 : spareCache;
+		test_Equal(oldCache + extraCache, newCache);
+		}
 	test(FreeRam() >= NoFreeRam+PageSize*4);
 	r=TestChunk.Decommit(aOffset, PageSize*4);
 	test_KErrNone(r);
 	freeRam = FreeRam();
 	test_Compare(freeRam, >=, NoFreeRam+PageSize*4);
 	test_Equal(origChunkSize - PageSize*4, TestChunk.Size());
+
+	if (gFmm)
+		{// Cache should have shrunk after pages were decommitted.
+		r = DPTest::CacheSize(minCache, maxCache, newCache);
+		test_KErrNone(r);
+		test_Equal(oldCache, newCache);
+		}
 	// Restore chunk back to original state
 	r = TestChunk.Commit(aOffset, PageSize*4);
 	test_KErrNone(r);
-	test(FreeRam() == NoFreeRam);
+	test_Equal(NoFreeRam, FreeRam());
 
 	test.Next(_L("Check Decommit on unlocked and reclaimed pages"));
 	r = TestChunk.Unlock(aOffset,PageSize*4);
@@ -351,6 +377,44 @@
 	test(freeRam==NoFreeRam);
 	test_Equal(origChunkSize, TestChunk.Size());
 
+	test.Next(_L("Check Decommit on a mixture of locked and unlocked pages"));
+	// Get original page cache size
+	if (gFmm)
+		{
+		r = DPTest::CacheSize(minCache, maxCache, oldCache);
+		test_KErrNone(r);
+		}
+	r = TestChunk.Unlock(aOffset,PageSize);
+	test_KErrNone(r);
+	r = TestChunk.Unlock(aOffset + PageSize*2, PageSize);
+	test_KErrNone(r);
+
+	spareCache = maxCache - oldCache;
+	if (gFmm && spareCache)
+		{// Cache wasn't at maximum so should have grown when unlocked pages were added.
+		r = DPTest::CacheSize(minCache, maxCache, newCache);
+		test_KErrNone(r);
+		TUint extraCache = (spareCache > (TUint)PageSize*2)? PageSize*2 : spareCache;
+		test_Equal(oldCache + extraCache, newCache);
+		}
+	test(FreeRam() >= NoFreeRam+PageSize*2);
+	r=TestChunk.Decommit(aOffset, PageSize*4);
+	test_KErrNone(r);
+	freeRam = FreeRam();
+	test_Compare(freeRam, >=, NoFreeRam+PageSize*4);
+	test_Equal(origChunkSize - PageSize*4, TestChunk.Size());
+
+	if (gFmm)
+		{// Cache should have shrunk after pages were decommitted.
+		r = DPTest::CacheSize(minCache, maxCache, newCache);
+		test_KErrNone(r);
+		test_Equal(oldCache, newCache);
+		}
+	// Restore chunk back to original state
+	r = TestChunk.Commit(aOffset, PageSize*4);
+	test_KErrNone(r);
+	test_Equal(NoFreeRam, FreeRam());
+
 	test.End();
 	}
 
@@ -450,6 +514,10 @@
 		test.Printf(_L("This test requires an MMU\n"));
 		return KErrNone;
 		}
+	// See if we're running on the Flexible Memory Model or newer.
+  	TUint32 memModelAttrib = (TUint32)UserSvr::HalFunction(EHalGroupKernel, EKernelHalMemModelInfo, NULL, NULL);	
+	gFmm = (memModelAttrib & EMemModelTypeMask) >= EMemModelTypeFlexible;
+
 	test.Start(_L("Initialise test"));
 	test.Next(_L("Load gobbler LDD"));
 	TInt r = User::LoadLogicalDevice(KGobblerLddFileName);
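The new assertions above all follow one pattern: unlocking chunk pages donates them to the demand-paging cache (bounded by its configured maximum), and decommitting them shrinks the cache back. A sketch of that bookkeeping, assuming only DPTest::CacheSize() from dptest.h as already used above:

#include <dptest.h>

// ETrue if the paging cache grew by aBytes, capped at the headroom below
// its maximum size, relative to the previously sampled size aOldCurrent.
TBool CacheGrewBy(TUint aBytes, TUint aOldCurrent)
	{
	TUint min, max, current;
	if (DPTest::CacheSize(min, max, current) != KErrNone)
		return EFalse;	// demand paging not supported here
	TUint spare = max - aOldCurrent;
	TUint expected = aOldCurrent + (spare > aBytes ? aBytes : spare);
	return current == expected;
	}
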
--- a/kerneltest/e32test/mmu/t_ramall.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kerneltest/e32test/mmu/t_ramall.cpp	Mon May 10 11:40:53 2010 +0100
@@ -20,6 +20,7 @@
 #include <e32test.h>
 #include <e32uid.h>
 #include <e32hal.h>
+#include <dptest.h>
 #include "d_shadow.h"
 #include "mmudetect.h"
 #include "freeram.h"
@@ -32,6 +33,18 @@
 TInt PageShift;
 RShadow Shadow;
 TInt InitFreeRam;
+RChunk Chunk;
+TUint ChunkCommitEnd;
+RThread TouchThread;
+TRequestStatus TouchStatus;
+TBool TouchDataStop;
+RThread FragThread;
+TRequestStatus FragStatus;
+TBool FragThreadStop;
+TBool ManualTest;
+TBool CacheSizeAdjustable;
+TUint OrigMinCacheSize;
+TUint OrigMaxCacheSize;
 
 TInt AllocPhysicalRam(TUint32& aAddr, TInt aSize, TInt aAlign)
 	{
@@ -118,6 +131,334 @@
 	
 	}
 
+
+struct SPhysAllocData
+	{
+	TUint iSize;
+	TUint iAlign;
+	TBool iCheckMaxAllocs;
+	TBool iCheckFreeRam;
+	};
+
+
+TInt FillPhysicalRam(TAny* aArgs)
+	{
+	SPhysAllocData& allocData = *((SPhysAllocData*)aArgs);
+	TUint maxAllocs = FreeRam() / allocData.iSize;
+	TUint32* physAddrs = new TUint32[maxAllocs + 1];
+	if (!physAddrs)
+		return KErrNoMemory;
+	TUint32* pa = physAddrs;
+	TUint32 alignMask = (1 << allocData.iAlign) - 1;
+	TUint initialFreeRam = FreeRam();
+	TInt r = KErrNone;
+	TUint allocations = 0;
+	for(; allocations <= maxAllocs; ++allocations)
+		{
+		TUint freeRam = FreeRam();			
+		r = AllocPhysicalRam(*pa, allocData.iSize, allocData.iAlign);
+		if (r != KErrNone)
+			break;
+		if (*pa++ & alignMask)
+			{
+			r = KErrGeneral;
+			RDebug::Printf("Error alignment phys addr 0x%08x", *(pa - 1));
+			break;
+			}
+		if (allocData.iCheckFreeRam && freeRam - allocData.iSize != (TUint)FreeRam())
+			{
+			r = KErrGeneral;
+			RDebug::Printf("Error in free ram 0x%08x orig 0x%08x", FreeRam(), freeRam);
+			break;
+			}
+		}
+
+	TUint32* physEnd = pa;
+	TBool failFrees = EFalse;
+	for (pa = physAddrs; pa < physEnd; pa++)
+		{
+		if (FreePhysicalRam(*pa, allocData.iSize) != KErrNone)
+			failFrees = ETrue;
+		}
+	if (failFrees)
+		r = KErrNotFound;
+	if (allocData.iCheckMaxAllocs && allocations > maxAllocs)
+		{
+		r = KErrOverflow;
+		RDebug::Printf("Error able to allocate too many pages");
+		}
+	if (allocData.iCheckFreeRam && initialFreeRam != (TUint)FreeRam())
+		{
+		r = KErrGeneral;
+		RDebug::Printf("Error in free ram 0x%08x initial 0x%08x", FreeRam(), initialFreeRam);
+		}
+	delete[] physAddrs;
+	if (r != KErrNone && r != KErrNoMemory)
+		return r;
+	TUint possibleAllocs = initialFreeRam / allocData.iSize;
+	if (allocData.iCheckMaxAllocs && possibleAllocs != allocations)
+		{
+		RDebug::Printf("Error in number of allocations possibleAllocs %d allocations %d", possibleAllocs, allocations);
+		return KErrGeneral;
+		}
+	return allocations;
+	}
+
+
+void TestMultipleContiguousAllocations(TUint aNumThreads, TUint aSize, TUint aAlign)
+	{
+	test.Printf(_L("TestMultiContig threads %d size 0x%x, align %d\n"), aNumThreads, aSize, aAlign);
+	SPhysAllocData allocData;
+	allocData.iSize = aSize;
+	allocData.iAlign = aAlign;
+	allocData.iCheckMaxAllocs = EFalse;
+	allocData.iCheckFreeRam = EFalse;
+	// Start several threads, each allocating contiguous memory.
+	RThread* threads = new RThread[aNumThreads];
+	TRequestStatus* status = new TRequestStatus[aNumThreads];
+	TUint i = 0;
+	for (; i < aNumThreads; i++)
+		{// Need enough heap to store addr of every possible allocation + 1.
+		TUint requiredHeapMax = Max(PageSize, ((InitFreeRam / aSize) * sizeof(TUint32)) + sizeof(TUint32));
+		TInt r = threads[i].Create(KNullDesC, FillPhysicalRam, KDefaultStackSize, PageSize, requiredHeapMax, (TAny*)&allocData);
+		test_KErrNone(r);
+		threads[i].Logon(status[i]);
+		}
+	for (i = 0; i < aNumThreads; i++)
+		{
+		threads[i].Resume();
+		}
+	for (i = 0; i < aNumThreads; i++)
+		{
+		User::WaitForRequest(status[i]);
+		test_Equal(EExitKill, threads[i].ExitType());
+		TInt exitReason = threads[i].ExitReason();
+		test_Value(exitReason, exitReason >= 0 || exitReason == KErrNoMemory);
+		threads[i].Close();
+		}
+	delete[] status;
+	delete[] threads;
+	}
+
+struct STouchData
+	{
+	TUint iSize;
+	TUint iFrequency;
+	}TouchData;
+
+
+TInt TouchMemory(TAny*)
+	{
+	while (!TouchDataStop)
+		{
+		TUint8* p = Chunk.Base();
+		TUint8* pEnd = p + ChunkCommitEnd;
+		TUint8* fragPEnd = p + TouchData.iFrequency;
+		for (TUint8* fragP = p + TouchData.iSize; fragPEnd < pEnd;)
+			{
+			TUint8* data = fragP;
+			for (; data < fragPEnd; data += PageSize)
+				{
+				*data = (TUint8)(data - fragP);
+				}
+			for (data = fragP; data < fragPEnd; data += PageSize)
+				{
+				if (*data != (TUint8)(data - fragP))
+					{
+					RDebug::Printf("Error unexpected data 0x%x read from 0x%08x", *data, data);
+					return KErrGeneral;
+					}
+				}
+			fragP = fragPEnd + TouchData.iSize;
+			fragPEnd += TouchData.iFrequency;
+			}
+		}
+	return KErrNone;
+	}
+
+struct SFragData
+	{
+	TUint iSize;
+	TUint iFrequency;
+	TUint iDiscard;
+	TBool iFragThread;
+	}FragData;
+
+void FragmentMemoryFunc()
+	{
+	ChunkCommitEnd = 0;
+	TInt r;
+	while(KErrNone == (r = Chunk.Commit(ChunkCommitEnd,PageSize)) && !FragThreadStop)
+		{
+		ChunkCommitEnd += PageSize;
+		}
+	if (FragThreadStop)
+		return;
+	test_Equal(KErrNoMemory, r);
+	TUint freeBlocks = 0;
+	for (	TUint offset = 0; 
+			(offset + FragData.iSize) < ChunkCommitEnd; 
+			offset += FragData.iFrequency, freeBlocks++)
+		{
+		test_KErrNone(Chunk.Decommit(offset, FragData.iSize));
+		}
+	if (!FragData.iFragThread)
+		test_Equal(FreeRam(), freeBlocks * FragData.iSize);
+
+	if (FragData.iDiscard && CacheSizeAdjustable && !FragThreadStop)
+		{
+		TUint minCacheSize = FreeRam();
+		TUint maxCacheSize = minCacheSize;
+		TUint currentCacheSize;
+		test_KErrNone(DPTest::CacheSize(OrigMinCacheSize, OrigMaxCacheSize, currentCacheSize));
+		test_KErrNone(DPTest::SetCacheSize(minCacheSize, maxCacheSize));
+		test_KErrNone(DPTest::SetCacheSize(OrigMinCacheSize, maxCacheSize));
+		}
+	}
+
+
+void UnfragmentMemoryFunc()
+	{
+	if (FragData.iDiscard && CacheSizeAdjustable)
+		test_KErrNone(DPTest::SetCacheSize(OrigMinCacheSize, OrigMaxCacheSize));
+	Chunk.Decommit(0, Chunk.MaxSize());
+	}
+
+
+TInt FragmentMemoryThreadFunc(TAny*)
+	{
+	while (!FragThreadStop)
+		{
+		FragmentMemoryFunc();
+		UnfragmentMemoryFunc();
+		}
+	return KErrNone;
+	}
+
+
+void FragmentMemory(TUint aSize, TUint aFrequency, TBool aDiscard, TBool aTouchMemory, TBool aFragThread)
+	{
+	test_Value(aTouchMemory, !aTouchMemory || !aFragThread);
+	test_Value(aSize, aSize < aFrequency);
+	FragData.iSize = aSize;
+	FragData.iFrequency = aFrequency;
+	FragData.iDiscard = aDiscard;
+	FragData.iFragThread = aFragThread;
+
+	TChunkCreateInfo chunkInfo;
+	chunkInfo.SetDisconnected(0, 0, FreeRam());
+	chunkInfo.SetPaging(TChunkCreateInfo::EUnpaged);
+	test_KErrNone(Chunk.Create(chunkInfo));
+
+	if (aFragThread)
+		{
+		TInt r = FragThread.Create(KNullDesC, FragmentMemoryThreadFunc, KDefaultStackSize, PageSize, PageSize, NULL);
+		test_KErrNone(r);
+		FragThread.Logon(FragStatus);
+		FragThreadStop = EFalse;
+		FragThread.Resume();
+		}
+	else
+		{
+		FragmentMemoryFunc();
+		}
+	if (aTouchMemory && !ManualTest)
+		{
+		TouchData.iSize = aSize;
+		TouchData.iFrequency = aFrequency;
+		TInt r = TouchThread.Create(KNullDesC, TouchMemory, KDefaultStackSize, PageSize, PageSize, NULL);
+		test_KErrNone(r);
+		TouchThread.Logon(TouchStatus);
+		TouchDataStop = EFalse;
+		TouchThread.Resume();
+		}
+	}
+
+
+void UnfragmentMemory(TBool aDiscard, TBool aTouchMemory, TBool aFragThread)
+	{
+	test_Value(aTouchMemory, !aTouchMemory || !aFragThread);
+	if (aTouchMemory && !ManualTest)
+		{
+		TouchDataStop = ETrue;
+		User::WaitForRequest(TouchStatus);
+		test_Equal(EExitKill, TouchThread.ExitType());
+		test_KErrNone(TouchThread.ExitReason());
+		CLOSE_AND_WAIT(TouchThread);
+		}
+	if (aFragThread)
+		{
+		FragThreadStop = ETrue;
+		User::WaitForRequest(FragStatus);
+		test_Equal(EExitKill, FragThread.ExitType());
+		test_KErrNone(FragThread.ExitReason());
+		CLOSE_AND_WAIT(FragThread);
+		}
+	else
+		UnfragmentMemoryFunc();
+	CLOSE_AND_WAIT(Chunk);
+	}
+
+
+void TestFillPhysicalRam(TUint aFragSize, TUint aFragFreq, TUint aAllocSize, TUint aAllocAlign, TBool aDiscard, TBool aTouchMemory)
+	{
+	test.Printf(_L("TestFillPhysicalRam aFragSize 0x%x aFragFreq 0x%x aAllocSize 0x%x aAllocAlign %d dis %d touch %d\n"),
+				aFragSize, aFragFreq, aAllocSize, aAllocAlign, aDiscard, aTouchMemory);
+	FragmentMemory(aFragSize, aFragFreq, aDiscard, aTouchMemory, EFalse);
+	SPhysAllocData allocData;
+	// Only check that all free RAM could be allocated in manual tests, as fixed pages may be fragmented.
+	allocData.iCheckMaxAllocs = (ManualTest && !aTouchMemory && !aAllocAlign)? ETrue : EFalse;
+	allocData.iCheckFreeRam = ETrue;
+	allocData.iSize = aAllocSize;
+	allocData.iAlign = aAllocAlign;
+	FillPhysicalRam(&allocData);
+	UnfragmentMemory(aDiscard, aTouchMemory, EFalse);
+	}
+
+
+void TestFragmentedAllocation()
+	{
+	// Test every other page free.
+	TestFillPhysicalRam(PageSize, PageSize * 2, PageSize, 0, EFalse, EFalse);
+	if (ManualTest)
+		{
+		TestFillPhysicalRam(PageSize, PageSize * 2, PageSize * 2, 0, EFalse, EFalse);
+		TestFillPhysicalRam(PageSize, PageSize * 2, PageSize, 0, EFalse, ETrue);
+		}
+	TestFillPhysicalRam(PageSize, PageSize * 2, PageSize * 2, 0, EFalse, ETrue);
+	// Test every 2 pages free.
+	TestFillPhysicalRam(PageSize * 2, PageSize * 4, PageSize * 8, 0, EFalse, EFalse);
+	if (ManualTest)
+		TestFillPhysicalRam(PageSize * 2, PageSize * 4, PageSize * 8, 0, EFalse, ETrue);
+	// Test 10 pages free then 20 pages allocated, allocate 256 pages (1MB in most cases).
+	if (ManualTest)
+		TestFillPhysicalRam(PageSize * 10, PageSize * 30, PageSize * 256, 0, EFalse, EFalse);
+	TestFillPhysicalRam(PageSize * 10, PageSize * 30, PageSize * 256, 0, EFalse, ETrue);
+
+	if (CacheSizeAdjustable)
+		{// The cache size is adjustable, so test that physically contiguous
+		// allocations discard and move pages when required.
+		test.Next(_L("TestFragmentedAllocations with discardable data no true free memory"));
+		// Test every other page free.
+		TestFillPhysicalRam(PageSize, PageSize * 2, PageSize, 0, ETrue, EFalse);
+		if (ManualTest)
+			{
+			TestFillPhysicalRam(PageSize, PageSize * 2, PageSize, 0, ETrue, ETrue);
+			TestFillPhysicalRam(PageSize, PageSize * 2, PageSize * 2, 0, ETrue, EFalse);
+			}
+		TestFillPhysicalRam(PageSize, PageSize * 2, PageSize * 2, 0, ETrue, ETrue);
+		// Test every 2 pages free.
+		TestFillPhysicalRam(PageSize * 2, PageSize * 4, PageSize * 8, 0, ETrue, EFalse);
+		if (ManualTest)
+			TestFillPhysicalRam(PageSize * 2, PageSize * 4, PageSize * 8, 0, ETrue, ETrue);
+		// Test 10 pages free then 20 pages allocated, allocate 256 pages (1MB in most cases).
+		if (ManualTest)
+			TestFillPhysicalRam(PageSize * 10, PageSize * 30, PageSize * 256, 0, ETrue, EFalse);
+		TestFillPhysicalRam(PageSize * 10, PageSize * 30, PageSize * 256, 0, ETrue, ETrue);
+		}
+	}
+
+
 GLDEF_C TInt E32Main()
 //
 // Test RAM allocation
@@ -135,6 +476,28 @@
 	PageShift=-1;
 	for (; psz; psz>>=1, ++PageShift);
 
+	TUint currentCacheSize;
+	CacheSizeAdjustable = DPTest::CacheSize(OrigMinCacheSize, OrigMaxCacheSize, currentCacheSize) == KErrNone;
+
+	TUint memModel = UserSvr::HalFunction(EHalGroupKernel, EKernelHalMemModelInfo, NULL, NULL) & EMemModelTypeMask;
+
+	TInt cmdLineLen = User::CommandLineLength();
+	if(cmdLineLen)
+		{
+		_LIT(KManual, "manual");
+		RBuf cmdLine;
+		test_KErrNone(cmdLine.Create(cmdLineLen));
+		User::CommandLine(cmdLine);
+		cmdLine.LowerCase();
+		ManualTest = cmdLine.Find(KManual) != KErrNotFound;
+		cmdLine.Close();
+		}
+
+	// Turn off lazy dll unloading so the free ram checking isn't affected.
+	RLoader l;
+	test_KErrNone(l.Connect());
+	test_KErrNone(l.CancelLazyDllUnload());
+	l.Close();
+
 	InitFreeRam=FreeRam();
 	test.Printf(_L("Free RAM=%08x, Page size=%x, Page shift=%d\n"),InitFreeRam,PageSize,PageShift);
 
@@ -148,8 +511,54 @@
 	test.Next(_L("TestClaimPhys"));
 	TestClaimPhys();
 
+	if (memodel >= EMemModelTypeFlexible)
+		{
+		test.Next(_L("TestFragmentedAllocation"));
+		TestFragmentedAllocation();
+
+		test.Next(_L("TestMultipleContiguousAllocations"));
+		TestMultipleContiguousAllocations(20, PageSize * 16, 0);
+		TestMultipleContiguousAllocations(20, PageSize * 16, PageShift + 1);
+		TestMultipleContiguousAllocations(20, PageSize * 128, PageShift + 2);
+
+		FragmentMemory(PageSize, PageSize * 2, EFalse, EFalse, EFalse);
+		TestMultipleContiguousAllocations(20, PageSize * 128, PageShift + 2);
+		UnfragmentMemory(EFalse, EFalse, EFalse);
+
+		test.Next(_L("TestMultipleContiguousAllocations while accessing memory"));
+		FragmentMemory(PageSize, PageSize * 2, EFalse, ETrue, EFalse);
+		TestMultipleContiguousAllocations(20, PageSize * 128, PageShift + 2);
+		UnfragmentMemory(EFalse, ETrue, EFalse);
+		FragmentMemory(PageSize, PageSize * 2, ETrue, ETrue, EFalse);
+		TestMultipleContiguousAllocations(50, PageSize * 256, PageShift + 5);
+		UnfragmentMemory(ETrue, ETrue, EFalse);
+		FragmentMemory(PageSize * 16, PageSize * 32, ETrue, ETrue, EFalse);
+		TestMultipleContiguousAllocations(10, PageSize * 512, PageShift + 8);
+		UnfragmentMemory(ETrue, ETrue, EFalse);
+		FragmentMemory(PageSize * 32, PageSize * 64, ETrue, ETrue, EFalse);
+		TestMultipleContiguousAllocations(10, PageSize * 1024, PageShift + 10);
+		UnfragmentMemory(ETrue, ETrue, EFalse);
+
+		test.Next(_L("TestMultipleContiguousAllocations with repeated movable and discardable allocations"));
+		FragmentMemory(PageSize, PageSize * 2, EFalse, EFalse, ETrue);
+		TestMultipleContiguousAllocations(20, PageSize * 2, PageShift);
+		UnfragmentMemory(EFalse, EFalse, ETrue);
+		FragmentMemory(PageSize, PageSize * 2, EFalse, EFalse, ETrue);
+		TestMultipleContiguousAllocations(20, PageSize * 128, PageShift + 2);
+		UnfragmentMemory(EFalse, EFalse, ETrue);
+		FragmentMemory(PageSize, PageSize * 2, ETrue, EFalse, ETrue);
+		TestMultipleContiguousAllocations(50, PageSize * 256, PageShift + 5);
+		UnfragmentMemory(ETrue, EFalse, ETrue);
+		FragmentMemory(PageSize * 16, PageSize * 32, ETrue, EFalse, ETrue);
+		TestMultipleContiguousAllocations(20, PageSize * 512, PageShift + 8);
+		UnfragmentMemory(ETrue, EFalse, ETrue);
+		FragmentMemory(PageSize * 32, PageSize * 64, ETrue, EFalse, ETrue);
+		TestMultipleContiguousAllocations(20, PageSize * 1024, PageShift + 10);
+		UnfragmentMemory(ETrue, EFalse, ETrue);
+		}
+
 	Shadow.Close();
+	test.Printf(_L("Free RAM=%08x at end of test\n"),FreeRam());
 	test.End();
 	return(KErrNone);
     }
-
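The fragmentation scheme behind FragmentMemory() above is simple: commit a disconnected, unpaged chunk over all free RAM, then decommit at a fixed stride so only small free holes remain for the contiguous allocator to fill. A condensed sketch of FragmentMemoryFunc() (variable names here are illustrative):

RChunk chunk;
TChunkCreateInfo info;
info.SetDisconnected(0, 0, FreeRam());			// span all free RAM
info.SetPaging(TChunkCreateInfo::EUnpaged);		// keep the pages unpaged
test_KErrNone(chunk.Create(info));

TUint end = 0;
while (chunk.Commit(end, PageSize) == KErrNone)
	end += PageSize;							// claim every free page
for (TUint off = 0; off + PageSize < end; off += 2 * PageSize)
	test_KErrNone(chunk.Decommit(off, PageSize));	// free every other page
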
--- a/kerneltest/e32test/prime/t_semutx.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kerneltest/e32test/prime/t_semutx.cpp	Mon May 10 11:40:53 2010 +0100
@@ -38,7 +38,10 @@
 // 
 //
 
+#define __E32TEST_EXTENSION__
 #include <e32test.h>
+#include <u32std.h>
+#include <e32svr.h>
 
 const TInt KMaxBufferSize=10;
 const TInt KMaxArraySize=10;
@@ -221,7 +224,7 @@
 void StartWaitSemThread(RThread& aT, SWaitSem& aW, TThreadPriority aP=EPriorityLess)
 	{
 	TInt r = aT.Create(KNullDesC, &WaitSemThread, 0x1000, 0x1000, 0x1000, &aW);
-	test(r==KErrNone);
+	test_KErrNone(r);
 	aT.SetPriority(aP);
 	aT.Resume();
 	}
@@ -231,9 +234,9 @@
 	TRequestStatus s;
 	aT.Logon(s);
 	User::WaitForRequest(s);
-	test(aT.ExitType()==EExitKill);
-	test(aT.ExitReason()==aResult);
-	test(s.Int()==aResult);
+	test_Equal(EExitKill, aT.ExitType());
+	test_Equal(aResult, aT.ExitReason());
+	test_Equal(aResult, s.Int());
 	CLOSE_AND_WAIT(aT);
 	}
 
@@ -251,7 +254,7 @@
 	TTime final;
 	TInt elapsed=0;
 	TInt r = ws.iSem.CreateLocal(0);
-	test(r==KErrNone);
+	test_KErrNone(r);
 
 	RThread().SetPriority(EPriorityAbsoluteVeryLow);
 	TInt threadcount=0;
@@ -259,7 +262,7 @@
 	while (elapsed<1000000)
 		{
 		r = t.Create(KNullDesC, &DummyThread, 0x1000, NULL, NULL);
-		test(r==KErrNone);
+		test_KErrNone(r);
 		t.SetPriority(EPriorityMore);
 		t.Resume();
 		t.Close();
@@ -307,7 +310,7 @@
 	User::After(200000);
 	t.Resume();
 	WaitForWaitSemThread(t, KErrTimedOut);
-	test(ws.iSem.Wait(1)==KErrNone);
+	test_KErrNone(ws.iSem.Wait(1));
 
 	ws.iTimeout=100000;
 	StartWaitSemThread(t, ws, EPriorityMore);
@@ -316,7 +319,7 @@
 	User::After(50000);
 	t.Resume();
 	WaitForWaitSemThread(t, KErrNone);
-	test(ws.iSem.Wait(1)==KErrTimedOut);
+	test_Equal(KErrTimedOut, ws.iSem.Wait(1));
 
 	RThread t2;
 	ws.iTimeout=100000;
@@ -324,12 +327,12 @@
 	StartWaitSemThread(t2, ws, EPriorityMore);
 	t.Suspend();
 	ws.iSem.Signal();
-	test(t2.ExitType()==EExitKill);
-	test(t.ExitType()==EExitPending);
+	test_Equal(EExitKill, t2.ExitType());
+	test_Equal(EExitPending, t.ExitType());
 	t.Resume();
 	WaitForWaitSemThread(t, KErrTimedOut);
 	WaitForWaitSemThread(t2, KErrNone);
-	test(ws.iSem.Wait(1)==KErrTimedOut);
+	test_Equal(KErrTimedOut, ws.iSem.Wait(1));
 
 	ws.iTimeout=1000000;
 	initial.HomeTime();
@@ -376,11 +379,11 @@
 	initial.HomeTime();
 	StartWaitSemThread(t, ws, EPriorityMore);
 	StartWaitSemThread(t2, ws, EPriorityMuchMore);
-	test(t.ExitType()==EExitPending);
-	test(t2.ExitType()==EExitPending);
+	test_Equal(EExitPending, t.ExitType());
+	test_Equal(EExitPending, t2.ExitType());
 	ws.iSem.Close();
-	test(t.ExitType()==EExitKill);
-	test(t2.ExitType()==EExitKill);
+	test_Equal(EExitKill, t.ExitType());
+	test_Equal(EExitKill, t2.ExitType());
 	WaitForWaitSemThread(t2, KErrGeneral);
 	WaitForWaitSemThread(t, KErrGeneral);
 	final.HomeTime();
@@ -414,23 +417,23 @@
 	test.Next(_L("Producer/Consumer scenario"));
 	// Test Rsemaphore with the producer/consumer scenario	RThread thread1, thread2;
 	TRequestStatus stat1, stat2;
-	test(mutex.CreateLocal()==KErrNone);
-	test(slotAvailable.CreateLocal(KMaxBufferSize)==KErrNone);
-	test(itemAvailable.CreateLocal(0)==KErrNone);
-	test(thread1.Create(_L("Thread1"),Producer,KDefaultStackSize,0x200,0x200,NULL)==KErrNone);
-	test(thread2.Create(_L("Thread2"),Consumer,KDefaultStackSize,0x200,0x200,NULL)==KErrNone);
+	test_KErrNone(mutex.CreateLocal());
+	test_KErrNone(slotAvailable.CreateLocal(KMaxBufferSize));
+	test_KErrNone(itemAvailable.CreateLocal(0));
+	test_KErrNone(thread1.Create(_L("Thread1"),Producer,KDefaultStackSize,0x200,0x200,NULL));
+	test_KErrNone(thread2.Create(_L("Thread2"),Consumer,KDefaultStackSize,0x200,0x200,NULL));
 	thread1.Logon(stat1);
 	thread2.Logon(stat2);
-	test(stat1==KRequestPending);
-	test(stat2==KRequestPending);
+	test_Equal(KRequestPending, stat1.Int());
+	test_Equal(KRequestPending, stat2.Int());
 	thread1.Resume(); 
 	thread2.Resume();
 	User::WaitForRequest(stat1);
 	User::WaitForRequest(stat2);
-	test(stat1==KErrNone);
-	test(stat2==KErrNone);
+	test_KErrNone(stat1.Int());
+	test_KErrNone(stat2.Int());
 	for(TInt jj=0;jj<KNumProducerItems;jj++)
-		test(consumerArray[jj]==jj);		
+		test_Equal(jj, consumerArray[jj]);		
 	
 	test.Next(_L("Close"));
 	mutex.Close();
@@ -443,7 +446,7 @@
 	{
 	RMutex m;
 	test.Start(_L("Create"));
-	test(m.CreateLocal()==KErrNone);
+	test_KErrNone(m.CreateLocal());
 
 	// Test RMutex::IsHeld()
 	test.Next(_L("IsHeld ?"));
@@ -463,7 +466,7 @@
 void TestMutex()
 	{
 	test.Start(_L("Create"));
-	test(mutex.CreateLocal()==KErrNone);
+	test_KErrNone(mutex.CreateLocal());
 	
 	test.Next(_L("Threads writing to arrays test"));
 //
@@ -477,19 +480,19 @@
 //
 	arrayIndex=0;
 	RThread thread1,thread2;	
-	test(thread1.Create(_L("Thread1"),MutexThreadEntryPoint1,KDefaultStackSize,0x2000,0x2000,NULL)==KErrNone);
-	test(thread2.Create(_L("Thread2"),MutexThreadEntryPoint2,KDefaultStackSize,0x2000,0x2000,NULL)==KErrNone);			 
+	test_KErrNone(thread1.Create(_L("Thread1"),MutexThreadEntryPoint1,KDefaultStackSize,0x2000,0x2000,NULL));
+	test_KErrNone(thread2.Create(_L("Thread2"),MutexThreadEntryPoint2,KDefaultStackSize,0x2000,0x2000,NULL));			 
 	TRequestStatus stat1,stat2;
 	thread1.Logon(stat1);
 	thread2.Logon(stat2);
-	test(stat1==KRequestPending);
-	test(stat2==KRequestPending);
+	test_Equal(KRequestPending, stat1.Int());
+	test_Equal(KRequestPending, stat2.Int());
 	thread1.Resume(); 
 	thread2.Resume();
 	User::WaitForRequest(stat1);
 	User::WaitForRequest(stat2);
-	test(stat1==KErrNone);
-	test(stat2==KErrNone); 
+	test_KErrNone(stat1.Int());
+	test_KErrNone(stat2.Int()); 
 	TInt thread1ActualCount=0; 
 	TInt thread2ActualCount=0;
 	TInt ii=0;
@@ -502,10 +505,10 @@
 		ii++;
 		}
 	test.Printf(_L("T1 %d T1ACT %d T2 %d T2ACT %d"),thread1Count,thread1ActualCount,thread2Count,thread2ActualCount);
-	test(thread1ActualCount==thread1Count);
-	test(thread2ActualCount==thread2Count);
-	test(thread1Count==thread2Count);
-	test(thread1Count==(KMaxArraySize>>1));
+	test_Equal(thread1Count, thread1ActualCount);
+	test_Equal(thread2Count, thread2ActualCount);
+	test_Equal(thread2Count, thread1Count);
+	test_Equal((KMaxArraySize>>1), thread1Count);
 	
 	test.Next(_L("Close"));
 	CLOSE_AND_WAIT(thread1);
@@ -521,7 +524,7 @@
 	{
 	
 	test.Start(_L("Create"));
-	test(criticalSn.CreateLocal()==KErrNone);
+	test_KErrNone(criticalSn.CreateLocal());
 
 /***************** TO DO ***********************
 
@@ -551,19 +554,19 @@
 //
 	arrayIndex=0;
 	RThread thread1,thread2;	
-	test(thread1.Create(_L("Thread1"),CriticalSnThreadEntryPoint1,KDefaultStackSize,0x2000,0x2000,NULL)==KErrNone);
-	test(thread2.Create(_L("Thread2"),CriticalSnThreadEntryPoint2,KDefaultStackSize,0x2000,0x2000,NULL)==KErrNone);			 
+	test_KErrNone(thread1.Create(_L("Thread1"),CriticalSnThreadEntryPoint1,KDefaultStackSize,0x2000,0x2000,NULL));
+	test_KErrNone(thread2.Create(_L("Thread2"),CriticalSnThreadEntryPoint2,KDefaultStackSize,0x2000,0x2000,NULL));			 
 	TRequestStatus stat1,stat2;
 	thread1.Logon(stat1);
 	thread2.Logon(stat2);
-	test(stat1==KRequestPending);
-	test(stat2==KRequestPending);
+	test_Equal(KRequestPending, stat1.Int());
+	test_Equal(KRequestPending, stat2.Int());
 	thread1.Resume(); 
 	thread2.Resume();
 	User::WaitForRequest(stat1);
 	User::WaitForRequest(stat2);
-	test(stat1==KErrNone);
-	test(stat2==KErrNone); 
+	test_KErrNone(stat1.Int());
+	test_KErrNone(stat2.Int()); 
 	TInt thread1ActualCount=0; 
 	TInt thread2ActualCount=0;
 	TInt ii=0;
@@ -575,10 +578,10 @@
 			thread2ActualCount++;
 		ii++;
 		}
-	test(thread1ActualCount==thread1Count);
-	test(thread2ActualCount==thread2Count);
-	test(thread1Count==thread2Count);
-	test(thread1Count==(KMaxArraySize>>1));
+	test_Equal(thread1Count, thread1ActualCount);
+	test_Equal(thread2Count, thread2ActualCount);
+	test_Equal(thread2Count, thread1Count);
+	test_Equal((KMaxArraySize>>1), thread1Count);
 
 	test.Next(_L("Close"));
 	CLOSE_AND_WAIT(thread1);
@@ -590,6 +593,16 @@
 
 GLDEF_C TInt E32Main()
 	{	
+	TInt cpus = UserSvr::HalFunction(EHalGroupKernel, EKernelHalNumLogicalCpus, 0, 0);
+	if (cpus != 1)
+		{
+		test(cpus>1);
+		// This test will require compatibility mode (and probably other changes)
+		// to work on SMP - it depends on explicit scheduling order.
+		test.Printf(_L("T_SEMUTX skipped, does not work on SMP\n"));
+		return KErrNone;
+		}	
+	
 
 	test.Title();
  	__UHEAP_MARK;
--- a/kerneltest/e32test/prime/t_semutx2.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kerneltest/e32test/prime/t_semutx2.cpp	Mon May 10 11:40:53 2010 +0100
@@ -37,7 +37,10 @@
 // 
 //
 
+#define __E32TEST_EXTENSION__
 #include <e32test.h>
+#include <u32std.h>
+#include <e32svr.h>
 
 RMutex M1;
 RMutex M2;
@@ -62,8 +65,8 @@
 //#define MCOUNT(m,c)	test((m).Count() ==(c))
 // mutex count value is not visible for user any more
 #define MCOUNT(m,c) (void)(1)
-#define IDCHECK(x) test(GetNextId()==(x))
-#define NUMCHECK(x)	test(NumIdsPending()==(x))
+#define IDCHECK(x) test_Equal((x), GetNextId())
+#define NUMCHECK(x)	test_Equal((x), NumIdsPending())
 
 #define id0		id[0]
 #define id1		id[1]
@@ -153,38 +156,38 @@
 	TInt count=0;
 	TRequestStatus s;
 	TInt r=t.Create(_L("Test0"),Test0Thread,0x1000,NULL,&count);
-	test(r==KErrNone);
+	test_KErrNone(r);
 	t.Logon(s);
-	test(r==KErrNone);
+	test_KErrNone(r);
 	User::After(10000);		// make sure we have a full timeslice
 	t.Resume();
 
-	test(count==0);			// t shouldn't have run yet
+	test_Equal(0, count);			// t shouldn't have run yet
 	RThread().SetPriority(EPriorityMuchMore);	// shouldn't reschedule (priority unchanged)
-	test(count==0);
+	test_Equal(0, count);
 	RThread().SetPriority(EPriorityMore);	// shouldn't reschedule (priority decreasing, but not enough)
-	test(count==0);
+	test_Equal(0, count);
 	RThread().SetPriority(EPriorityMuchMore);	// shouldn't reschedule (priority increasing)
-	test(count==0);
+	test_Equal(0, count);
 	RThread().SetPriority(EPriorityNormal);	// should reschedule (we go behind t)
-	test(count==1);
+	test_Equal(1, count);
 	RThread().SetPriority(EPriorityLess);	// should reschedule (priority decreasing to below t)
-	test(count==2);
+	test_Equal(2, count);
 	t.SetPriority(EPriorityMuchMore);		// shouldn't reschedule (round-robin, timeslice not expired)
-	test(count==2);
+	test_Equal(2, count);
 	t.SetPriority(EPriorityNormal);			// shouldn't reschedule (t's priority decreasing)
-	test(count==2);
+	test_Equal(2, count);
 	t.SetPriority(EPriorityNormal);			// shouldn't reschedule (t's priority unchanged)
-	test(count==2);
+	test_Equal(2, count);
 	BusyWait(100000);		// use up our timeslice
 	t.SetPriority(EPriorityMuchMore);		// should reschedule (round-robin, timeslice expired)
-	test(count==3);
-	test(s==KRequestPending);
-	test(t.ExitType()==EExitPending);
+	test_Equal(3, count);
+	test_Equal(KRequestPending, s.Int());
+	test_Equal(EExitPending, t.ExitType());
 	t.SetPriority(EPriorityRealTime);		// should reschedule (t increases above current)
-	test(count==4);
-	test(s==KErrNone);						// t should have exited
-	test(t.ExitType()==EExitKill);
+	test_Equal(4, count);
+	test_KErrNone(s.Int());						// t should have exited
+	test_Equal(EExitKill, t.ExitType());
 	User::WaitForRequest(s);
 	RThread().SetPriority(EPriorityMuchMore);
 	t.Close();
@@ -201,11 +204,11 @@
 	{
 	test.Start(_L("Test signalling from wrong thread"));
 	TInt r=M1.CreateLocal();
-	test(r==KErrNone);
+	test_KErrNone(r);
 	M1.Wait();
 	RThread t;
 	r=t.Create(_L("Test1"),Test1Thread,0x1000,NULL,NULL);
-	test(r==KErrNone);
+	test_KErrNone(r);
 	TRequestStatus s;
 	t.Logon(s);
 	t.Resume();
@@ -213,9 +216,9 @@
 	User::SetJustInTime(EFalse);
 	User::WaitForRequest(s);
 	User::SetJustInTime(jit);
-	test(s==EAccessDenied);
-	test(t.ExitType()==EExitPanic);
-	test(t.ExitReason()==EAccessDenied);
+	test_Equal(EAccessDenied, s.Int());
+	test_Equal(EExitPanic, t.ExitType());
+	test_Equal(EAccessDenied, t.ExitReason());
 	test(t.ExitCategory()==_L("KERN-EXEC"));
 	t.Close();
 	M1.Close();
@@ -273,13 +276,13 @@
 
 	test.Next(_L("Create mutex"));
 	TInt r=M1.CreateLocal();
-	test(r==KErrNone);
+	test_KErrNone(r);
 
 	test.Next(_L("Create low priority thread"));
 	TInt lowcount=0;
 	RThread low;
 	r=low.Create(_L("low"),LowThread,0x1000,NULL,&lowcount);
-	test(r==KErrNone);
+	test_KErrNone(r);
 	low.SetPriority(EPriorityMuchLess);
 	test(Exists(_L("low")));
 
@@ -287,42 +290,42 @@
 	TInt medcount=0;
 	RThread med;
 	r=med.Create(_L("med"),MedThread,0x1000,NULL,&medcount);
-	test(r==KErrNone);
+	test_KErrNone(r);
 	med.SetPriority(EPriorityNormal);
 	test(Exists(_L("med")));
 
 	test.Next(_L("Start low priority thread"));
 	low.Resume();
 	User::AfterHighRes(KTestDelay/10);
-	test(lowcount==1);
+	test_Equal(1, lowcount);
 //	MCOUNT(M1,0);
 
 	test.Next(_L("Start medium priority thread"));
 	med.Resume();
 	User::AfterHighRes(KTestDelay/10);
-	test(medcount==1);
+	test_Equal(1, medcount);
 	Kick(med);
 	User::AfterHighRes(KTestDelay/10);
-	test(medcount==2);
+	test_Equal(2, medcount);
 	Kick(med);
 
 	M1.Wait();
-	test(lowcount==1);
-	test(medcount==2);
+	test_Equal(1, lowcount);
+	test_Equal(2, medcount);
 	test.Next(_L("Wait, check medium runs"));
 	User::AfterHighRes(KTestDelay/10);
-	test(medcount==3);
+	test_Equal(3, medcount);
 	M1.Signal();
 
 	test.Next(_L("Create mutex 2"));
 	r=M2.CreateLocal();
-	test(r==KErrNone);
+	test_KErrNone(r);
 
 	test.Next(_L("Create high priority thread"));
 	TInt highcount=0;
 	RThread high;
 	r=high.Create(_L("high"),HighThread,0x1000,NULL,&highcount);
-	test(r==KErrNone);
+	test_KErrNone(r);
 	high.SetPriority(EPriorityMore);
 	test(Exists(_L("high")));
 
@@ -336,15 +339,15 @@
 	User::AfterHighRes(KTestDelay/10);
 //	MCOUNT(M2,0);
 //	MCOUNT(M1,-1);
-	test(highcount==1);
+	test_Equal(1, highcount);
 
 	M2.Wait();
-	test(lowcount==2);
-	test(medcount==3);
-	test(highcount==2);
+	test_Equal(2, lowcount);
+	test_Equal(3, medcount);
+	test_Equal(2, highcount);
 	test.Next(_L("Wait, check medium runs"));
 	User::AfterHighRes(KTestDelay/10);
-	test(medcount==4);
+	test_Equal(4, medcount);
 	M2.Signal();
 
 	test.Next(_L("Kill threads"));
@@ -401,7 +404,7 @@
 	TBuf<4> b;
 	b.Num(n);
 	TInt r=t.Create(b,ThreadFunction,0x1000,NULL,aPtr);
-	test(r==KErrNone);
+	test_KErrNone(r);
 	t.Resume();
 	TUint id=t.Id();
 	test.Printf(_L("id=%d\n"),id);
@@ -516,7 +519,7 @@
 	Count=0;
 	test.Next(_L("Create mutex"));
 	TInt r=M1.CreateLocal();
-	test(r==KErrNone);
+	test_KErrNone(r);
 	MCOUNT(M1,1);
 	MutexWait();
 	MCOUNT(M1,0);
@@ -624,7 +627,7 @@
 	User::After(50000);		// let threads claim mutex
 	MutexWait();
 	MCOUNT(M1,0);			// check no threads waiting
-	test(t[2].ExitType()==EExitKill);	// check t2 has exited
+	test_Equal(EExitKill, t[2].ExitType());	// check t2 has exited
 	t[2].Close();
 	test(!Exists(2));
 	IDCHECK(id2);			// check they ran in order t2,t3,t4,t5,t1
@@ -942,12 +945,12 @@
 		{
 		if (i==3 || i==6 || i==7)
 			{
-			test(t[i].ExitType()==EExitPending);
+			test_Equal(EExitPending, t[i].ExitType());
 			}
 		else
 			{
-			test(t[i].ExitType()==EExitPanic);
-			test(t[i].ExitReason()==EBadHandle);
+			test_Equal(EExitPanic, t[i].ExitType());
+			test_Equal(EBadHandle, t[i].ExitReason());
 			test(t[i].ExitCategory()==_L("KERN-EXEC"));
 			t[i].Close();
 			test(!Exists(i));
@@ -963,8 +966,8 @@
 		{
 		if (i==3 || i==6 || i==7)
 			{
-			test(t[i].ExitType()==EExitPanic);
-			test(t[i].ExitReason()==EBadHandle);
+			test_Equal(EExitPanic, t[i].ExitType());
+			test_Equal(EBadHandle, t[i].ExitReason());
 			test(t[i].ExitCategory()==_L("KERN-EXEC"));
 			t[i].Close();
 			test(!Exists(i));
@@ -1008,11 +1011,11 @@
 	test.Start(_L("Test mutex speed"));
 	TInt count=0;
 	TInt r=M1.CreateLocal();
-	test(r==KErrNone);
+	test_KErrNone(r);
 
 	RThread t;
 	r=t.Create(_L("Speed"),MutexSpeed,0x1000,NULL,&count);
-	test(r==KErrNone);
+	test_KErrNone(r);
 	t.SetPriority(EPriorityRealTime);
 	t.Resume();
 	User::AfterHighRes(1000000);
@@ -1023,7 +1026,7 @@
 
 	TInt count2=0;
 	r=t.Create(_L("Speed2"),MutexSpeed2,0x1000,NULL,&count2);
-	test(r==KErrNone);
+	test_KErrNone(r);
 	t.SetPriority(EPriorityRealTime);
 	t.Resume();
 	User::AfterHighRes(1000000);
@@ -1074,7 +1077,7 @@
 	TBuf<4> b;
 	b.Num(n);
 	TInt r=t.Create(b,SemThreadFunction,0x1000,NULL,aPtr);
-	test(r==KErrNone);
+	test_KErrNone(r);
 	t.Resume();
 	TUint id=t.Id();
 	return id;
@@ -1147,7 +1150,7 @@
 	Count=0;
 	test.Next(_L("Create semaphore"));
 	TInt r=S.CreateLocal(2);
-	test(r==KErrNone);
+	test_KErrNone(r);
 	MCOUNT(S,2);
 	SemWait();
 	MCOUNT(S,1);
@@ -1312,20 +1315,20 @@
 		{
 		if (i==3 || i==7 || i==10)
 			{
-			test(t[i].ExitType()==EExitPending);
+			test_Equal(EExitPending, t[i].ExitType());
 			}
 		else if (i!=5)
 			{
-			test(t[i].ExitType()==EExitPanic);
-			test(t[i].ExitReason()==EBadHandle);
+			test_Equal(EExitPanic, t[i].ExitType());
+			test_Equal(EBadHandle, t[i].ExitReason());
 			test(t[i].ExitCategory()==_L("KERN-EXEC"));
 			t[i].Close();
 			test(!Exists(i));
 			}
 		else
 			{
-			test(t[i].ExitType()==EExitKill);
-			test(t[i].ExitReason()==0);
+			test_Equal(EExitKill, t[i].ExitType());
+			test_Equal(0, t[i].ExitReason());
 			t[i].Close();
 			test(!Exists(i));
 			}
@@ -1340,8 +1343,8 @@
 		{
 		if (i==3 || i==7 || i==10)
 			{
-			test(t[i].ExitType()==EExitPanic);
-			test(t[i].ExitReason()==EBadHandle);
+			test_Equal(EExitPanic, t[i].ExitType());
+			test_Equal(EBadHandle, t[i].ExitReason());
 			test(t[i].ExitCategory()==_L("KERN-EXEC"));
 			t[i].Close();
 			test(!Exists(i));
@@ -1371,11 +1374,11 @@
 	test.Start(_L("Test semaphore speed"));
 	TInt count=0;
 	TInt r=S.CreateLocal(1);
-	test(r==KErrNone);
+	test_KErrNone(r);
 
 	RThread t;
 	r=t.Create(_L("SemSpeed"),SemSpeed,0x1000,NULL,&count);
-	test(r==KErrNone);
+	test_KErrNone(r);
 	t.SetPriority(EPriorityRealTime);
 	t.Resume();
 	User::AfterHighRes(1000000);
@@ -1391,12 +1394,22 @@
 
 GLDEF_C TInt E32Main()
 	{
+	TInt cpus = UserSvr::HalFunction(EHalGroupKernel, EKernelHalNumLogicalCpus, 0, 0);
+	if (cpus != 1)
+		{
+		test(cpus>1);
+		// This test will require compatibility mode (and probably other changes)
+		// to work on SMP - it depends on explicit scheduling order.
+		test.Printf(_L("T_SEMUTX2 skipped, does not work on SMP\n"));
+		return KErrNone;
+		}	
+	
 	test.Title();
 
 	test.Start(_L("Test mutexes and semaphores"));
 	RThread().SetPriority(EPriorityMuchMore);
 	TInt r=Main.Duplicate(RThread());
-	test(r==KErrNone);
+	test_KErrNone(r);
 
 	Test0();
 	Test1();
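
Both this test and t_condvar below gain the same guard at the top of E32Main. A minimal sketch (not part of the changeset) of the detection factored into a reusable helper, using the same HAL call as the hunk above:

#include <e32svr.h>
#include <u32hal.h>

// Returns ETrue when the test should be skipped because more than one
// logical CPU is present; these tests rely on strict scheduling order.
static TBool SkipOnSmp(RTest& aTest, const TDesC& aTestName)
	{
	TInt cpus = UserSvr::HalFunction(EHalGroupKernel, EKernelHalNumLogicalCpus, 0, 0);
	if (cpus == 1)
		return EFalse;			// uniprocessor: run as normal
	aTest.Printf(_L("%S skipped, does not work on SMP\n"), &aTestName);
	return ETrue;
	}
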
--- a/kerneltest/e32test/system/t_condvar.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kerneltest/e32test/system/t_condvar.cpp	Mon May 10 11:40:53 2010 +0100
@@ -1,820 +1,831 @@
-// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
-// All rights reserved.
-// This component and the accompanying materials are made available
-// under the terms of the License "Eclipse Public License v1.0"
-// which accompanies this distribution, and is available
-// at the URL "http://www.eclipse.org/legal/epl-v10.html".
-//
-// Initial Contributors:
-// Nokia Corporation - initial contribution.
-//
-// Contributors:
-//
-// Description:
-// e32test\system\t_condvar.cpp
-// Overview:
-// Test the use of the RCondVar & RMutex classes.
-// API Information:
-// RCondVar, RMutex
-// Details:
-// - Create some local conditional variables and mutexes and verify results
-// are as expected.
-// - Create a test thread that waits on conditional variables and mutexes, 
-// append some items on an array, signal the conditional variable and mutex,
-// the thread then counts the number of items on the array and passes the 
-// result back to the main process. Verify results are as expected. Repeat
-// with different array data.
-// - Verify that a RCondVar::Wait() panics when the thread does not hold the
-// specified mutex (mutex not locked).
-// - Test using two mutexes with 1 conditional variable, append some items to 
-// an array, verify results from the thread are as expected. 
-// - Create a second thread with higher priority, perform tests similar to
-// above, verify results are as expected.
-// - Verify the thread timeout values are as expected.
-// - Create global conditional variables and global mutexes, using two threads
-// test the RCondVar::Signal() and RMutex::Wait() results are as expected.
-// - Test various combinations of creating a thread, suspending and killing it
-// and signalling a conditional variable and mutex. Verify results are as
-// expected.
-// - Create a secondary process along with a global chunk, conditional variable 
-// and mutex. Signal the conditional variable and verify the results are as 
-// expected.
-// - Using two threads, benchmark the number of conditional variable/mutex Signal
-// and Wait iterations that can be completed per second.
-// Platforms/Drives/Compatibility:
-// All.
-// Assumptions/Requirement/Pre-requisites:
-// Failures and causes:
-// Base Port information:
-// 
-//
-
-#include <e32std.h>
-#include <e32std_private.h>
-#include <e32svr.h>
-#include <e32test.h>
-#include <e32ldr.h>
-#include <e32def.h>
-#include <e32def_private.h>
-
-RTest test(_L("T_CONDVAR"));
-RMutex M1;
-RMutex M2;
-RCondVar CV1;
-RCondVar CV2;
-
-#define __TRACE_LINE__	test.Printf(_L("Line %d\n"),__LINE__)
-
-struct SThreadData
-	{
-	SThreadData();
-	RMutex iM;
-	RCondVar iV;
-	RArray<TInt>* iA;
-	TInt iTotal;
-	TInt iInnerLoops;
-	TInt iOuterLoops;
-	TInt iTimeoutMs;
-	TInt iTimeouts;
-	TInt iBadCount;
-	};
-
-struct SThreadData2
-	{
-	SThreadData2();
-	const TText* iMutexName;
-	const TText* iCondVarName;
-	TInt iInnerLoops;
-	};
-
-SThreadData::SThreadData()
-	{
-	memset(this, 0, sizeof(*this));
-	}
-
-SThreadData2::SThreadData2()
-	{
-	memset(this, 0, sizeof(*this));
-	}
-
-TInt Thread0(TAny*)
-	{
-	return CV1.Wait(M1);
-	}
-
-TInt Thread1(TAny* a)
-	{
-	TUint32 t1, t2;
-	SThreadData& d = *(SThreadData*)a;
-	TInt r = KErrNone;
-	TInt i = 0;
-	d.iM.Wait();
-	FOREVER
-		{
-		while (d.iA->Count()<=i && r==KErrNone)
-			{
-			t1 = User::NTickCount();
-			if (d.iTimeoutMs)
-				r = d.iV.TimedWait(d.iM, d.iTimeoutMs*1000);
-			else
-				r = d.iV.Wait(d.iM);
-			t2 = User::NTickCount();
-			++d.iInnerLoops;
-			if (r == KErrTimedOut)
-				{
-				++d.iTimeouts;
-				TInt iv = (TInt)(t2-t1);
-				if (iv<d.iTimeoutMs)
-					++d.iBadCount;
-				r = KErrNone;
-				}
-			}
-		if (r != KErrNone)
-			break;
-		++d.iOuterLoops;
-		TInt c = d.iA->Count();
-		for (; i<c; ++i)
-			d.iTotal += (*d.iA)[i];
-		}
-	return r;
-	}
-
-TInt Thread2(TAny* a)
-	{
-	TUint32 t1, t2;
-	SThreadData& d = *(SThreadData*)a;
-	TInt r = KErrNone;
-	d.iM.Wait();
-	RThread::Rendezvous(KErrNone);
-	while (r==KErrNone)
-		{
-		t1 = User::NTickCount();
-		if (d.iTimeoutMs)
-			r = d.iV.TimedWait(d.iM, d.iTimeoutMs*1000);
-		else
-			r = d.iV.Wait(d.iM);
-		t2 = User::NTickCount();
-		++d.iInnerLoops;
-		if (r == KErrTimedOut)
-			{
-			++d.iTimeouts;
-			TInt iv = (TInt)(t2-t1);
-			if (iv<d.iTimeoutMs)
-				++d.iBadCount;
-			r = KErrNone;
-			}
-		}
-	return r;
-	}
-
-TInt Thread3(TAny* a)
-	{
-	SThreadData2& d = *(SThreadData2*)a;
-	RMutex m;
-	RCondVar cv;
-	TInt r = m.OpenGlobal(TPtrC(d.iMutexName), EOwnerThread);
-	if (r!=KErrNone)
-		return r;
-	r = cv.OpenGlobal(TPtrC(d.iCondVarName), EOwnerThread);
-	if (r!=KErrNone)
-		return r;
-	m.Wait();
-	while (r==KErrNone)
-		{
-		r = cv.Wait(m);
-		++d.iInnerLoops;
-		}
-	return r;
-	}
-
-TInt Thread4(TAny* a)
-	{
-	volatile TInt& count = *(volatile TInt*)a;
-	TInt r = KErrNone;
-	M2.Wait();
-	while (r==KErrNone)
-		{
-		r = CV2.Wait(M2);
-		++count;
-		}
-	return r;
-	}
-
-TInt Thread5(TAny*)
-	{
-	FOREVER
-		{
-		M2.Wait();
-		CV2.Signal();
-		M2.Signal();
-		}
-	}
-
-void RunBench()
-	{
-	test.Next(_L("Benchmark"));
-	RThread t4, t5;
-	TInt count = 0;
-	TInt r = t4.Create(KNullDesC, &Thread4, 0x1000, 0x1000, 0x1000, &count);
-	test(r==KErrNone);
-	t4.SetPriority(EPriorityLess);
-	r = t5.Create(KNullDesC, &Thread5, 0x1000, 0x1000, 0x1000, NULL);
-	test(r==KErrNone);
-	t5.SetPriority(EPriorityMuchLess);
-	t4.Resume();
-	t5.Resume();
-	User::After(500000);
-	TInt initc = count;
-	User::After(5000000);
-	TInt finalc = count;
-	test.Printf(_L("%d iterations per second\n"), (finalc-initc)/5);
-	t4.Kill(0);
-	t5.Kill(0);
-	CLOSE_AND_WAIT(t4);
-	CLOSE_AND_WAIT(t5);
-	}
-
-void CreateThread2(RThread& aThread, SThreadData& aData, TThreadPriority aPri)
-	{
-	TInt r = aThread.Create(KNullDesC, &Thread2, 0x1000, 0x1000, 0x1000, &aData);
-	test(r==KErrNone);
-	aThread.SetPriority(aPri);
-	TRequestStatus s;
-	aThread.Rendezvous(s);
-	test(s==KRequestPending);
-	aThread.Resume();
-	User::WaitForRequest(s);
-	test(s==KErrNone);
-	test(aThread.ExitType()==EExitPending);
-	aData.iM.Wait();
-	}
-
-void KillThread2(RThread& aThread)
-	{
-	TRequestStatus s;
-	aThread.Logon(s);
-	test(s==KRequestPending);
-	aThread.Terminate(0);
-	User::WaitForRequest(s);
-	test(aThread.ExitType()==EExitTerminate);
-	test(aThread.ExitReason()==0);
-	test(s==0);
-	CLOSE_AND_WAIT(aThread);
-	}
-
-void AppendToArray(SThreadData& aD, TInt aCount, ...)
-	{
-	VA_LIST list;
-	VA_START(list,aCount);
-	aD.iM.Wait();
-	while(--aCount>=0)
-		{
-		test(aD.iA->Append(VA_ARG(list,TInt))==KErrNone);
-		}
-	aD.iV.Signal();
-	aD.iM.Signal();
-	}
-
-void AppendToArrayB(SThreadData& aD, TInt aCount, ...)
-	{
-	VA_LIST list;
-	VA_START(list,aCount);
-	aD.iM.Wait();
-	while(--aCount>=0)
-		{
-		test(aD.iA->Append(VA_ARG(list,TInt))==KErrNone);
-		}
-	aD.iV.Broadcast();
-	aD.iM.Signal();
-	}
-
-void AppendToArrayB2(SThreadData& aD, TInt aCount, ...)
-	{
-	VA_LIST list;
-	VA_START(list,aCount);
-	aD.iM.Wait();
-	while(--aCount>=0)
-		{
-		test(aD.iA->Append(VA_ARG(list,TInt))==KErrNone);
-		}
-	aD.iM.Signal();
-	aD.iV.Broadcast();
-	}
-
-void Thread2Test()
-	{
-	test.Next(_L("Thread2Test"));
-	RCondVar cv2;
-	RMutex m3;
-	TInt r = cv2.CreateLocal();
-	test(r==KErrNone);
-	r = m3.CreateLocal();
-	test(r==KErrNone);
-	SThreadData d1;
-	d1.iM = m3;
-	d1.iV = cv2;
-	RThread t1;
-
-	CreateThread2(t1, d1, EPriorityLess);
-	cv2.Signal();
-	m3.Signal();
-	User::After(100000);
-	test(d1.iInnerLoops == 1);
-	KillThread2(t1);
-
-	CreateThread2(t1, d1, EPriorityLess);
-	KillThread2(t1);
-	m3.Signal();
-	test(d1.iInnerLoops == 1);
-
-	CreateThread2(t1, d1, EPriorityLess);
-	m3.Signal();
-	User::After(10000);
-	KillThread2(t1);
-	test(d1.iInnerLoops == 1);
-
-	CreateThread2(t1, d1, EPriorityLess);
-	cv2.Signal();
-	User::After(10000);
-	KillThread2(t1);
-	m3.Signal();
-	test(d1.iInnerLoops == 1);
-
-	CreateThread2(t1, d1, EPriorityLess);
-	t1.Suspend();
-	KillThread2(t1);
-	m3.Signal();
-	test(d1.iInnerLoops == 1);
-
-	CreateThread2(t1, d1, EPriorityLess);
-	User::After(10000);
-	t1.Suspend();
-	KillThread2(t1);
-	m3.Signal();
-	test(d1.iInnerLoops == 1);
-
-	CreateThread2(t1, d1, EPriorityLess);
-	cv2.Signal();
-	t1.Suspend();
-	KillThread2(t1);
-	m3.Signal();
-	test(d1.iInnerLoops == 1);
-
-	CreateThread2(t1, d1, EPriorityLess);
-	cv2.Signal();
-	User::After(10000);
-	t1.Suspend();
-	KillThread2(t1);
-	m3.Signal();
-	test(d1.iInnerLoops == 1);
-
-	cv2.Close();
-	m3.Close();
-	}
-
-const TText* KMutex1Name = _S("mtx1");
-const TText* KMutex2Name = _S("mtx2");
-const TText* KCondVar1Name = _S("cv1");
-const TText* KCondVar2Name = _S("cv2");
-
-void TestGlobal()
-	{
-	test.Next(_L("Test Global"));
-	RMutex mg1, mg2;
-	RCondVar cvg1, cvg2;
-	TInt r = mg1.CreateGlobal(TPtrC(KMutex1Name));
-	test(r==KErrNone);
-	r = mg2.CreateGlobal(TPtrC(KMutex2Name));
-	test(r==KErrNone);
-	r = cvg1.CreateGlobal(TPtrC(KCondVar1Name));
-	test(r==KErrNone);
-	r = cvg2.CreateGlobal(TPtrC(KCondVar2Name));
-	test(r==KErrNone);
-	SThreadData2 d1, d2;
-	d1.iMutexName = KMutex1Name;
-	d1.iCondVarName = KCondVar1Name;
-	d2.iMutexName = KMutex2Name;
-	d2.iCondVarName = KCondVar2Name;
-
-	RThread t1, t2;
-	r = t1.Create(KNullDesC, &Thread3, 0x1000, 0x1000, 0x1000, &d1);
-	test(r==KErrNone);
-	t1.SetPriority(EPriorityMore);
-	TRequestStatus s1;
-	t1.Logon(s1);
-	t1.Resume();
-	r = t2.Create(KNullDesC, &Thread3, 0x1000, 0x1000, 0x1000, &d2);
-	test(r==KErrNone);
-	t2.SetPriority(EPriorityMore);
-	TRequestStatus s2;
-	t2.Logon(s2);
-	t2.Resume();
-
-	test(s1==KRequestPending);
-	test(s2==KRequestPending);
-	test(d1.iInnerLoops == 0);
-	test(d2.iInnerLoops == 0);
-	cvg1.Signal();
-	test(d1.iInnerLoops == 1);
-	test(d2.iInnerLoops == 0);
-	cvg2.Signal();
-	test(d1.iInnerLoops == 1);
-	test(d2.iInnerLoops == 1);
-
-	cvg1.Close();
-	cvg2.Close();
-	test(s1==KRequestPending);
-	test(s2==KRequestPending);
-	test(d1.iInnerLoops == 1);
-	test(d2.iInnerLoops == 1);
-
-	t1.Kill(0);
-	t2.Kill(0);
-	User::WaitForRequest(s1);
-	User::WaitForRequest(s2);
-	test(t1.ExitType()==EExitKill);
-	test(t1.ExitReason()==0);
-	test(t2.ExitType()==EExitKill);
-	test(t2.ExitReason()==0);
-	CLOSE_AND_WAIT(t1);
-	CLOSE_AND_WAIT(t2);
-	r = cvg1.OpenGlobal(TPtrC(KCondVar1Name));
-	test(r==KErrNotFound);
-	test(cvg1.Handle()==0);
-	mg1.Close();
-	mg2.Close();
-	}
-
-void TestSecondaryProcess()
-	{
-	test.Next(_L("Test Secondary Process"));
-
-	RProcess p;
-	RChunk c;
-	RMutex m;
-	RCondVar cv;
-
-	//cancel lazy dll unloading
-	RLoader loader;
-	TInt r = loader.Connect();
-	test(r==KErrNone);
-	r = loader.CancelLazyDllUnload();
-	test(r==KErrNone);
-	loader.Close();
-
-	r = c.CreateGlobal(KNullDesC, 0x1000, 0x1000);
-	test(r==KErrNone);
-	volatile TInt& x = *(volatile TInt*)c.Base();
-	x = 0;
-	r = m.CreateGlobal(KNullDesC);
-	test(r==KErrNone);
-	r = cv.CreateGlobal(KNullDesC);
-	test(r==KErrNone);
-	r = p.Create(RProcess().FileName(), KNullDesC);
-	test(r==KErrNone);
-	p.SetPriority(EPriorityHigh);
-	r = p.SetParameter(1, cv);
-	test(r==KErrNone);
-	r = p.SetParameter(2, m);
-	test(r==KErrNone);
-	r = p.SetParameter(3, c);
-	test(r==KErrNone);
-	TRequestStatus s;
-	p.Logon(s);
-	p.Resume();
-	test(s==KRequestPending);
-	test(x==0);
-	TInt i;
-	for (i=0; i<10; ++i)
-		{
-		cv.Signal();
-		test(x == i+1);
-		}
-	cv.Close();
-	test(s==KRequestPending);
-	test(x==10);
-	p.Terminate(0);
-	User::WaitForRequest(s);
-	test(p.ExitType()==EExitTerminate);
-	test(p.ExitReason()==0);
-	CLOSE_AND_WAIT(p);
-	m.Close();
-	c.Close();
-	}
-
-TInt SecondaryProcess(RCondVar aCV)
-	{
-	RDebug::Print(_L("SecProc"));
-	RMutex mp;
-	RChunk cp;
-	TInt r = mp.Open(2);
-	if (r!=KErrNone)
-		return r;
-	r = cp.Open(3);
-	if (r!=KErrNone)
-		return r;
-	volatile TInt& x = *(volatile TInt*)cp.Base();
-	mp.Wait();
-	r = KErrNone;
-	while (r==KErrNone)
-		{
-		r = aCV.Wait(mp);
-		++x;
-		RDebug::Print(_L("SecProc r=%d x=%d"), r, x);
-		}
-	return r;
-	}
-
-TInt E32Main()
-	{
-	__KHEAP_MARK;
-	__UHEAP_MARK;
-
-	TInt r;
-	RCondVar cvp;
-	r = cvp.Open(1);
-	if (r==KErrNone)
-		return SecondaryProcess(cvp);
-	test.Title();
-	test.Start(_L("Create condition variable"));
-	r = CV1.CreateLocal();
-	test(r==KErrNone);
-	r = CV2.CreateLocal();
-	test(r==KErrNone);
-
-	test.Next(_L("Signal with no-one waiting"));
-	CV1.Signal();
-
-	test.Next(_L("Broadcast with no-one waiting"));
-	CV1.Broadcast();
-
-	test.Next(_L("Create mutexes"));
-	r = M1.CreateLocal();
-	test(r==KErrNone);
-	r = M2.CreateLocal();
-	test(r==KErrNone);
-
-	RArray<TInt> array;
-	SThreadData d0;
-	d0.iM = M2;
-	d0.iV = CV1;
-	d0.iA = &array;
-	test.Next(_L("Create thread to use mutex 2"));
-	RThread t0;
-	r = t0.Create(KNullDesC, &Thread1, 0x1000, 0x1000, 0x1000, &d0);
-	test(r==KErrNone);
-	t0.SetPriority(EPriorityMore);
-	TRequestStatus s0;
-	t0.Logon(s0);
-	t0.Resume();
-	__TRACE_LINE__;
-	AppendToArray(d0, 1, 4);
-	test(d0.iTotal==4);
-	__TRACE_LINE__;
-	AppendToArray(d0, 2, -3, 17);
-	test(d0.iTotal==18);
-	t0.Terminate(11);
-	User::WaitForRequest(s0);
-	test(t0.ExitType()==EExitTerminate);
-	test(t0.ExitReason()==11);
-	CLOSE_AND_WAIT(t0);
-	array.Reset();
-
-	SThreadData d;
-	d.iM = M1;
-	d.iV = CV1;
-	d.iA = &array;
-	test.Next(_L("Create thread to use mutex 1"));
-	RThread t;
-	r = t.Create(KNullDesC, &Thread1, 0x1000, 0x1000, 0x1000, &d);
-	test(r==KErrNone);
-	t.SetPriority(EPriorityMore);
-	TRequestStatus s;
-	t.Logon(s);
-	t.Resume();
-
-	test.Next(_L("Test wait with mutex unlocked"));
-	r = t0.Create(KNullDesC, &Thread0, 0x1000, 0x1000, 0x1000, NULL);
-	test(r==KErrNone);
-	t0.SetPriority(EPriorityMore);
-	t0.Logon(s0);
-	TBool jit = User::JustInTime();
-	User::SetJustInTime(EFalse);
-	t0.Resume();
-	User::WaitForRequest(s0);
-	User::SetJustInTime(jit);
-	test(t0.ExitType()==EExitPanic);
-	test(t0.ExitCategory()==_L("KERN-EXEC"));
-	test(t0.ExitReason()==ECondVarWaitMutexNotLocked);
-	CLOSE_AND_WAIT(t0);
-
-	test.Next(_L("Test trying to use two mutexes with 1 condition variable"));
-	M2.Wait();
-	r = CV1.Wait(M2);
-	M2.Signal();
-	test(r==KErrInUse);
-
-	test(d.iTotal==0);
-	__TRACE_LINE__;
-	AppendToArray(d, 1, 3);
-	test(d.iTotal==3);
-	__TRACE_LINE__;
-	AppendToArray(d, 2, 3, 19);
-	test(d.iTotal==25);
-	__TRACE_LINE__;
-	AppendToArray(d, 4, 15, -1, -2, -30);
-	test(d.iTotal==7);
-	test(d.iInnerLoops==3);
-	test(d.iOuterLoops==3);
-	__TRACE_LINE__;
-	t.Suspend();
-	__TRACE_LINE__;
-	t.Resume();
-	test(d.iTotal==7);
-	test(d.iInnerLoops==4);
-	test(d.iOuterLoops==3);
-	__TRACE_LINE__;
-	t.SetPriority(EPriorityLess);
-	test(d.iTotal==7);
-	test(d.iInnerLoops==4);
-	test(d.iOuterLoops==3);
-	__TRACE_LINE__;
-	t.SetPriority(EPriorityMore);
-	test(d.iTotal==7);
-	test(d.iInnerLoops==5);
-	test(d.iOuterLoops==3);
-	__TRACE_LINE__;
-	t.Suspend();
-	__TRACE_LINE__;
-	AppendToArray(d, 1, 4);
-	test(d.iTotal==7);
-	test(d.iInnerLoops==5);
-	test(d.iOuterLoops==3);
-	__TRACE_LINE__;
-	t.Resume();
-	test(d.iTotal==11);
-	test(d.iInnerLoops==6);
-	test(d.iOuterLoops==4);
-
-	SThreadData d2;
-	d2.iM = M1;
-	d2.iV = CV1;
-	d2.iA = &array;
-
-	test.Next(_L("Create 2nd thread"));
-	RThread t2;
-	r = t2.Create(KNullDesC, &Thread1, 0x1000, NULL, &d2);
-	test(r==KErrNone);
-	t2.SetPriority(EPriorityMuchMore);
-	TRequestStatus s2;
-	t2.Logon(s2);
-	__TRACE_LINE__;
-	t2.Resume();
-
-	test(d2.iTotal == 11);
-	test(d2.iInnerLoops == 0);
-	test(d2.iOuterLoops == 1);
-	__TRACE_LINE__;
-	AppendToArray(d, 2, 9, 10);
-	test(d2.iTotal == 30);
-	test(d2.iInnerLoops == 1);
-	test(d2.iOuterLoops == 2);
-	test(d.iTotal==11);
-	test(d.iInnerLoops==6);
-	test(d.iOuterLoops==4);
-	__TRACE_LINE__;
-	AppendToArrayB(d, 2, 20, 30);
-	test(d2.iTotal == 80);
-	test(d2.iInnerLoops == 2);
-	test(d2.iOuterLoops == 3);
-	test(d.iTotal == 80);
-	test(d.iInnerLoops == 7);
-	test(d.iOuterLoops == 5);
-	__TRACE_LINE__;
-	AppendToArrayB2(d, 2, -10, -6);
-	test(d2.iTotal == 64);
-	test(d2.iInnerLoops == 3);
-	test(d2.iOuterLoops == 4);
-	test(d.iTotal == 64);
-	test(d.iInnerLoops == 8);
-	test(d.iOuterLoops == 6);
-	__TRACE_LINE__;
-	t2.Suspend();
-	__TRACE_LINE__;
-	AppendToArray(d, 2, -8, -8);
-	test(d2.iTotal == 64);
-	test(d2.iInnerLoops == 3);
-	test(d2.iOuterLoops == 4);
-	test(d.iTotal == 48);
-	test(d.iInnerLoops == 9);
-	test(d.iOuterLoops == 7);
-	__TRACE_LINE__;
-	t2.Resume();
-	test(d2.iTotal == 48);
-	test(d2.iInnerLoops == 4);
-	test(d2.iOuterLoops == 5);
-	test(d.iTotal == 48);
-	test(d.iInnerLoops == 9);
-	test(d.iOuterLoops == 7);
-
-	// test timeouts
-	d.iTimeoutMs = 1000;
-	__TRACE_LINE__;
-	t.Suspend();
-	__TRACE_LINE__;
-	t.Resume();
-	test(d2.iTotal == 48);
-	test(d2.iInnerLoops == 4);
-	test(d2.iOuterLoops == 5);
-	test(d2.iTimeouts == 0);
-	test(d.iTotal == 48);
-	test(d.iInnerLoops == 10);
-	test(d.iOuterLoops == 7);
-	test(d.iTimeouts == 0);
-	test(array.Append(1)==0);
-	TInt nt = 0;
-	do	{
-		if (d.iTimeouts > nt)
-			{
-			test(d.iTimeouts-nt == 1);
-			nt = d.iTimeouts;
-			test.Printf(_L("Timeout %d\n"), nt);
-			test(d2.iTotal == 48);
-			test(d2.iInnerLoops == 4);
-			test(d2.iOuterLoops == 5);
-			test(d2.iTimeouts == 0);
-			test(d.iTotal == 48+nt);
-			test(d.iInnerLoops == 10+nt);
-			test(d.iOuterLoops == 7+nt);
-			test(array.Append(1)==0);
-			}
-		} while (nt<10);
-
-	d.iTimeoutMs = 0;
-	AppendToArrayB(d, 0);
-	test(d2.iTotal == 59);
-	test(d2.iInnerLoops == 5);
-	test(d2.iOuterLoops == 6);
-	test(d2.iTimeouts == 0);
-	test(d.iTotal == 59);
-	test(d.iInnerLoops == 21);
-	test(d.iOuterLoops == 18);
-	test(d.iTimeouts == 10);
-
-	__TRACE_LINE__;
-	t.SetPriority(EPriorityLess);
-	__TRACE_LINE__;
-	AppendToArrayB(d, 1, 11);
-	test(d2.iTotal == 70);
-	test(d2.iInnerLoops == 6);
-	test(d2.iOuterLoops == 7);
-	test(d2.iTimeouts == 0);
-	test(d.iTotal == 59);
-	test(d.iInnerLoops == 21);
-	test(d.iOuterLoops == 18);
-	test(d.iTimeouts == 10);
-	User::After(50000);
-	test(d2.iTotal == 70);
-	test(d2.iInnerLoops == 6);
-	test(d2.iOuterLoops == 7);
-	test(d2.iTimeouts == 0);
-	test(d.iTotal == 70);
-	test(d.iInnerLoops == 22);
-	test(d.iOuterLoops == 19);
-	test(d.iTimeouts == 10);
-
-
-
-	__TRACE_LINE__;
-	CV1.Close();
-	User::WaitForRequest(s);
-	test(t.ExitType()==EExitKill);
-	test(t.ExitReason()==KErrGeneral);
-	User::WaitForRequest(s2);
-	test(t2.ExitType()==EExitKill);
-	test(t2.ExitReason()==KErrGeneral);
-	CLOSE_AND_WAIT(t);
-	CLOSE_AND_WAIT(t2);
-
-
-	M1.Close();
-
-	TestGlobal();
-
-	Thread2Test();
-
-	TestSecondaryProcess();
-
-	RunBench();
-	M2.Close();
-	CV2.Close();
-	array.Close();
-
-	test.End();
-	test.Close();
-
-	__UHEAP_MARKEND;
-	__KHEAP_MARKEND;
-	return KErrNone;
-	}
-
+// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32test\system\t_condvar.cpp
+// Overview:
+// Test the use of the RCondVar & RMutex classes.
+// API Information:
+// RCondVar, RMutex
+// Details:
+// - Create some local condition variables and mutexes and verify results
+// are as expected.
+// - Create a test thread that waits on condition variables and mutexes,
+// append some items to an array and signal the condition variable and mutex;
+// the thread then counts the number of items in the array and passes the
+// result back to the main process. Verify results are as expected. Repeat
+// with different array data.
+// - Verify that RCondVar::Wait() panics when the thread does not hold the
+// specified mutex (mutex not locked).
+// - Test using two mutexes with 1 condition variable, append some items to
+// an array, verify results from the thread are as expected.
+// - Create a second thread with higher priority, perform tests similar to
+// the above, verify results are as expected.
+// - Verify the thread timeout values are as expected.
+// - Create global condition variables and global mutexes; using two threads,
+// test that the RCondVar::Signal() and RMutex::Wait() results are as expected.
+// - Test various combinations of creating a thread, suspending and killing it
+// and signalling a condition variable and mutex. Verify results are as
+// expected.
+// - Create a secondary process along with a global chunk, condition variable
+// and mutex. Signal the condition variable and verify the results are as
+// expected.
+// - Using two threads, benchmark the number of condition variable/mutex Signal
+// and Wait iterations that can be completed per second.
+// Platforms/Drives/Compatibility:
+// All.
+// Assumptions/Requirement/Pre-requisites:
+// Failures and causes:
+// Base Port information:
+// 
+//
+
+#include <e32std.h>
+#include <e32std_private.h>
+#include <e32svr.h>
+#include <e32test.h>
+#include <e32ldr.h>
+#include <e32def.h>
+#include <e32def_private.h>
+#include <u32std.h>
+
+RTest test(_L("T_CONDVAR"));
+RMutex M1;
+RMutex M2;
+RCondVar CV1;
+RCondVar CV2;
+
+#define __TRACE_LINE__	test.Printf(_L("Line %d\n"),__LINE__)
+
+struct SThreadData
+	{
+	SThreadData();
+	RMutex iM;
+	RCondVar iV;
+	RArray<TInt>* iA;
+	TInt iTotal;
+	TInt iInnerLoops;
+	TInt iOuterLoops;
+	TInt iTimeoutMs;
+	TInt iTimeouts;
+	TInt iBadCount;
+	};
+
+struct SThreadData2
+	{
+	SThreadData2();
+	const TText* iMutexName;
+	const TText* iCondVarName;
+	TInt iInnerLoops;
+	};
+
+SThreadData::SThreadData()
+	{
+	memset(this, 0, sizeof(*this));
+	}
+
+SThreadData2::SThreadData2()
+	{
+	memset(this, 0, sizeof(*this));
+	}
+
+TInt Thread0(TAny*)
+	{
+	return CV1.Wait(M1);
+	}
+
+TInt Thread1(TAny* a)
+	{
+	TUint32 t1, t2;
+	SThreadData& d = *(SThreadData*)a;
+	TInt r = KErrNone;
+	TInt i = 0;
+	d.iM.Wait();
+	FOREVER
+		{
+		while (d.iA->Count()<=i && r==KErrNone)
+			{
+			t1 = User::NTickCount();
+			if (d.iTimeoutMs)
+				r = d.iV.TimedWait(d.iM, d.iTimeoutMs*1000);
+			else
+				r = d.iV.Wait(d.iM);
+			t2 = User::NTickCount();
+			++d.iInnerLoops;
+			if (r == KErrTimedOut)
+				{
+				++d.iTimeouts;
+				TInt iv = (TInt)(t2-t1);
+				if (iv<d.iTimeoutMs)
+					++d.iBadCount;
+				r = KErrNone;
+				}
+			}
+		if (r != KErrNone)
+			break;
+		++d.iOuterLoops;
+		TInt c = d.iA->Count();
+		for (; i<c; ++i)
+			d.iTotal += (*d.iA)[i];
+		}
+	return r;
+	}
+
+TInt Thread2(TAny* a)
+	{
+	TUint32 t1, t2;
+	SThreadData& d = *(SThreadData*)a;
+	TInt r = KErrNone;
+	d.iM.Wait();
+	RThread::Rendezvous(KErrNone);
+	while (r==KErrNone)
+		{
+		t1 = User::NTickCount();
+		if (d.iTimeoutMs)
+			r = d.iV.TimedWait(d.iM, d.iTimeoutMs*1000);
+		else
+			r = d.iV.Wait(d.iM);
+		t2 = User::NTickCount();
+		++d.iInnerLoops;
+		if (r == KErrTimedOut)
+			{
+			++d.iTimeouts;
+			TInt iv = (TInt)(t2-t1);
+			if (iv<d.iTimeoutMs)
+				++d.iBadCount;
+			r = KErrNone;
+			}
+		}
+	return r;
+	}
+
+TInt Thread3(TAny* a)
+	{
+	SThreadData2& d = *(SThreadData2*)a;
+	RMutex m;
+	RCondVar cv;
+	TInt r = m.OpenGlobal(TPtrC(d.iMutexName), EOwnerThread);
+	if (r!=KErrNone)
+		return r;
+	r = cv.OpenGlobal(TPtrC(d.iCondVarName), EOwnerThread);
+	if (r!=KErrNone)
+		return r;
+	m.Wait();
+	while (r==KErrNone)
+		{
+		r = cv.Wait(m);
+		++d.iInnerLoops;
+		}
+	return r;
+	}
+
+TInt Thread4(TAny* a)
+	{
+	volatile TInt& count = *(volatile TInt*)a;
+	TInt r = KErrNone;
+	M2.Wait();
+	while (r==KErrNone)
+		{
+		r = CV2.Wait(M2);
+		++count;
+		}
+	return r;
+	}
+
+TInt Thread5(TAny*)
+	{
+	FOREVER
+		{
+		M2.Wait();
+		CV2.Signal();
+		M2.Signal();
+		}
+	}
+
+void RunBench()
+	{
+	test.Next(_L("Benchmark"));
+	RThread t4, t5;
+	TInt count = 0;
+	TInt r = t4.Create(KNullDesC, &Thread4, 0x1000, 0x1000, 0x1000, &count);
+	test(r==KErrNone);
+	t4.SetPriority(EPriorityLess);
+	r = t5.Create(KNullDesC, &Thread5, 0x1000, 0x1000, 0x1000, NULL);
+	test(r==KErrNone);
+	t5.SetPriority(EPriorityMuchLess);
+	t4.Resume();
+	t5.Resume();
+	User::After(500000);
+	TInt initc = count;
+	User::After(5000000);
+	TInt finalc = count;
+	test.Printf(_L("%d iterations per second\n"), (finalc-initc)/5);
+	t4.Kill(0);
+	t5.Kill(0);
+	CLOSE_AND_WAIT(t4);
+	CLOSE_AND_WAIT(t5);
+	}
+
+void CreateThread2(RThread& aThread, SThreadData& aData, TThreadPriority aPri)
+	{
+	TInt r = aThread.Create(KNullDesC, &Thread2, 0x1000, 0x1000, 0x1000, &aData);
+	test(r==KErrNone);
+	aThread.SetPriority(aPri);
+	TRequestStatus s;
+	aThread.Rendezvous(s);
+	test(s==KRequestPending);
+	aThread.Resume();
+	User::WaitForRequest(s);
+	test(s==KErrNone);
+	test(aThread.ExitType()==EExitPending);
+	aData.iM.Wait();
+	}
+
+void KillThread2(RThread& aThread)
+	{
+	TRequestStatus s;
+	aThread.Logon(s);
+	test(s==KRequestPending);
+	aThread.Terminate(0);
+	User::WaitForRequest(s);
+	test(aThread.ExitType()==EExitTerminate);
+	test(aThread.ExitReason()==0);
+	test(s==0);
+	CLOSE_AND_WAIT(aThread);
+	}
+
+void AppendToArray(SThreadData& aD, TInt aCount, ...)
+	{
+	VA_LIST list;
+	VA_START(list,aCount);
+	aD.iM.Wait();
+	while(--aCount>=0)
+		{
+		test(aD.iA->Append(VA_ARG(list,TInt))==KErrNone);
+		}
+	aD.iV.Signal();
+	aD.iM.Signal();
+	}
+
+void AppendToArrayB(SThreadData& aD, TInt aCount, ...)
+	{
+	VA_LIST list;
+	VA_START(list,aCount);
+	aD.iM.Wait();
+	while(--aCount>=0)
+		{
+		test(aD.iA->Append(VA_ARG(list,TInt))==KErrNone);
+		}
+	aD.iV.Broadcast();
+	aD.iM.Signal();
+	}
+
+void AppendToArrayB2(SThreadData& aD, TInt aCount, ...)
+	{
+	VA_LIST list;
+	VA_START(list,aCount);
+	aD.iM.Wait();
+	while(--aCount>=0)
+		{
+		test(aD.iA->Append(VA_ARG(list,TInt))==KErrNone);
+		}
+	aD.iM.Signal();
+	aD.iV.Broadcast();
+	}
+
+void Thread2Test()
+	{
+	test.Next(_L("Thread2Test"));
+	RCondVar cv2;
+	RMutex m3;
+	TInt r = cv2.CreateLocal();
+	test(r==KErrNone);
+	r = m3.CreateLocal();
+	test(r==KErrNone);
+	SThreadData d1;
+	d1.iM = m3;
+	d1.iV = cv2;
+	RThread t1;
+
+	CreateThread2(t1, d1, EPriorityLess);
+	cv2.Signal();
+	m3.Signal();
+	User::After(100000);
+	test(d1.iInnerLoops == 1);
+	KillThread2(t1);
+
+	CreateThread2(t1, d1, EPriorityLess);
+	KillThread2(t1);
+	m3.Signal();
+	test(d1.iInnerLoops == 1);
+
+	CreateThread2(t1, d1, EPriorityLess);
+	m3.Signal();
+	User::After(10000);
+	KillThread2(t1);
+	test(d1.iInnerLoops == 1);
+
+	CreateThread2(t1, d1, EPriorityLess);
+	cv2.Signal();
+	User::After(10000);
+	KillThread2(t1);
+	m3.Signal();
+	test(d1.iInnerLoops == 1);
+
+	CreateThread2(t1, d1, EPriorityLess);
+	t1.Suspend();
+	KillThread2(t1);
+	m3.Signal();
+	test(d1.iInnerLoops == 1);
+
+	CreateThread2(t1, d1, EPriorityLess);
+	User::After(10000);
+	t1.Suspend();
+	KillThread2(t1);
+	m3.Signal();
+	test(d1.iInnerLoops == 1);
+
+	CreateThread2(t1, d1, EPriorityLess);
+	cv2.Signal();
+	t1.Suspend();
+	KillThread2(t1);
+	m3.Signal();
+	test(d1.iInnerLoops == 1);
+
+	CreateThread2(t1, d1, EPriorityLess);
+	cv2.Signal();
+	User::After(10000);
+	t1.Suspend();
+	KillThread2(t1);
+	m3.Signal();
+	test(d1.iInnerLoops == 1);
+
+	cv2.Close();
+	m3.Close();
+	}
+
+const TText* KMutex1Name = _S("mtx1");
+const TText* KMutex2Name = _S("mtx2");
+const TText* KCondVar1Name = _S("cv1");
+const TText* KCondVar2Name = _S("cv2");
+
+void TestGlobal()
+	{
+	test.Next(_L("Test Global"));
+	RMutex mg1, mg2;
+	RCondVar cvg1, cvg2;
+	TInt r = mg1.CreateGlobal(TPtrC(KMutex1Name));
+	test(r==KErrNone);
+	r = mg2.CreateGlobal(TPtrC(KMutex2Name));
+	test(r==KErrNone);
+	r = cvg1.CreateGlobal(TPtrC(KCondVar1Name));
+	test(r==KErrNone);
+	r = cvg2.CreateGlobal(TPtrC(KCondVar2Name));
+	test(r==KErrNone);
+	SThreadData2 d1, d2;
+	d1.iMutexName = KMutex1Name;
+	d1.iCondVarName = KCondVar1Name;
+	d2.iMutexName = KMutex2Name;
+	d2.iCondVarName = KCondVar2Name;
+
+	RThread t1, t2;
+	r = t1.Create(KNullDesC, &Thread3, 0x1000, 0x1000, 0x1000, &d1);
+	test(r==KErrNone);
+	t1.SetPriority(EPriorityMore);
+	TRequestStatus s1;
+	t1.Logon(s1);
+	t1.Resume();
+	r = t2.Create(KNullDesC, &Thread3, 0x1000, 0x1000, 0x1000, &d2);
+	test(r==KErrNone);
+	t2.SetPriority(EPriorityMore);
+	TRequestStatus s2;
+	t2.Logon(s2);
+	t2.Resume();
+
+	test(s1==KRequestPending);
+	test(s2==KRequestPending);
+	test(d1.iInnerLoops == 0);
+	test(d2.iInnerLoops == 0);
+	cvg1.Signal();
+	test(d1.iInnerLoops == 1);
+	test(d2.iInnerLoops == 0);
+	cvg2.Signal();
+	test(d1.iInnerLoops == 1);
+	test(d2.iInnerLoops == 1);
+
+	cvg1.Close();
+	cvg2.Close();
+	test(s1==KRequestPending);
+	test(s2==KRequestPending);
+	test(d1.iInnerLoops == 1);
+	test(d2.iInnerLoops == 1);
+
+	t1.Kill(0);
+	t2.Kill(0);
+	User::WaitForRequest(s1);
+	User::WaitForRequest(s2);
+	test(t1.ExitType()==EExitKill);
+	test(t1.ExitReason()==0);
+	test(t2.ExitType()==EExitKill);
+	test(t2.ExitReason()==0);
+	CLOSE_AND_WAIT(t1);
+	CLOSE_AND_WAIT(t2);
+	r = cvg1.OpenGlobal(TPtrC(KCondVar1Name));
+	test(r==KErrNotFound);
+	test(cvg1.Handle()==0);
+	mg1.Close();
+	mg2.Close();
+	}
+
+void TestSecondaryProcess()
+	{
+	test.Next(_L("Test Secondary Process"));
+
+	RProcess p;
+	RChunk c;
+	RMutex m;
+	RCondVar cv;
+
+	//cancel lazy dll unloading
+	RLoader loader;
+	TInt r = loader.Connect();
+	test(r==KErrNone);
+	r = loader.CancelLazyDllUnload();
+	test(r==KErrNone);
+	loader.Close();
+
+	r = c.CreateGlobal(KNullDesC, 0x1000, 0x1000);
+	test(r==KErrNone);
+	volatile TInt& x = *(volatile TInt*)c.Base();
+	x = 0;
+	r = m.CreateGlobal(KNullDesC);
+	test(r==KErrNone);
+	r = cv.CreateGlobal(KNullDesC);
+	test(r==KErrNone);
+	r = p.Create(RProcess().FileName(), KNullDesC);
+	test(r==KErrNone);
+	p.SetPriority(EPriorityHigh);
+	r = p.SetParameter(1, cv);
+	test(r==KErrNone);
+	r = p.SetParameter(2, m);
+	test(r==KErrNone);
+	r = p.SetParameter(3, c);
+	test(r==KErrNone);
+	TRequestStatus s;
+	p.Logon(s);
+	p.Resume();
+	test(s==KRequestPending);
+	test(x==0);
+	TInt i;
+	for (i=0; i<10; ++i)
+		{
+		cv.Signal();
+		test(x == i+1);
+		}
+	cv.Close();
+	test(s==KRequestPending);
+	test(x==10);
+	p.Terminate(0);
+	User::WaitForRequest(s);
+	test(p.ExitType()==EExitTerminate);
+	test(p.ExitReason()==0);
+	CLOSE_AND_WAIT(p);
+	m.Close();
+	c.Close();
+	}
+
+TInt SecondaryProcess(RCondVar aCV)
+	{
+	RDebug::Print(_L("SecProc"));
+	RMutex mp;
+	RChunk cp;
+	TInt r = mp.Open(2);
+	if (r!=KErrNone)
+		return r;
+	r = cp.Open(3);
+	if (r!=KErrNone)
+		return r;
+	volatile TInt& x = *(volatile TInt*)cp.Base();
+	mp.Wait();
+	r = KErrNone;
+	while (r==KErrNone)
+		{
+		r = aCV.Wait(mp);
+		++x;
+		RDebug::Print(_L("SecProc r=%d x=%d"), r, x);
+		}
+	return r;
+	}
+
+TInt E32Main()
+	{
+	TInt cpus = UserSvr::HalFunction(EHalGroupKernel, EKernelHalNumLogicalCpus, 0, 0);
+	if (cpus != 1)
+		{
+		test(cpus>1);
+		// This test will require compatibility mode (and probably other changes)
+		// to work on SMP - it depends on explicit scheduling order.
+		test.Printf(_L("T_CONDVAR skipped, does not work on SMP\n"));
+		return KErrNone;
+		}	
+	
+	__KHEAP_MARK;
+	__UHEAP_MARK;
+
+	TInt r;
+	RCondVar cvp;
+	r = cvp.Open(1);
+	if (r==KErrNone)
+		return SecondaryProcess(cvp);
+	test.Title();
+	test.Start(_L("Create condition variable"));
+	r = CV1.CreateLocal();
+	test(r==KErrNone);
+	r = CV2.CreateLocal();
+	test(r==KErrNone);
+
+	test.Next(_L("Signal with no-one waiting"));
+	CV1.Signal();
+
+	test.Next(_L("Broadcast with no-one waiting"));
+	CV1.Broadcast();
+
+	test.Next(_L("Create mutexes"));
+	r = M1.CreateLocal();
+	test(r==KErrNone);
+	r = M2.CreateLocal();
+	test(r==KErrNone);
+
+	RArray<TInt> array;
+	SThreadData d0;
+	d0.iM = M2;
+	d0.iV = CV1;
+	d0.iA = &array;
+	test.Next(_L("Create thread to use mutex 2"));
+	RThread t0;
+	r = t0.Create(KNullDesC, &Thread1, 0x1000, 0x1000, 0x1000, &d0);
+	test(r==KErrNone);
+	t0.SetPriority(EPriorityMore);
+	TRequestStatus s0;
+	t0.Logon(s0);
+	t0.Resume();
+	__TRACE_LINE__;
+	AppendToArray(d0, 1, 4);
+	test(d0.iTotal==4);
+	__TRACE_LINE__;
+	AppendToArray(d0, 2, -3, 17);
+	test(d0.iTotal==18);
+	t0.Terminate(11);
+	User::WaitForRequest(s0);
+	test(t0.ExitType()==EExitTerminate);
+	test(t0.ExitReason()==11);
+	CLOSE_AND_WAIT(t0);
+	array.Reset();
+
+	SThreadData d;
+	d.iM = M1;
+	d.iV = CV1;
+	d.iA = &array;
+	test.Next(_L("Create thread to use mutex 1"));
+	RThread t;
+	r = t.Create(KNullDesC, &Thread1, 0x1000, 0x1000, 0x1000, &d);
+	test(r==KErrNone);
+	t.SetPriority(EPriorityMore);
+	TRequestStatus s;
+	t.Logon(s);
+	t.Resume();
+
+	test.Next(_L("Test wait with mutex unlocked"));
+	r = t0.Create(KNullDesC, &Thread0, 0x1000, 0x1000, 0x1000, NULL);
+	test(r==KErrNone);
+	t0.SetPriority(EPriorityMore);
+	t0.Logon(s0);
+	TBool jit = User::JustInTime();
+	User::SetJustInTime(EFalse);
+	t0.Resume();
+	User::WaitForRequest(s0);
+	User::SetJustInTime(jit);
+	test(t0.ExitType()==EExitPanic);
+	test(t0.ExitCategory()==_L("KERN-EXEC"));
+	test(t0.ExitReason()==ECondVarWaitMutexNotLocked);
+	CLOSE_AND_WAIT(t0);
+
+	test.Next(_L("Test trying to use two mutexes with 1 condition variable"));
+	M2.Wait();
+	r = CV1.Wait(M2);
+	M2.Signal();
+	test(r==KErrInUse);
+
+	test(d.iTotal==0);
+	__TRACE_LINE__;
+	AppendToArray(d, 1, 3);
+	test(d.iTotal==3);
+	__TRACE_LINE__;
+	AppendToArray(d, 2, 3, 19);
+	test(d.iTotal==25);
+	__TRACE_LINE__;
+	AppendToArray(d, 4, 15, -1, -2, -30);
+	test(d.iTotal==7);
+	test(d.iInnerLoops==3);
+	test(d.iOuterLoops==3);
+	__TRACE_LINE__;
+	t.Suspend();
+	__TRACE_LINE__;
+	t.Resume();
+	test(d.iTotal==7);
+	test(d.iInnerLoops==4);
+	test(d.iOuterLoops==3);
+	__TRACE_LINE__;
+	t.SetPriority(EPriorityLess);
+	test(d.iTotal==7);
+	test(d.iInnerLoops==4);
+	test(d.iOuterLoops==3);
+	__TRACE_LINE__;
+	t.SetPriority(EPriorityMore);
+	test(d.iTotal==7);
+	test(d.iInnerLoops==5);
+	test(d.iOuterLoops==3);
+	__TRACE_LINE__;
+	t.Suspend();
+	__TRACE_LINE__;
+	AppendToArray(d, 1, 4);
+	test(d.iTotal==7);
+	test(d.iInnerLoops==5);
+	test(d.iOuterLoops==3);
+	__TRACE_LINE__;
+	t.Resume();
+	test(d.iTotal==11);
+	test(d.iInnerLoops==6);
+	test(d.iOuterLoops==4);
+
+	SThreadData d2;
+	d2.iM = M1;
+	d2.iV = CV1;
+	d2.iA = &array;
+
+	test.Next(_L("Create 2nd thread"));
+	RThread t2;
+	r = t2.Create(KNullDesC, &Thread1, 0x1000, NULL, &d2);
+	test(r==KErrNone);
+	t2.SetPriority(EPriorityMuchMore);
+	TRequestStatus s2;
+	t2.Logon(s2);
+	__TRACE_LINE__;
+	t2.Resume();
+
+	test(d2.iTotal == 11);
+	test(d2.iInnerLoops == 0);
+	test(d2.iOuterLoops == 1);
+	__TRACE_LINE__;
+	AppendToArray(d, 2, 9, 10);
+	test(d2.iTotal == 30);
+	test(d2.iInnerLoops == 1);
+	test(d2.iOuterLoops == 2);
+	test(d.iTotal==11);
+	test(d.iInnerLoops==6);
+	test(d.iOuterLoops==4);
+	__TRACE_LINE__;
+	AppendToArrayB(d, 2, 20, 30);
+	test(d2.iTotal == 80);
+	test(d2.iInnerLoops == 2);
+	test(d2.iOuterLoops == 3);
+	test(d.iTotal == 80);
+	test(d.iInnerLoops == 7);
+	test(d.iOuterLoops == 5);
+	__TRACE_LINE__;
+	AppendToArrayB2(d, 2, -10, -6);
+	test(d2.iTotal == 64);
+	test(d2.iInnerLoops == 3);
+	test(d2.iOuterLoops == 4);
+	test(d.iTotal == 64);
+	test(d.iInnerLoops == 8);
+	test(d.iOuterLoops == 6);
+	__TRACE_LINE__;
+	t2.Suspend();
+	__TRACE_LINE__;
+	AppendToArray(d, 2, -8, -8);
+	test(d2.iTotal == 64);
+	test(d2.iInnerLoops == 3);
+	test(d2.iOuterLoops == 4);
+	test(d.iTotal == 48);
+	test(d.iInnerLoops == 9);
+	test(d.iOuterLoops == 7);
+	__TRACE_LINE__;
+	t2.Resume();
+	test(d2.iTotal == 48);
+	test(d2.iInnerLoops == 4);
+	test(d2.iOuterLoops == 5);
+	test(d.iTotal == 48);
+	test(d.iInnerLoops == 9);
+	test(d.iOuterLoops == 7);
+
+	// test timeouts
+	d.iTimeoutMs = 1000;
+	__TRACE_LINE__;
+	t.Suspend();
+	__TRACE_LINE__;
+	t.Resume();
+	test(d2.iTotal == 48);
+	test(d2.iInnerLoops == 4);
+	test(d2.iOuterLoops == 5);
+	test(d2.iTimeouts == 0);
+	test(d.iTotal == 48);
+	test(d.iInnerLoops == 10);
+	test(d.iOuterLoops == 7);
+	test(d.iTimeouts == 0);
+	test(array.Append(1)==0);
+	TInt nt = 0;
+	do	{
+		if (d.iTimeouts > nt)
+			{
+			test(d.iTimeouts-nt == 1);
+			nt = d.iTimeouts;
+			test.Printf(_L("Timeout %d\n"), nt);
+			test(d2.iTotal == 48);
+			test(d2.iInnerLoops == 4);
+			test(d2.iOuterLoops == 5);
+			test(d2.iTimeouts == 0);
+			test(d.iTotal == 48+nt);
+			test(d.iInnerLoops == 10+nt);
+			test(d.iOuterLoops == 7+nt);
+			test(array.Append(1)==0);
+			}
+		} while (nt<10);
+
+	d.iTimeoutMs = 0;
+	AppendToArrayB(d, 0);
+	test(d2.iTotal == 59);
+	test(d2.iInnerLoops == 5);
+	test(d2.iOuterLoops == 6);
+	test(d2.iTimeouts == 0);
+	test(d.iTotal == 59);
+	test(d.iInnerLoops == 21);
+	test(d.iOuterLoops == 18);
+	test(d.iTimeouts == 10);
+
+	__TRACE_LINE__;
+	t.SetPriority(EPriorityLess);
+	__TRACE_LINE__;
+	AppendToArrayB(d, 1, 11);
+	test(d2.iTotal == 70);
+	test(d2.iInnerLoops == 6);
+	test(d2.iOuterLoops == 7);
+	test(d2.iTimeouts == 0);
+	test(d.iTotal == 59);
+	test(d.iInnerLoops == 21);
+	test(d.iOuterLoops == 18);
+	test(d.iTimeouts == 10);
+	User::After(50000);
+	test(d2.iTotal == 70);
+	test(d2.iInnerLoops == 6);
+	test(d2.iOuterLoops == 7);
+	test(d2.iTimeouts == 0);
+	test(d.iTotal == 70);
+	test(d.iInnerLoops == 22);
+	test(d.iOuterLoops == 19);
+	test(d.iTimeouts == 10);
+
+
+
+	__TRACE_LINE__;
+	CV1.Close();
+	User::WaitForRequest(s);
+	test(t.ExitType()==EExitKill);
+	test(t.ExitReason()==KErrGeneral);
+	User::WaitForRequest(s2);
+	test(t2.ExitType()==EExitKill);
+	test(t2.ExitReason()==KErrGeneral);
+	CLOSE_AND_WAIT(t);
+	CLOSE_AND_WAIT(t2);
+
+
+	M1.Close();
+
+	TestGlobal();
+
+	Thread2Test();
+
+	TestSecondaryProcess();
+
+	RunBench();
+	M2.Close();
+	CV2.Close();
+	array.Close();
+
+	test.End();
+	test.Close();
+
+	__UHEAP_MARKEND;
+	__KHEAP_MARKEND;
+	return KErrNone;
+	}
+
--- a/kerneltest/f32test/demandpaging/t_nandpaging.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kerneltest/f32test/demandpaging/t_nandpaging.cpp	Mon May 10 11:40:53 2010 +0100
@@ -277,7 +277,6 @@
 					Drive.ControlIO(KNandGetDeferStats,statsBuf,0);
 					test.Printf(_L("PG %d PO %d(%d%%) NG %d NO %d\n"),stats.iPageGarbage,  stats.iPageOther, (TInt) ((stats.iPageOther*100)/cCount), stats.iNormalGarbage,  stats.iNormalOther);
 
-					test(stats.iPageOther>0);
 				 	pageGarbageCount+=stats.iPageGarbage; 
 				 	pageOtherCount+=stats.iPageOther;			 	
 				 	normalGarbageCount+=stats.iNormalGarbage; 
@@ -301,6 +300,7 @@
 			{
 			test.Printf(_L("\nTotals: Avg %2d %d%% CC=%4d \n"), fullTot/fullcCount, (TInt)(totChangeCount*100)/fullcCount, totChangeCount);
 			test.Printf(_L("PG %d PO %d(%d%%) NG %d NO %d\n"),pageGarbageCount,  pageOtherCount,(TInt) (pageOtherCount*100/fullcCount), normalGarbageCount,  normalOtherCount );
+			test(pageOtherCount > 0);	// Ensure at least one paging conflict occurred during the test.
 			}
 
 		// If totChangeCount does not change, nand maybe busy waiting.
@@ -511,15 +511,14 @@
 	TUint8* start = (TUint8*)romHeader+romHeader->iPageableRomStart;
 	TUint size = romHeader->iPageableRomSize;
 	TUint8* addr=NULL;
-	TBool flush;
 	while (Testing)
 		{
 			PageSemaphore.Wait(); // wait for main thread to want paging.
-			flush = (PagesBeingPaged==0);
 			addr=start+((TInt64(Random())*TInt64(size))>>32);	
-			PageDoneSemaphore.Signal(); // Acknolage request.
+			PageDoneSemaphore.Signal(); // Acknowledge request.
 
 			PageMutex.Wait();
+			TBool flush = (PagesBeingPaged==0);	// Ensure only one thread is flushing the cache at a time.
 			PagesBeingPaged++;
 			PageMutex.Signal();
 
--- a/kerneltest/f32test/server/t_falsespace.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kerneltest/f32test/server/t_falsespace.cpp	Mon May 10 11:40:53 2010 +0100
@@ -1049,7 +1049,7 @@
 	test_KErrNone(nRes);
     for(i=0; i<MaxDummyFiles; ++i)
 	    {
-        nRes = DeleteFileX(KBaseFN, i); 
+        nRes = DeleteFileX(KBaseFN, i);
         test_KErrNone(nRes);
 		}
 	}
@@ -1084,6 +1084,70 @@
 	}
 
 
+//-----------------------------------------------------------------------------
+/**
+    Test that reserving some drive space does not take more space than required.
+*/
+void Test0()
+{
+    test.Next(_L("test ReserveDriveSpace threshold"));
+
+    TInt nRes;
+    TVolumeIOParamInfo volIop;
+    TInt64 freespace=0;
+
+    //-- 1. format the volume
+    FormatDrive();
+
+    GetFreeDiskSpace(freespace);
+    const TInt64 freeSpace1 = freespace; //-- initial amount of free space on the volume
+
+    nRes = TheFs.VolumeIOParam(gTestDrive, volIop);
+    test_KErrNone(nRes);
+    const TInt KClusterSz = volIop.iClusterSize;
+    if(!IsPowerOf2(KClusterSz))
+        {
+        test.Next(_L("The FS hasn't reported a cluster size. The test is inconsistent, skipping"));
+        return;
+        }
+
+    //-- reserve exactly 1 cluster's worth of drive space.
+    nRes = TheFs.ReserveDriveSpace(gTestDrive, KClusterSz);
+    test_KErrNone(nRes);
+
+    GetFreeDiskSpace(freespace);
+    const TInt64 freeSpace2 = freespace;
+    test((freeSpace1 - freeSpace2) == KClusterSz);
+
+    //-- fill up a drive (it has a reserved space)
+    FillUpDisk();
+
+    //-- delete 1 file; 
+    nRes = DeleteFileX(KBaseName, 0);
+    test_KErrNone(nRes);
+
+    //-- try to create a file with a size that is exactly the same as the free space; it should succeed
+    GetFreeDiskSpace(freespace);
+    
+    nRes = CreateEmptyFile(TheFs, _L("\\aaa1"), freespace);
+    test_KErrNone(nRes);
+
+    GetFreeDiskSpace(freespace);
+    test(freespace == 0);
+
+    //-- return the drive space to the system
+	nRes = TheFs.ReserveDriveSpace(gTestDrive,0);
+	test_KErrNone(nRes); 
+
+    //-- release reserve access to the drive
+    nRes = TheFs.ReleaseReserveAccess(gTestDrive);
+    test_KErrNone(nRes);
+
+    GetFreeDiskSpace(freespace);
+    test(freespace == KClusterSz);
+
+    FormatDrive();
+}
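
Test0 relies on an IsPowerOf2 helper, presumably from the Fat_Test_Utils code this file already uses. Assuming it is the usual bit trick, a sketch:

// Hypothetical sketch: a positive integer is a power of two iff clearing
// its lowest set bit leaves zero.
static inline TBool IsPowerOf2(TInt aVal)
	{
	return aVal > 0 && (aVal & (aVal - 1)) == 0;
	}
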
 
 //-----------------------------------------------------------------------------
 
@@ -1092,9 +1156,9 @@
 // Do tests relative to session path
 //
 	{
-	//-- set up console output 
-	Fat_Test_Utils::SetConsole(test.Console()); 
-	
+	//-- set up console output
+	Fat_Test_Utils::SetConsole(test.Console());
+
 	// If TESTFAST mode (for automated test builds) is set, don't run LFFS tests.
 	if ((UserSvr::DebugMask(2) & 0x00000002) && IsTestingLFFS())
 		{
@@ -1133,6 +1197,7 @@
 		return;
 		}
 
+    Test0();
 	Test1();	// General test for new APIs
 	Test2();	// Test to ensure drive and session reserve limits are not exceeded
 	Test3();
@@ -1144,6 +1209,6 @@
 	Test2();	// run this test to check reserves are being cleared correctly
 
 	TestFAT4G_Boundary();
-    
+
 	TurnAllocFailureOff();
 	}
--- a/userlibandfileserver/fileserver/etshell/ts_com.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/userlibandfileserver/fileserver/etshell/ts_com.cpp	Mon May 10 11:40:53 2010 +0100
@@ -256,7 +256,7 @@
 			aPath.Insert(0,TheShell->currentPath.Left(2));
 		}
 
-	RFile file;
+	RFile64 file;
 	r=file.Open(CShell::TheFs,aPath,EFileStream);
 	if (r!=KErrNone)	//		File could not be opened
 		{
@@ -1199,11 +1199,23 @@
                 //-- print out cluster size that FS reported
                 TVolumeIOParamInfo volIoInfo;
                 nRes = aFs.VolumeIOParam(aDrvNum, volIoInfo);
-                if(nRes == KErrNone && volIoInfo.iClusterSize >= 512)
+                if(nRes == KErrNone)
                 {
-                    Buf.AppendFormat(_L(", Cluster Sz:%d"), volIoInfo.iClusterSize);
+                    if(volIoInfo.iBlockSize >= 0)
+                    {
+                        Buf.AppendFormat(_L(", BlkSz:%d"), volIoInfo.iBlockSize);
+                    }
+                    
+                    if(volIoInfo.iClusterSize >= 0)
+                    {
+                        Buf.AppendFormat(_L(", ClSz:%d"), volIoInfo.iClusterSize);
+                    }
+
+                    Buf.AppendFormat(_L(", CacheFlags:0x%x"), volInfo.iFileCacheFlags);
+                
                 }
 
+
                 if(Buf.Length())
                 {
                     Buf.Append(_L("\n"));
@@ -1972,7 +1984,7 @@
 	ShellFunction::StripQuotes(aPath);
 
 	ParsePath(aPath);
-	RFile file;
+	RFile64 file;
 	TInt r=file.Open(TheShell->TheFs,aPath,EFileStream);
 	if (r!=KErrNone)
 		return(r);
@@ -3108,7 +3120,7 @@
 TInt ShellFunction::Type(TDes& aPath,TUint aSwitches)
 	{
 	ParsePath(aPath);
-	RFile file;
+	RFile64 file;
 	TInt r=file.Open(TheShell->TheFs,aPath,EFileStreamText|EFileShareReadersOnly);
 	if (r!=KErrNone)
 		return r;
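
The shell commands above switch from RFile to RFile64 so that files larger than 2 GB can be opened and typed. A hedged sketch of the 64-bit API in isolation (hypothetical helper, minimal error handling):

#include <f32file.h>

// RFile64 mirrors the RFile API with 64-bit sizes and positions, so the
// size of a >2GB file can be read without overflowing a 32-bit TInt.
TInt FileSize64(RFs& aFs, const TDesC& aName, TInt64& aSize)
	{
	RFile64 file;
	TInt r = file.Open(aFs, aName, EFileRead | EFileShareReadersOnly);
	if (r != KErrNone)
		return r;
	r = file.Size(aSize);		// 64-bit overload
	file.Close();
	return r;
	}
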
--- a/userlibandfileserver/fileserver/group/release.txt	Wed May 05 05:11:16 2010 +0100
+++ b/userlibandfileserver/fileserver/group/release.txt	Mon May 10 11:40:53 2010 +0100
@@ -1,3 +1,22 @@
+Version 2.00.3039
+=================
+(Made by vfebvre 06/05/2010)
+
+1.	famustaf
+	1.	ou1cimx1#372432 CFileMan malfunctioning in copying
+
+2.	dlyokhin
+	1.	ou1cimx1#372220 File server may request more free space on the volume than necessary
+
+
+Version 2.00.3038
+=================
+(Made by vfebvre 05/05/2010)
+
+1.	niccox
+	1.	ou1cimx1#371028 shostmassstorage capabilities are too high
+
+
 Version 2.00.3037
 =================
 (Made by vfebvre 30/04/2010)
--- a/userlibandfileserver/fileserver/inc/f32file.h	Wed May 05 05:11:16 2010 +0100
+++ b/userlibandfileserver/fileserver/inc/f32file.h	Mon May 10 11:40:53 2010 +0100
@@ -680,6 +680,15 @@
     ETrue value means that the drive is finalised
     */
     EIsDriveFinalised,
+    
+    /**
+    Query the volume to ascertain whether file system extensions
+    are supported on it.
+    A boolean value is returned within a buffer defined as TPckgBuf<TBool>.
+    ETrue means that extensions are supported.
+    EFalse means they are not supported.
+     */
+    EFSysExtensionsSupported,
 	};
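
Per the documentation above, a client would query the new attribute through RFs::QueryVolumeInfoExt with a TPckgBuf<TBool>. A minimal sketch (not from the changeset):

#include <f32file.h>

// Returns ETrue only when the query succeeds and the volume reports that
// file system extensions are supported.
TBool FsExtensionsSupported(RFs& aFs, TInt aDrive)
	{
	TPckgBuf<TBool> supported;
	TInt r = aFs.QueryVolumeInfoExt(aDrive, EFSysExtensionsSupported, supported);
	return (r == KErrNone) && supported();
	}
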
 
 /**
@@ -699,27 +708,28 @@
 	{
 public:
 	/**
-	The size of a block in bytes.
-	
-	Reads and writes that are aligned on block boundaries are up to twice as fast as when 
-	mis-aligned.	
-	
-	Read and write operations on certain underlying media is done in blocks.
-	A write operation that modifies only part of a block is less efficient, in general, than
-	one that modifies an entire block. Data throughput degrades linearly for reads and writes in smaller
-	sized units. 
+	The size of a media block in bytes. This is a physical property of the media, reported by the corresponding media driver.
+	This value is usually at least 512 bytes and always a power of 2. For some media types it means
+    "the minimum size of an aligned data buffer whose write onto the media does not cause a read-modify-write operation."
+    Therefore, reads and writes that are aligned on block boundaries, with a length that is a multiple of the block size, can be much faster.
+	Read and write operations on certain underlying media are done in blocks. A write operation that modifies only part of a block is less efficient,
+    in general, than one that modifies an entire block. Data throughput degrades linearly for reads and writes in smaller sized units.
 	*/
 	TInt iBlockSize;
+	
 	/**
-	The size in bytes of a single disk cluster.
-	
-	Read and write operations that are aligned on cluster boundaries are more efficient.
-	
-	The file system organises and allocates the file data on the disk in clusters where each cluster is
-	one or more blocks. Files that are not zero length occupy at least one cluster of the disk, 
-	so large numbers of very small files use up more disk space than expected. 
+	The size in bytes of a single file system cluster. This is a logical property of the file system.
+	The file system organises and allocates the data on the disk in clusters, where each cluster usually consists of one or more blocks.
+    A cluster is the minimal unit that the file system allocates on the volume. Thus, a file of 1 byte in length occupies 1 cluster.
+
+	Read and write operations that are aligned on cluster boundaries are more efficient from the file system's point of view.
+    In some circumstances the cluster size can be less than the block size, but this is very inefficient.
+
+    This value is reported by the file system. A value less than 0 indicates an error.
 	*/
 	TInt iClusterSize;
+
+
 	/**
 	The recommended buffer size for optimised reading performance. 
 	
@@ -754,7 +764,7 @@
 
     /** 
     The maximum file size that is supported by the file system mounted on this volume. 
-    Not all file system may provide this parameter;  The value KMaxTUint64 (0xffffffffffffffff) means that this particular file system hasn't 
+    Not all file systems report this parameter; the value KMaxTUint64 (0xffffffffffffffff) means that this particular file system hasn't
     provided this information.
     */
     TUint64 iMaxSupportedFileSize;
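
Given the reworked guidance above, a client can fetch the reported geometry with RFs::VolumeIOParam (already used elsewhere in this changeset) and round its buffer size up to a whole number of blocks. A sketch, treating non-positive values as "unknown":

#include <f32file.h>

// Round a requested buffer size up to a multiple of the media block size;
// fall back to the requested size when no reliable geometry is reported.
TInt PickAlignedBufferSize(RFs& aFs, TInt aDrive, TInt aWanted)
	{
	TVolumeIOParamInfo ioParams;
	TInt r = aFs.VolumeIOParam(aDrive, ioParams);
	if (r != KErrNone || ioParams.iBlockSize <= 0)
		return aWanted;
	const TInt blk = ioParams.iBlockSize;		// a power of 2 when valid
	return ((aWanted + blk - 1) / blk) * blk;	// round up to a whole block
	}
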
@@ -1140,7 +1150,32 @@
 	@internalTechnology
 	
 	*/
-	EFileBigFile        =0x00040000
+	EFileBigFile        =0x00040000,
+	
+	/**
+	Using this flag indicates that the client is making large sequential reads and/or writes
+	and wants to maximise the performance of those reads and/or writes.
+	
+	The flag gives a hint to the file server and filesystem to adjust to a streaming
+	data pattern and try their best to make it optimal.
+	
+	Some conditions apply:
+	- This does not guarantee that the performance of read/write operations will increase.
+	- Using this flag for purposes other than data streaming may lead to performance degradation.
+	- This may sacrifice user data integrity for the sake of performance.
+	
+	If a file is opened by Client A with EFileSequential, and the file is then opened
+	without EFileSequential by Client B, then this file mode will be disabled.
+	When the file handle is closed by Client B, then the EFileSequential file mode
+	will be enabled again.
+	Therefore, this mode will only be enabled if all clients set the file as such,
+	otherwise the file mode will be disabled.
+	
+	FAT file system specific information:
+	This flag improves write and file expansion performance whilst decreasing robustness
+	on a "Rugged-FAT" file system, which is applicable to internal non-removable drives.
+	*/
+	EFileSequential		=0x00080000
 
 	};
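
A hedged sketch of a client opting in to the new hint when streaming data (hypothetical file name; error handling elided):

#include <f32file.h>

// Open a file for large sequential writes, passing EFileSequential so the
// file server and file system can optimise for a streaming access pattern.
void StreamWriteExample(RFs& aFs)
	{
	RFile file;
	TInt r = file.Replace(aFs, _L("C:\\stream.dat"), EFileWrite | EFileSequential);
	if (r == KErrNone)
		{
		TBuf8<512> block;
		block.FillZ(block.MaxSize());
		for (TInt i = 0; i < 64 && r == KErrNone; ++i)
			r = file.Write(block);	// large sequential writes benefit most
		file.Close();
		}
	}
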
 
--- a/userlibandfileserver/fileserver/inc/f32ver.h	Wed May 05 05:11:16 2010 +0100
+++ b/userlibandfileserver/fileserver/inc/f32ver.h	Mon May 10 11:40:53 2010 +0100
@@ -58,6 +58,6 @@
 
 @see TVersion
 */
-const TInt KF32BuildVersionNumber=3037;
+const TInt KF32BuildVersionNumber=3039;
 //
 #endif
--- a/userlibandfileserver/fileserver/sfile/sf_file.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/userlibandfileserver/fileserver/sfile/sf_file.cpp	Mon May 10 11:40:53 2010 +0100
@@ -241,7 +241,7 @@
     TUint32 mode=aRequest->Message().Int1();
 	if (anOpen==EFileCreate || anOpen==EFileReplace)
 		{
-		r = CheckDiskSpace(0, aRequest);
+		r = CheckDiskSpace(KMinFsCreateObjTreshold, aRequest);
 		if(r != KErrNone)
             return r;
         
@@ -601,7 +601,7 @@
 	{
 	__PRINT(_L("TFsFileTemp::DoRequestL(CFsRequest* aRequest)"));
     
-    TInt r = CheckDiskSpace(0, aRequest);
+    TInt r = CheckDiskSpace(KMinFsCreateObjTreshold, aRequest);
     if(r != KErrNone)
         return r;
 	
@@ -1652,7 +1652,7 @@
 	{
 	__PRINT(_L("TFsFileSetAtt::DoRequestL(CSessionFs* aSession)"));
     
-    TInt r = CheckDiskSpace(0, aRequest);
+    TInt r = CheckDiskSpace(KMinFsCreateObjTreshold, aRequest);
     if(r != KErrNone)
         return r;
 
@@ -1717,7 +1717,7 @@
 	{
 	__PRINT(_L("TFsFileSetModified::DoRequestL(CFsRequest* aRequest)"));
     
-    TInt r = CheckDiskSpace(0, aRequest);
+    TInt r = CheckDiskSpace(KMinFsCreateObjTreshold, aRequest);
     if(r != KErrNone)
         return r;
 
@@ -1757,7 +1757,7 @@
 	{
 	__PRINT(_L("TFsFileSet::DoRequestL(CFsRequest* aRequest)"));
 
-    TInt r = CheckDiskSpace(0, aRequest);
+    TInt r = CheckDiskSpace(KMinFsCreateObjTreshold, aRequest);
     if(r != KErrNone)
         return r;
 
@@ -1830,26 +1830,13 @@
 		// check if an attempt is made to change the share mode to EFileShareExclusive
 		// while the file has multiple readers
 	if (newMode == EFileShareExclusive && (currentMode & KFileShareMask) != EFileShareExclusive)
-		{ 
-		// Check no other CFileCB is reading the file.
-		FileShares->Lock();
-		TInt count=FileShares->Count();
-		TBool found=EFalse;
-		while(count--)
-			{
-			CFileShare* fileShare=(CFileShare*)(*FileShares)[count];
-			if (&fileShare->File()==&share->File())
-				{
-				if (found)
-					{
-					FileShares->Unlock();
-					return(KErrAccessDenied);
-					}
-				found=ETrue;
-				}
-			}
-		FileShares->Unlock();
+		{
+		// Check that this is the file's only fileshare/client
+		TDblQue<CFileShare>& aShareList = (&share->File())->FileShareList();
+		if (!(aShareList.IsFirst(share) && aShareList.IsLast(share)))
+			return KErrAccessDenied;
 		}
+	
 	share->iMode&=~KFileShareMask;
 	share->iMode|=newMode;
 	share->File().SetShare(newMode);
@@ -1882,7 +1869,7 @@
 	{
 	__PRINT(_L("TFsFileRename::DoRequestL(CFsRequest* aRequest)"));
 
-    TInt r = CheckDiskSpace(0, aRequest);
+    TInt r = CheckDiskSpace(KMinFsCreateObjTreshold, aRequest);
     if(r != KErrNone)
         return r;
 
@@ -2451,14 +2438,18 @@
 
 
 void CFileCB::PromoteShare(CFileShare* aShare)
-//
-// Manages share promotion after the share has been added to the FilsShares container.
-//
-//  - Assumes the share has already been validated using ValidateShare()
-//
-//  - The count of promoted shares (ie - non-EFileShareReadersOrWriters) is incremented
-//	  to allow the share mode to be demoted when the last promoted share is closed.
-//
+/**
+	Manages share promotion and checks the EFileSequential file mode
+	after the share has been added to the FileShares container.
+	
+	It assumes the share has already been validated using ValidateShare().
+	
+	The count of promoted shares (ie - non-EFileShareReadersOrWriters) is incremented
+	to allow the share mode to be demoted when the last promoted share is closed.
+	
+	Similarly, the count of non-EFileSequential file modes is incremented to allow
+	the file mode to be enabled when the last non-EFileSequential share is closed.
+ */
 	{
 	TShare reqShare = (TShare)(aShare->iMode & KFileShareMask);
 	if(reqShare != EFileShareReadersOrWriters)
@@ -2466,29 +2457,48 @@
 		iBody->iPromotedShares++;
 		iShare = reqShare;
 		}
+	
+	// If the file mode is not EFileSequential, then disable the 'Sequential' flag
+	if(!(aShare->iMode & EFileSequential))
+		{
+		iBody->iNonSequentialFileModes++;
+		SetSequentialMode(EFalse);
+		__PRINT(_L("CFileCB::PromoteShare - FileSequential mode is off"));
+		}
 	}
 
 
 void CFileCB::DemoteShare(CFileShare* aShare)
-//
-// Manages share demotion after the share has been removed from the FileShares container.
-//
-//  - If the share being removed is not EFileShareReadersOrWriters, then the current
-//	  share mode may require demotion back to EFileShareReadersOrWriters.
-//
-//	- This is determined by the iPromotedShares count, incremented in PromoteShare()
-//
+/**
+	Manages share demotion and tracks the EFileSequential file mode
+	after the share has been removed from the FileShares container.
+	
+	If the share being removed is not EFileShareReadersOrWriters, then the current
+	share mode may require demotion back to EFileShareReadersOrWriters.
+	This is determined by the iPromotedShares count, incremented in PromoteShare().
+	
+	Similarly, if the share being removed is non-EFileSequential,
+	then the 'Sequential' file mode may need to be re-enabled,
+	as determined by the iNonSequentialFileModes count.
+ */
 	{
-	if((aShare->iMode & KFileShareMask) != EFileShareReadersOrWriters)
+	if((aShare->iMode & KFileShareMask) != EFileShareReadersOrWriters
+		&& --iBody->iPromotedShares == 0)
 		{
-		if(--iBody->iPromotedShares == 0)
-			{
-			// Don't worry if the file has never been opened as EFileShareReadersOrWriters
-			//  - in this case the CFileCB object is about to be closed anyway.
-			iShare = EFileShareReadersOrWriters;
-			}
+		// Don't worry if the file has never been opened as EFileShareReadersOrWriters
+		//  - in this case the CFileCB object is about to be closed anyway.
+		iShare = EFileShareReadersOrWriters;
 		}
-	__ASSERT_DEBUG(iBody->iPromotedShares>=0,Fault(EFileShareBadPromoteCount));
+	__ASSERT_DEBUG(iBody->iPromotedShares>=0, Fault(EFileShareBadPromoteCount));
+	
+	if(!(aShare->iMode & EFileSequential) && --iBody->iNonSequentialFileModes == 0)
+		{
+		// As above, if the file has never been opened as EFileSequential,
+		// it implies that the CFileCB object is about to be closed anyway.
+		SetSequentialMode(ETrue);
+		__PRINT(_L("CFileCB::DemoteShare - FileSequential mode is enabled"));
+		}
+	__ASSERT_DEBUG(iBody->iNonSequentialFileModes>=0, Fault(EFileShareBadPromoteCount));
 	}
 
 
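
Taken together, PromoteShare() and DemoteShare() keep iNonSequentialFileModes
equal to the number of open shares that did not specify EFileSequential, so the
per-file 'Sequential' flag holds only while every client has opened the file
sequentially. A hedged sketch of the observable behaviour (fs and KFileName
are assumed to exist; error handling elided):

@code
// Illustrative only: two clients sharing one file.
RFile f1, f2;
User::LeaveIfError(f1.Open(fs, KFileName,
		EFileRead | EFileShareReadersOnly | EFileSequential));
// iNonSequentialFileModes == 0 -> CFileCB::IsSequentialMode() is ETrue

User::LeaveIfError(f2.Open(fs, KFileName,
		EFileRead | EFileShareReadersOnly));
// f2 lacks EFileSequential: the count becomes 1 and the flag is cleared

f2.Close();	// count drops back to 0: DemoteShare() re-enables the flag
f1.Close();
@endcode
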
@@ -2723,7 +2733,8 @@
 
 /**
 Constructor.
-Locks the mount resource to which the shared file resides.
+Locks the mount resource on which the shared file resides
+and adds the share to the file's FileShare List.
 
 @param aFileCB File to be shared.
 */
@@ -2731,12 +2742,14 @@
 	: iFile(aFileCB)
 	{
 	AddResource(iFile->Mount());
+	iFile->AddShare(*this);
 	}
 
 /**
 Destructor.
 
 Frees mount resource to which the shared file resides,
+removes the share from the file's FileShare List,
 removes share status from the shared file and finally closes
 the file.
 */
@@ -2746,6 +2759,7 @@
 	__ASSERT_DEBUG(iCurrentRequest == NULL, Fault(ERequestQueueNotEmpty));
 
 	RemoveResource(iFile->Mount());
+	iShareLink.Deque();
 	iFile->RemoveLocks(this);
 	iFile->DemoteShare(this);
 	iFile->CancelAsyncReadRequest(this, NULL);
@@ -3027,6 +3041,7 @@
 CFileBody::CFileBody(CFileCB* aFileCB, CFileCB::MExtendedFileInterface* aExtendedFileInterface)
   : iFileCB(aFileCB),
 	iExtendedFileInterface(aExtendedFileInterface ? aExtendedFileInterface : this),
+	iShareList(_FOFF(CFileShare,iShareLink)),
 	iSizeHigh(0)
 	{
 	iFairSchedulingLen = TFileCacheSettings::FairSchedulingLen(iFileCB->DriveNumber());
@@ -3586,6 +3601,45 @@
 	}
 
 
+//---------------------------------------------------------------------------------------------------------------------
+/**
+Gets the 'Sequential' mode of the file.
+
+@return	ETrue if the file is in 'Sequential' mode; EFalse otherwise
+*/
+EXPORT_C TBool CFileCB::IsSequentialMode() const
+	{
+	return iBody->iSequential;
+	}
+
+/**
+Sets the 'Sequential' mode of the file.
+ */
+void CFileCB::SetSequentialMode(TBool aSequential)
+	{
+	iBody->iSequential = aSequential;
+	}
+
+//---------------------------------------------------------------------------------------------------------------------
+/**
+Gets the list containing the shares associated with the file.
+
+@return	The FileShare List
+*/
+TDblQue<CFileShare>& CFileCB::FileShareList() const
+	{
+	return iBody->iShareList;
+	}
+
+/**
+Adds the share to the end of the FileShare List.
+*/
+void CFileCB::AddShare(CFileShare& aFileShare)
+	{
+	iBody->iShareList.AddLast(aFileShare);
+	}
+
+
 //#####################################################################################################################
 //#  TFileShareLock class implementation
 //#####################################################################################################################
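
The FileShare list is intrusive: CFileBody::iShareList is constructed with
_FOFF(CFileShare, iShareLink), CFileShare's constructor enqueues itself via
CFileCB::AddShare(), and its destructor unlinks itself with iShareLink.Deque().
The same wiring in miniature (a sketch, assuming direct access to iShareLink):

@code
TDblQue<CFileShare> shareList(_FOFF(CFileShare, iShareLink));

shareList.AddLast(*share);		// construction: AddShare() appends the share

share->iShareLink.Deque();		// destruction: unlink without searching
@endcode
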
--- a/userlibandfileserver/fileserver/sfile/sf_nbs.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/userlibandfileserver/fileserver/sfile/sf_nbs.cpp	Mon May 10 11:40:53 2010 +0100
@@ -22,7 +22,7 @@
 	{
 	__PRINT(_L("TFsMkDir::DoRequestL(CFsRequest* aRequest)"));
 
-    TInt r = CheckDiskSpace(0, aRequest);
+    TInt r = CheckDiskSpace(KMinFsCreateObjTreshold, aRequest);
     if(r != KErrNone)
         return r;
 
@@ -143,7 +143,7 @@
 //
 	{
 	__PRINT(_L("TFsRename::DoRequestL(CFsRequest* aRequest)"));
-    TInt r = CheckDiskSpace(0, aRequest);
+    TInt r = CheckDiskSpace(KMinFsCreateObjTreshold, aRequest);
     if(r != KErrNone)
         return r;
 	
@@ -182,7 +182,7 @@
 	{
 	__PRINT(_L("TFsReplace::DoRequestL(CFsRequest* aRequest)"));
 
-    TInt r = CheckDiskSpace(0, aRequest);
+    TInt r = CheckDiskSpace(KMinFsCreateObjTreshold, aRequest);
     if(r != KErrNone)
         return r;
 
@@ -261,7 +261,7 @@
 	{
 	__PRINT(_L("TFsSetEntry::DoRequestL(CFsRequest* aRequest)"));
 
-    TInt r = CheckDiskSpace(0, aRequest);
+    TInt r = CheckDiskSpace(KMinFsCreateObjTreshold, aRequest);
     if(r != KErrNone)
         return r;
 
@@ -892,7 +892,7 @@
 //	create the private path unless it already exists
 //
 	{
-    TInt ret = CheckDiskSpace(0, aRequest);
+    TInt ret = CheckDiskSpace(KMinFsCreateObjTreshold, aRequest);
     if(ret != KErrNone)
         return ret;
 
--- a/userlibandfileserver/fileserver/sfile/sf_std.h	Wed May 05 05:11:16 2010 +0100
+++ b/userlibandfileserver/fileserver/sfile/sf_std.h	Mon May 10 11:40:53 2010 +0100
@@ -144,6 +144,13 @@
 //-- absolute maximum file size that file server supports
 const TUint64 KMaxSupportedFileSize = KMaxTUint64;
 
+//-- A speculative value for the minimum amount of free space that must be available on the volume to create a file, directory etc.
+//-- It is used mostly by the "reserve drive space" functionality, which is fundamentally flawed:
+//-- the file server cannot know exactly how much space creating a given fs object on the volume will require,
+//-- so it has to guess. This default is one "sector size"; the file system may round it up internally to its cluster size, if any.
+const TInt KMinFsCreateObjTreshold = KDefaultVolumeBlockSize;
+
+
 //__DATA_CAGING__
 const TUint SHA1_LBLOCK=16;
 const TUint SHA1_HASH=20;
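
For example, a FAT volume with 4KB clusters cannot allocate less than one
cluster for a new object, so a file system may round the speculative value up
to its own allocation unit. A sketch of that rounding (RoundUp is hypothetical,
not file-server API):

@code
// Illustrative only.
inline TInt64 RoundUp(TInt64 aBytes, TInt aClusterSize)
	{
	return (aBytes + aClusterSize - 1) / aClusterSize * aClusterSize;
	}
// RoundUp(KMinFsCreateObjTreshold, 4096) == 4096 when
// KDefaultVolumeBlockSize is 512.
@endcode
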
@@ -235,7 +242,7 @@
 
 enum TFsPanic
 	{
-	ELdrImportedOrdinalDoesNotExist
+	ELdrImportedOrdinalDoesNotExist	
 	};
 //
 enum TFsFault
@@ -443,7 +450,8 @@
 	ETraceLddLoadFailure,				//200
 	ETooManyDrivesPerSocket,
 	ENotificationFault,
-	EFsObjectOpen
+	EFsObjectOpen,
+	EContainerHeapCorruptionOnRemove
 	};
 
 
@@ -1838,10 +1846,6 @@
 
 typedef TPckgBuf<TMediaPswdReplyNotifyInfoV1> TMediaPswdReplyNotifyInfoV1Buf;
 
-#if defined(__WINS__)
- TInt MapWindowsFileName(TDes& aBuffer,const TDesC& aFileName);
-#endif
-
 enum TDllFindMethod {EFindInPath, EFindInSystemLibs, EFindInSystemBin, EFindExhausted};
 
 //---------------------------------------------------------------------------------------------------------------------
@@ -1867,6 +1871,7 @@
 	TInt iFairSchedulingLen;
 	TBool iNotifyAsyncReadersPending;
 	TBool iDeleteOnClose;
+	TDblQue<CFileShare> iShareList;	// A list containing the CFileShare objects associated with the file
 
 protected:
 	TInt iPromotedShares;
@@ -1876,11 +1881,14 @@
 
     /** 
     maximum file size supported by the filesystem that instantiates the CFileCB, associated with this object.
-    For example, FAT32 supports files not larger than 4GB-1. Other file systems can support larger files. 
+    For example, FAT32 supports files not larger than 4GB-1. Other file systems can support larger files.
     This member allows file server to know maximum allowed position in the file.
     The default value is KMaxTUint64
     */
-    TUint64 iMaxSupportedFileSize; 
+    TUint64 iMaxSupportedFileSize;
+    
+	TInt iNonSequentialFileModes;	// Count of clients without the 'Sequential' mode enabled
+	TBool iSequential;				// Indicates whether the file is in 'Sequential' mode
 
 public:
 	// Provides support for large file size ( file size > 4GB - 1)
--- a/userlibandfileserver/fileserver/sfile/sf_svr.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/userlibandfileserver/fileserver/sfile/sf_svr.cpp	Mon May 10 11:40:53 2010 +0100
@@ -590,7 +590,7 @@
 // Set the volume name.
 //
 	{
-    TInt r = CheckDiskSpace(0, aRequest);
+    TInt r = CheckDiskSpace(KMinFsCreateObjTreshold, aRequest);
     if(r != KErrNone)
         return r;
 
@@ -1314,8 +1314,13 @@
 
                 return KErrNone;
             }
-
-            
+            case EFSysExtensionsSupported:
+            {
+                TBool supported = pDrive->GetFSys()->IsExtensionSupported();
+                TPckgBuf<TBool> data(supported);
+                aRequest->WriteL(KMsgPtr2,data);
+                return KErrNone;
+            }
 		default:
 			{
 			return KErrNotSupported;
--- a/userlibandfileserver/fileserver/sfile/sf_utl.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/userlibandfileserver/fileserver/sfile/sf_utl.cpp	Mon May 10 11:40:53 2010 +0100
@@ -1067,6 +1067,8 @@
 	if(KReservedSpace == 0 || KDriveNumber == EDriveZ)
 	    return KErrNone;
 
+    ASSERT(aThreshold);
+
     //-- if the drive has a reserved space, take it into account
 	CSessionFs* session=aRequest->Session(); 
 
@@ -1074,8 +1076,7 @@
         aThreshold += KReservedSpace;
 
     //-- ask the corresponding file system if there is aThreshold bytes available.
-    //-- for some reason it's required to be strictly > than aThreshold
-    return aRequest->Drive()->RequestFreeSpaceOnMount(aThreshold+1);
+    return aRequest->Drive()->RequestFreeSpaceOnMount(aThreshold);
 	}								
 
 
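
With the +1 removed, the mount is asked for exactly the caller's threshold plus
any reserved space this session cannot consume. Worked example under assumed
values (KReservedSpace == 64KB, session without reserved access):

@code
const TInt KThreshold = KMinFsCreateObjTreshold;	// 512 bytes by default
const TInt KReserved  = 65536;						// assumed drive reserve
// The request handlers above now ask the mount for exactly:
const TInt64 KRequested = KThreshold + KReserved;	// 66048, not 66049
@endcode
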
--- a/userlibandfileserver/fileserver/sfsrv/cl_fman.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/userlibandfileserver/fileserver/sfsrv/cl_fman.cpp	Mon May 10 11:40:53 2010 +0100
@@ -1089,6 +1089,31 @@
 including any directories in the path specified by aNew which do not
 already exist.
 
+If the source (anOld) is a FILE and the recursive operation is set,
+then all files with the same name as anOld in the source directory,
+including those in its subdirectories, will be copied to the destination.
+
+For example, the initial directory structure is as follows:
+C:\dir1\file.txt
+C:\dir1\subdirA\file.txt
+C:\dir1\subdirB\file.txt
+
+@code
+CFileMan* fm(CFileMan::NewL(iFs));	// Where iFs is an RFs handle
+fm->Copy(_L("C:\\dir1\\file.txt"), _L("C:\\dir2\\file.txt"), CFileMan::ERecurse);
+// OR without specifying the filename in aNew:
+fm->Copy(_L("C:\\dir1\\file.txt"), _L("C:\\dir2\\"), CFileMan::ERecurse);
+@endcode
+
+Because of the recursive behaviour, the final directory structure after
+either one of the copy operations above will be as follows:
+C:\dir1\file.txt
+C:\dir1\subdirA\file.txt
+C:\dir1\subdirB\file.txt
+C:\dir2\file.txt
+C:\dir2\subdirA\file.txt
+C:\dir2\subdirB\file.txt
+
 If recursive operation is not set, only the matching files located in
 the single directory specified in anOld are copied.
 No intermediate directories will be created; if any directories in
@@ -1122,13 +1147,13 @@
  1.2	If there is no file to operate on i.e. if source directory is empty, the
  	function will do nothing and return error code KErrNotFound.
 
- 2. Files can be copied across drives.
-
- 3. Open files can be copied if they have been opened using
-      the EFileShareReadersOnly file share mode.
-
- 4. Read-only, hidden and system files can be copied and
-   the source file's attributes are preserved in the target file.
+ 2.	Files can be copied across drives.
+
+ 3.	Open files can be copied if they have been opened using
+	the EFileShareReadersOnly file share mode.
+
+ 4.	Read-only, hidden and system files can be copied and
+	the source file's attributes are preserved in the target file.
 
 @param anOld     Path indicating the file(s) to be copied.
                  Any path components which are not specified here will be
@@ -1393,7 +1418,7 @@
   recursively by default and moves both the last directory level and all of its content.
   Notice that no trailing backslash ("\") implies moving files recursively automatically.
 
-For example, if the directory level "b" contains the files F1,F2 and F3, then:
+For example, if the directory level "b" contains the files F1, F2 and F3, then:
 @code
 CFileMan* fm(CFileMan::NewL(iFs)); // Where iFs is an RFs handle
 ...
@@ -1421,7 +1446,7 @@
 0 is passed as an argument, the operation behaves the same way as by passing
 CFileMan::ERecurse flag.
 
-for example:
+For example:
 @code
 CFileMan* fm(CFileMan::NewL(iFs)); // Where iFs is an RFs handle
 ...
@@ -1436,6 +1461,31 @@
 fm->Move(_L("C:\\a\\b"), _L("C:\\x\\y\\"), CFileMan::ERecurse);
 @endcode
 
+If the source (anOld) is a FILE and the recursive operation is set,
+then all files with the same name as anOld in the source directory,
+including those in its subdirectories, will be moved to the destination.
+
+For example, the initial directory structure is as follows:
+C:\src\file.txt
+C:\src\subdirA\file.txt
+C:\src\subdirB\file.txt
+
+@code
+CFileMan* fm(CFileMan::NewL(iFs));	// Where iFs is an RFs handle
+fm->Move(_L("C:\\src\\file.txt"), _L("C:\\dest\\file.txt"), CFileMan::ERecurse);
+// OR without specifying the filename in aNew:
+fm->Move(_L("C:\\src\\file.txt"), _L("C:\\dest\\"), CFileMan::ERecurse);
+@endcode
+
+Because of the recursive behaviour, the final directory structure after
+either one of the move operations above will be as follows:
+C:\src\
+C:\src\subdirA\
+C:\src\subdirB\
+C:\dest\file.txt
+C:\dest\subdirA\file.txt
+C:\dest\subdirB\file.txt
+
 Notes:
 
 -# Read-only, hidden and system files can be moved and the source file's
@@ -1443,7 +1493,7 @@
    be moved. Attempting to move an open file will return an error for
    that file, as retrieved by CFileBase::GetLastError().
 
-@param anOld	 Path indicating the files to be moved. May be either a full path, or
+@param anOld	 Path indicating the directory/files to be moved. May be either a full path, or
 				 relative to the session path. Note that if you specify a directory level,
 				 then the behaviour of the move operation is sensitive to the presence
 				 (or absence) of a trailing backslash ("\") character. Any path components
--- a/userlibandfileserver/fileserver/shostmassstorage/client/hostmsclient.mmp	Wed May 05 05:11:16 2010 +0100
+++ b/userlibandfileserver/fileserver/shostmassstorage/client/hostmsclient.mmp	Mon May 10 11:40:53 2010 +0100
@@ -1,4 +1,4 @@
-// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
+// Copyright (c) 2008-2010 Nokia Corporation and/or its subsidiary(-ies).
 // All rights reserved.
 // This component and the accompanying materials are made available
 // under the terms of the License "Eclipse Public License v1.0"
@@ -30,7 +30,7 @@
 
 LIBRARY		efsrv.lib euser.lib hal.lib usbdescriptors.lib usbdi_utils.lib
 
-CAPABILITY ALL
+CAPABILITY TCB ProtServ DiskAdmin AllFiles PowerMgmt CommDD NetworkControl WriteDeviceData
 
 //MACRO _USBMS_DEBUG_PRINT_
 
--- a/userlibandfileserver/fileserver/shostmassstorage/server/hostmsserver.mmp	Wed May 05 05:11:16 2010 +0100
+++ b/userlibandfileserver/fileserver/shostmassstorage/server/hostmsserver.mmp	Mon May 10 11:40:53 2010 +0100
@@ -1,4 +1,4 @@
-// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
+// Copyright (c) 2008-2010 Nokia Corporation and/or its subsidiary(-ies).
 // All rights reserved.
 // This component and the accompanying materials are made available
 // under the terms of the License "Eclipse Public License v1.0"
@@ -62,7 +62,7 @@
 BASEADDRESS		0x61000000 
 END
 
-CAPABILITY ALL -Tcb
+CAPABILITY AllFiles CommDD
 
 UID		0 0x10286A83