201023_15
author hgs
Wed, 23 Jun 2010 12:58:21 +0100
changeset 177 a232af6b0b1f
parent 176 af6ec97d9189
child 189 a5496987b1da
kernel/eka/bmarm/euseru.def
kernel/eka/bwins/euseru.def
kernel/eka/bx86/euseru.def
kernel/eka/bx86gcc/euseru.def
kernel/eka/drivers/bsp/bld.inf
kernel/eka/drivers/power/smppower/idlehelper.cia
kernel/eka/drivers/power/smppower/idlehelper.cpp
kernel/eka/eabi/euseru.def
kernel/eka/euser/epoc/arm/uc_utl.cia
kernel/eka/euser/epoc/win32/uc_utl.cpp
kernel/eka/euser/epoc/x86/uc_utl.cia
kernel/eka/euser/us_exec.cpp
kernel/eka/include/drivers/smppower/idlehelper.h
kernel/eka/include/e32cia.h
kernel/eka/include/e32cmn.h
kernel/eka/include/e32std.h
kernel/eka/include/e32ver.h
kernel/eka/include/kernel/kern_priv.h
kernel/eka/include/nkern/nk_cpu.h
kernel/eka/include/nkern/nklib.h
kernel/eka/include/nkernsmp/arm/ncern.h
kernel/eka/include/nkernsmp/arm/nk_plat.h
kernel/eka/include/nkernsmp/nk_priv.h
kernel/eka/include/nkernsmp/nkern.h
kernel/eka/include/nkernsmp/x86/ncern.h
kernel/eka/include/nkernsmp/x86/nk_plat.h
kernel/eka/include/u32std.h
kernel/eka/kernel/execs.txt
kernel/eka/kernel/sexec.cpp
kernel/eka/kernel/skernel.cpp
kernel/eka/kernel/sthread.cpp
kernel/eka/nkern/arm/nklib.cia
kernel/eka/nkern/nkern.mmp
kernel/eka/nkern/nklib.cpp
kernel/eka/nkernsmp/arm/nccpu.cpp
kernel/eka/nkernsmp/arm/ncglob.cpp
kernel/eka/nkernsmp/arm/ncmonitor.cpp
kernel/eka/nkernsmp/arm/ncsched.cia
kernel/eka/nkernsmp/arm/ncsched.cpp
kernel/eka/nkernsmp/arm/ncthrd.cpp
kernel/eka/nkernsmp/arm/ncutilf.cia
kernel/eka/nkernsmp/arm/ncutils.cpp
kernel/eka/nkernsmp/nk_bal.cpp
kernel/eka/nkernsmp/nkern.cpp
kernel/eka/nkernsmp/nkern.mmp
kernel/eka/nkernsmp/nkerns.cpp
kernel/eka/nkernsmp/sched.cpp
kernel/eka/nkernsmp/x86/ncglob.cpp
kernel/eka/nkernsmp/x86/ncmonitor.cpp
kernel/eka/nkernsmp/x86/ncsched.cpp
kernel/eka/nkernsmp/x86/ncutilf.cpp
kernel/eka/nkernsmp/x86/ncutils.cpp
kernel/eka/release.txt
kernel/eka/rombuild/kernel.hby
kerneltest/e32test/group/bld.inf
kerneltest/e32test/group/d_frqchg.mmh
kerneltest/e32test/group/t_frqchg.mmp
kerneltest/e32test/group/t_semutx.mmp
kerneltest/e32test/power/d_frqchg.cpp
kerneltest/e32test/power/d_frqchg.h
kerneltest/e32test/power/t_frqchg.cpp
kerneltest/e32test/prime/t_semutx.cpp
--- a/kernel/eka/bmarm/euseru.def	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/bmarm/euseru.def	Wed Jun 23 12:58:21 2010 +0100
@@ -2268,4 +2268,9 @@
 	__DbgGetAllocFail__10RAllocator @ 2267 NONAME R3UNUSED ; RAllocator::__DbgGetAllocFail(void)
 	__DbgGetAllocFail__4Useri @ 2268 NONAME R3UNUSED ; User::__DbgGetAllocFail(int)
 	SetKeyOffset__10RArrayBasei @ 2269  NONAME R3UNUSED ; RArrayBase::SetKeyOffset(int)
+	Poll__10RSemaphore @ 2270 NONAME R3UNUSED ; RSemaphore::Poll(void)
+	Wait__6RMutexi @ 2271 NONAME R3UNUSED ; RMutex::Wait(int)
+	Poll__6RMutex @ 2272 NONAME R3UNUSED ; RMutex::Poll(void)
+	Poll__9RFastLock @ 2273 NONAME R3UNUSED ; RFastLock::Poll(void)
+	Wait__9RFastLocki @ 2274 NONAME R3UNUSED ; RFastLock::Wait(int)
 	
--- a/kernel/eka/bwins/euseru.def	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/bwins/euseru.def	Wed Jun 23 12:58:21 2010 +0100
@@ -2216,4 +2216,8 @@
 	?__DbgGetAllocFail@RAllocator@@QAE?AW4TAllocFail@1@XZ @ 2215 NONAME ; enum RAllocator::TAllocFail RAllocator::__DbgGetAllocFail(void)
 	?__DbgGetAllocFail@User@@SA?AW4TAllocFail@RAllocator@@H@Z @ 2216 NONAME ; enum RAllocator::TAllocFail User::__DbgGetAllocFail(int)
 	?SetKeyOffset@RArrayBase@@IAEXH@Z @ 2217  NONAME ; void RArrayBase::SetKeyOffset(int)
-
+	?Poll@RFastLock@@QAEHXZ @ 2218 NONAME ; public: int __thiscall RFastLock::Poll(void)
+	?Poll@RMutex@@QAEHXZ @ 2219 NONAME ; public: int __thiscall RMutex::Poll(void)
+	?Poll@RSemaphore@@QAEHXZ @ 2220 NONAME ; public: int __thiscall RSemaphore::Poll(void)
+	?Wait@RMutex@@QAEHH@Z @ 2221 NONAME ; public: int __thiscall RMutex::Wait(int)
+	?Wait@RFastLock@@QAEHH@Z @ 2222 NONAME ; public: int __thiscall RFastLock::Wait(int)
--- a/kernel/eka/bx86/euseru.def	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/bx86/euseru.def	Wed Jun 23 12:58:21 2010 +0100
@@ -2216,4 +2216,8 @@
 	?__DbgGetAllocFail@RAllocator@@QAE?AW4TAllocFail@1@XZ @ 2215 NONAME ; enum RAllocator::TAllocFail RAllocator::__DbgGetAllocFail(void)
 	?__DbgGetAllocFail@User@@SA?AW4TAllocFail@RAllocator@@H@Z @ 2216 NONAME ; enum RAllocator::TAllocFail User::__DbgGetAllocFail(int)
 	?SetKeyOffset@RArrayBase@@IAEXH@Z @ 2217  NONAME ; void RArrayBase::SetKeyOffset(int)
-	
+	?Poll@RFastLock@@QAEHXZ @ 2218 NONAME ; public: int __thiscall RFastLock::Poll(void)
+	?Poll@RMutex@@QAEHXZ @ 2219 NONAME ; public: int __thiscall RMutex::Poll(void)
+	?Poll@RSemaphore@@QAEHXZ @ 2220 NONAME ; public: int __thiscall RSemaphore::Poll(void)
+	?Wait@RMutex@@QAEHH@Z @ 2221 NONAME ; public: int __thiscall RMutex::Wait(int)
+	?Wait@RFastLock@@QAEHH@Z @ 2222 NONAME ; public: int __thiscall RFastLock::Wait(int)
--- a/kernel/eka/bx86gcc/euseru.def	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/bx86gcc/euseru.def	Wed Jun 23 12:58:21 2010 +0100
@@ -2495,4 +2495,8 @@
 	_ZN10RAllocator17__DbgGetAllocFailEv @ 2494 NONAME
 	_ZN4User17__DbgGetAllocFailEi @ 2495 NONAME
 	_ZN10RArrayBase12SetKeyOffsetEi @ 2496 NONAME
-
+	_ZN10RSemaphore4PollEv @ 2497 NONAME ; RSemaphore::Poll()
+	_ZN6RMutex4WaitEi @ 2498 NONAME ; RMutex::Wait(int)
+	_ZN6RMutex4PollEv @ 2499 NONAME ; RMutex::Poll()
+	_ZN9RFastLock4PollEv @ 2500 NONAME ; RFastLock::Poll()
+	_ZN9RFastLock4WaitEi @ 2501 NONAME ; RFastLock::Wait(int)
--- a/kernel/eka/drivers/bsp/bld.inf	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/drivers/bsp/bld.inf	Wed Jun 23 12:58:21 2010 +0100
@@ -51,13 +51,9 @@
 ../resmanus/resmanus
 ../resmanus/resmanusextended
 
+#if !defined(WINS)
 ../power/smppower/idlehelper_lib.mmp
 ../power/smppower/sample_idlehandler/smpidlehandler_lib.mmp
-
-#endif
-
-#if !defined(X86)
-#if !defined(WINS)
 ../iic/iic
 #endif
 #endif
--- a/kernel/eka/drivers/power/smppower/idlehelper.cia	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/drivers/power/smppower/idlehelper.cia	Wed Jun 23 12:58:21 2010 +0100
@@ -85,12 +85,12 @@
 	LDREX(3,1);                                                       // r3 = iIdlingCpus
     asm("orr    r3,r0,r3");                                           // orr in mask for this CPU
     asm("cmp    r3,r2");                                              // compare to iAllEngagedCpusMask
-    asm("orreq  r3,r3,#%a0" : : "i" ((TUint32)TIdleSupport::KGlobalIdleFlag)); // if equal orr in KGlobalIdleFlag
+    asm("orreq  r3,r3,#%a0" : : "i" ((TInt)TIdleSupport::KGlobalIdleFlag)); // if equal orr in KGlobalIdleFlag
     STREX(12,3,1);
     asm("cmp    r12, #0 ");                                              // 
 	asm("bne    1b ");                                                   // write didn't succeed try again
     __DATA_MEMORY_BARRIER__(r12);
-    asm("and    r0,r3,#%a0" : : "i" ((TUint32)TIdleSupport::KGlobalIdleFlag));
+    asm("and    r0,r3,#%a0" : : "i" ((TInt)TIdleSupport::KGlobalIdleFlag));
 	__JUMP(,lr);
     asm("__iAllEngagedCpusMask:");
     asm(".word %a0" : : "i" ((TInt)&TIdleSupport::iAllEngagedCpusMask));//
@@ -149,7 +149,7 @@
 #endif		
     asm("2: ");
     asm("cmp r3,r5");                       // all (old stage does not equal new stage)
-    asm("bne 3f");                            // yup return
+    asm("ldmnefd sp!, {r4-r5,pc}");         // yup return
 #ifdef SYNCPOINT_WFE		
 	__DATA_MEMORY_BARRIER__(r12);        
 	ARM_WFE;
@@ -158,8 +158,6 @@
     __DATA_MEMORY_BARRIER__(r12);           // ensure read is observed
     asm("mov r3,r2,lsr #16");               // re-read new stage
     asm("b 2b");                            // loop back
-    asm("3: ");
-    asm("ldmfd sp!, {r4-r5,pc}");         // return
     }
 
 /** 
@@ -210,7 +208,7 @@
 #endif		
     asm("2: ");
     asm("ands r3,r2,#0x80000000");          // MSB set?	
-    asm("bne 4f");                          // yup return
+    asm("ldmnefd sp!, {r4,pc}");            // yup return
 #ifdef SYNCPOINT_WFE		
 	__DATA_MEMORY_BARRIER__(r12);
 	ARM_WFE;
@@ -224,8 +222,7 @@
     __DATA_MEMORY_BARRIER__(r12);           // ensure that's written
 	ARM_SEV;
 #endif	
-    asm("4:");
-    asm("ldmfd sp!, {r4,pc}");            // return
+    asm("ldmfd sp!, {r4,pc}");            // yup return
     }
 	
 	
@@ -295,7 +292,7 @@
     }
 #endif
 
-__NAKED__  TUint32 TIdleSupport::IntPending()  
+__NAKED__  TInt TIdleSupport::IntPending()  
     {
 	asm("ldr    r1,__KCPUIFAddr");//r1 = address of iBaseIntIfAddress
 	asm("ldr	r1, [r1]");//r1 = address of Hw GIC CPU interrupt interface base address
--- a/kernel/eka/drivers/power/smppower/idlehelper.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/drivers/power/smppower/idlehelper.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -70,7 +70,7 @@
 
 TBool TIdleSupport::IsIntPending()
 	{
-	return (IntPending()!=KNoInterruptsPending);
+	return ((TUint32)IntPending()!=KNoInterruptsPending);
 	}
 		
 /**
@@ -190,7 +190,8 @@
 /**
    mark a core as retired
 
-   @pre called by idle handler
+   @pre called by idle handler as part of idle entry before 
+          any syncpoint or calls to SetLocalAndCheckSetGlobalIdle
 */	
 void TIdleSupport::MarkCoreRetired(TUint32 aCpuMask)
     {
@@ -200,7 +201,8 @@
 
 /**
    mark a core as enaged
-   @pre called outside idle handler
+   @pre called outside idle handler (can be called in idle entry before
+        any syncpoint or calls to SetLocalAndCheckSetGlobalIdle)
  */	
 void TIdleSupport::MarkCoreEngaged(TUint32 aCpuMask)
     {
--- a/kernel/eka/eabi/euseru.def	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/eabi/euseru.def	Wed Jun 23 12:58:21 2010 +0100
@@ -2538,4 +2538,8 @@
 	_ZN10RAllocator17__DbgGetAllocFailEv @ 2537 NONAME
 	_ZN4User17__DbgGetAllocFailEi @ 2538 NONAME
 	_ZN10RArrayBase12SetKeyOffsetEi @ 2539 NONAME
-
+	_ZN10RSemaphore4PollEv @ 2540 NONAME ; RSemaphore::Poll()
+	_ZN6RMutex4WaitEi @ 2541 NONAME ; RMutex::Wait(int)
+	_ZN6RMutex4PollEv @ 2542 NONAME ; RMutex::Poll()
+	_ZN9RFastLock4PollEv @ 2543 NONAME ; RFastLock::Poll()
+	_ZN9RFastLock4WaitEi @ 2544 NONAME ; RFastLock::Wait(int)
--- a/kernel/eka/euser/epoc/arm/uc_utl.cia	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/euser/epoc/arm/uc_utl.cia	Wed Jun 23 12:58:21 2010 +0100
@@ -1321,6 +1321,42 @@
 #endif
 	}
 
+EXPORT_C __NAKED__ TInt RFastLock::Poll()
+	{
+	asm("1: ");
+	asm("add	r0, r0, #4 ");					// point to iCount
+
+#ifdef __CPU_ARM_HAS_LDREX_STREX
+	asm("2:		");
+	LDREX(		2, 0);							// read
+	asm("subs	r1, r2, #1 ");					// decrement
+	asm("bcs	3f ");							// if no borrow, lock cannot be obtained so bail out
+	STREX(		3, 1, 0);						// write
+	asm("teq	r3, #0 ");						// success?
+	asm("bne	2b ");							// no!
+	asm("mov	r0, #0 ");						// lock acquired so return KErrNone
+#ifdef __SMP__
+	__DATA_MEMORY_BARRIER__(r3);				// need acquire barrier
+#endif
+	__JUMP(,	lr);
+
+	asm("3:		");
+	asm("mov	r0, #%a0" : : "i" ((TInt)KErrTimedOut));	// else can't get it immediately so return KErrTimedOut
+	__JUMP(,	lr);
+#else	// no LDREX/STREX - ARM arch 5 CPU
+	asm("mov	r1, #1 ");						// 'looking' value
+	asm("swp	r1, r1, [r0] ");				// write looking value, read original
+	asm("subs	r2, r1, #1 ");					// decrement count
+	asm("strcc	r2, [r0] ");					// if borrow, was originally zero so write back -1 and return KErrNone
+	asm("movcc	r0, #0 ");						// got lock - return KErrNone
+ 	__JUMP(cc,	lr);
+
+	asm("strlt	r1, [r0] ");					// else if result<0 (i.e. wasn't looking value) write back original
+	asm("mov	r0, #%a0" : : "i" ((TInt)KErrTimedOut));	// else can't get it immediately so return KErrTimedOut
+ 	__JUMP(,	lr);
+#endif
+	}
+
 EXPORT_C __NAKED__ void RFastLock::Signal()
 	{
 	asm("1: ");
@@ -1356,6 +1392,122 @@
 	}
 
 
+/**
+Acquire the lock, if necessary waiting up to a specified maximum amount of time
+for it to become free.
+
+This function checks if the lock is currently held. If not the lock is marked
+as held by the current thread and the call returns immediately. If the lock is
+held by another thread the current thread will suspend until the lock becomes
+free or until the specified timeout period has elapsed.
+
+@param aTimeout The timeout value in microseconds
+
+@return KErrNone if the lock was acquired successfully.
+        KErrTimedOut if the timeout has expired.
+        KErrGeneral if the lock is being reset, i.e. the lock
+        is about to be deleted.
+        KErrArgument if aTimeout is zero or negative;
+        otherwise one of the other system wide error codes.
+*/
+EXPORT_C __NAKED__ TInt RFastLock::Wait(TInt /*aTimeout*/)
+	{
+	asm("stmfd	sp!, {r4-r6,lr} ");
+	asm("add	r4, r0, #4 ");					// r4->iCount
+	asm("subs	r5, r1, #0 ");					// r5=aTimeout
+	asm("mov	r6, #1000 ");
+	asm("movle	r0, #%a0" : : "i" ((TInt)KErrArgument));	
+	__CPOPRET(le, "r4-r6,");					// if aTimeout<=0 return KErrArgument
+	asm("1:		");
+
+#ifdef __CPU_ARM_HAS_LDREX_STREX
+	asm("2:		");
+	LDREX(		2, 4);							// read
+	asm("subs	r12, r2, #1 ");					// decrement
+	STREX(		3, 12, 4);						// write
+	asm("teq	r3, #0 ");						// success?
+	asm("bne	2b ");							// no!
+	asm("bcs	8f ");							// if no borrow from decrement, need to wait
+#ifdef __SMP__
+	__DATA_MEMORY_BARRIER__(r3);				// no need to wait, but still need acquire barrier
+#endif
+#else	// no LDREX/STREX - ARM arch 5 CPU
+	asm("mov	r2, #1 ");						// 'looking' value
+	asm("swp	r2, r2, [r4] ");				// write looking value, read original
+	asm("subs	r12, r2, #1 ");					// decrement count
+	asm("strlt	r12, [r4] ");					// if it becomes negative, no-one was looking
+	asm("bcs	8f ");							// if no borrow, we have to wait
+#endif
+	asm("mov	r0, #0 ");						// return KErrNone
+	__POPRET("r4-r6,");
+
+// We need to wait
+	asm("8:		");
+#ifndef __CPU_ARM_HAS_LDREX_STREX
+// no LDREX/STREX - ARM arch 5 CPU
+	asm("blt	3f ");							// if it wasn't 'looking' value, branch
+
+	// it was the 'looking' value, so wait a little bit
+	asm("cmp	r5, #0 ");
+	asm("ble	9f ");							// waited too long already, return KErrTimedOut
+	asm("sub	r5, r5, r6 ");
+	asm("mov	r6, #2000 ");
+	asm("mov	r0, #1000 ");					// wait 1ms
+	asm("cmp	r5, r0 ");
+	asm("movlt	r5, r0 ");						// remaining time at least 1ms
+	asm("bl "	CSM_ZN4User12AfterHighResE27TTimeIntervalMicroSeconds32);
+	asm("b		1b ");							// try again
+#endif
+	asm("3:		");
+	asm("sub	r0, r4, #4 ");					// r0=this
+	asm("mov	r1, r5 ");						// r1=aTimeout
+	asm("bl "	CSM_ZN10RSemaphore4WaitEi);	// try to acquire semaphore
+	asm("cmp	r0, #%a0" : : "i" ((TInt)KErrTimedOut));
+	__CPOPRET(ne, "r4-r6,");					// if wait didn't time out, return
+	asm("mov	r5, #1 ");						// any further timed waits will be for minimum period
+	
+	// Before we can return KErrTimedOut we must increment iCount (since we
+	// previously decremented it in anticipation of acquiring the lock).
+	// However we must not increment iCount if it would become zero, since
+	// the semaphore will have been signalled (to counterbalance the Wait()
+	// which timed out and thus never happened). This would result in two
+	// threads being able to acquire the lock simultaneously - one by
+	// decrementing iCount from 0 to -1 without looking at the semaphore,
+	// and the other by decrementing iCount from -1 to -2 and then absorbing
+	// the spurious semaphore signal.
+	//	orig = __e32_atomic_tas_ord32(&iCount, -1, 0, 1);	// don't release lock completely
+	//	if (orig < -1)
+	//		return KErrTimedOut;	// count corrected - don't need to touch semaphore
+	// lock is actually free at this point, try again to claim it
+	//	aTimeout = 1;
+#ifdef __CPU_ARM_HAS_LDREX_STREX
+#ifdef __SMP__
+	__DATA_MEMORY_BARRIER_Z__(r3);
+#endif
+	asm("4:		");
+	LDREX(		2, 4);							// read
+	asm("adds	r2, r2, #1 ");					// increment
+	asm("bge	3b ");							// if increment would make result >=0, wait again
+	STREX(		3, 2, 4);						// write
+	asm("teq	r3, #0 ");						// success?
+	asm("bne	4b ");							// no!
+#ifdef __SMP__
+	__DATA_MEMORY_BARRIER__(r3);
+#endif
+#else	// no LDREX/STREX - ARM arch 5 CPU
+	asm("mov	r2, #1 ");						// 'looking' value
+	asm("swp	r2, r2, [r4] ");				// write looking value, read original
+	asm("adds	r12, r2, #1 ");					// increment count
+	asm("strlt	r12, [r4] ");					// if still negative, count now fixed, so return KErrTimedOut
+	asm("streq	r2, [r4] ");					// else if not 'looking' value, write back original
+	asm("bge	3b ");							// if 'looking' value or -1, wait again
+#endif
+	asm("9:		");
+	asm("mov	r0, #%a0" : : "i" ((TInt)KErrTimedOut));	// return KErrTimedOut
+	__POPRET("r4-r6,");
+	}
+
+
 // Entry point stub to allow EKA1 binaries to be executed under EKA2
 // Only called when process is first loaded
 
--- a/kernel/eka/euser/epoc/win32/uc_utl.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/euser/epoc/win32/uc_utl.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -114,6 +114,21 @@
 		RSemaphore::Wait();
 	}
 
+EXPORT_C __NAKED__ TInt RFastLock::Poll()
+	{
+	_asm xor eax, eax
+	_asm xor edx, edx
+	_asm dec edx
+
+	/* if ([ecx+4]==0) { [ecx+4]=-1; ZF=1;} else {eax=[ecx+4]; ZF=0;} */
+	_asm lock cmpxchg [ecx+4], edx
+	_asm jz short fastlock_poll_done
+	_asm mov eax, -33
+
+	fastlock_poll_done:
+	_asm ret
+	}
+
 EXPORT_C void RFastLock::Signal()
 	{
 	if (InterlockedIncrement((LPLONG)&iCount) < 0)
--- a/kernel/eka/euser/epoc/x86/uc_utl.cia	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/euser/epoc/x86/uc_utl.cia	Wed Jun 23 12:58:21 2010 +0100
@@ -42,6 +42,22 @@
 	THISCALL_EPILOG0()
 	}
 
+EXPORT_C __NAKED__ TInt RFastLock::Poll()
+	{
+	THISCALL_PROLOG0()
+	asm("xor eax, eax ");
+	asm("xor edx, edx ");
+	asm("dec edx ");
+
+	/* if ([ecx+4]==0) { [ecx+4]=-1; ZF=1;} else {eax=[ecx+4]; ZF=0;} */
+	asm("lock cmpxchg [ecx+4], edx ");
+	asm("jz short fastlock_poll_done ");
+	asm("mov eax, %0": : "i"(KErrTimedOut));
+
+	asm("fastlock_poll_done: ");
+	THISCALL_EPILOG0()
+	}
+
 EXPORT_C __NAKED__ void RFastLock::Signal()
 	{
 	THISCALL_PROLOG0()
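
The Poll() implementations above (ARM LDREX/STREX, WINS and X86 lock cmpxchg) all share one lock-free fast path: move iCount from 0 to -1 if the lock is free, otherwise fail immediately with KErrTimedOut. A minimal portable sketch of that logic, assuming an __e32_atomic_tas_acq32 helper with the same semantics as the __e32_atomic_tas_ord32 call used by RFastLock::Wait later in this changeset:

#include <e32atomics.h>
#include <e32err.h>

// Sketch only: iCount==0 means the lock is free, negative means held/contended.
TInt FastLockPollSketch(volatile TInt& aCount)
	{
	// if (aCount >= 0) aCount += -1; else aCount += 0; returns the original value
	TInt orig = __e32_atomic_tas_acq32(&aCount, 0, -1, 0);
	return (orig == 0) ? KErrNone : KErrTimedOut;	// acquired only if it was free
	}
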
--- a/kernel/eka/euser/us_exec.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/euser/us_exec.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -3768,8 +3768,59 @@
 */
 EXPORT_C void RMutex::Wait()
 	{
-
-	Exec::MutexWait(iHandle);
+	Exec::MutexWait(iHandle, 0);
+	}
+
+
+
+
+/**
+Acquire the mutex if it is currently free, but don't wait for it.
+
+This function checks if the mutex is currently held. If not the mutex is marked
+as held by the current thread and the call returns immediately indicating
+success. If the mutex is held by another thread the call returns immediately
+indicating failure. If the mutex is already held by the current thread a count
+is maintained of how many times the thread has acquired the mutex.
+
+@return	KErrNone if the mutex was acquired
+		KErrTimedOut if the mutex could not be acquired
+        KErrGeneral if the mutex is being reset, i.e. the mutex
+        is about to be deleted.
+*/
+EXPORT_C TInt RMutex::Poll()
+	{
+	return Exec::MutexWait(iHandle, -1);
+	}
+
+
+
+
+/**
+Acquire the mutex, if necessary waiting up to a specified maximum amount of time
+for it to become free.
+
+This function checks if the mutex is currently held. If not the mutex is marked
+as held by the current thread and the call returns immediately. If the mutex is
+held by another thread the current thread will suspend until the mutex becomes
+free or until the specified timeout period has elapsed. If the mutex is already
+held by the current thread a count is maintained of how many times the thread
+has acquired the mutex.
+
+@param aTimeout The timeout value in microseconds
+
+@return KErrNone if the mutex was acquired successfully.
+        KErrTimedOut if the timeout has expired.
+        KErrGeneral if the mutex is being reset, i.e. the mutex
+        is about to be deleted.
+        KErrArgument if aTimeout is negative;
+        otherwise one of the other system wide error codes.
+*/
+EXPORT_C TInt RMutex::Wait(TInt aTimeout)
+	{
+	if (aTimeout>=0)
+		return Exec::MutexWait(iHandle, aTimeout);
+	return KErrArgument;
 	}
 
 
@@ -4308,7 +4359,6 @@
 
 
 
-EXPORT_C void RSemaphore::Wait()
 /**
 Waits for a signal on the semaphore.
 
@@ -4324,19 +4374,16 @@
 
 If the semaphore is deleted, all threads waiting on that semaphore are released.
 */
-	{
-
+EXPORT_C void RSemaphore::Wait()
+	{
 	Exec::SemaphoreWait(iHandle, 0);
 	}
 
 
-
-
-EXPORT_C TInt RSemaphore::Wait(TInt aTimeout)
 /**
 Waits for a signal on the semaphore, or a timeout.
 
-@param aTimeout The timeout value in micoseconds
+@param aTimeout The timeout value in microseconds
 
 @return KErrNone if the wait has completed normally.
         KErrTimedOut if the timeout has expired.
@@ -4345,12 +4392,26 @@
         KErrArgument if aTimeout is negative;
         otherwise one of the other system wide error codes.
 */
-	{
-
-	return Exec::SemaphoreWait(iHandle, aTimeout);
-	}
-
-
+EXPORT_C TInt RSemaphore::Wait(TInt aTimeout)
+	{
+	if (aTimeout>=0)
+		return Exec::SemaphoreWait(iHandle, aTimeout);
+	return KErrArgument;
+	}
+
+
+/**
+Acquires the semaphore if that is possible without waiting.
+
+@return KErrNone if the semaphore was acquired successfully
+        KErrTimedOut if the semaphore could not be acquired
+        KErrGeneral if the semaphore is being reset, i.e. the semaphore
+        is about to be deleted.
+*/
+EXPORT_C TInt RSemaphore::Poll()
+	{
+	return Exec::SemaphoreWait(iHandle, -1);
+	}
 
 
 EXPORT_C void RSemaphore::Signal()
@@ -4389,6 +4450,54 @@
 
 
 
+#ifndef __CPU_ARM
+/**
+Acquire the lock, if necessary waiting up to a specified maximum amount of time
+for it to become free.
+
+This function checks if the lock is currently held. If not the lock is marked
+as held by the current thread and the call returns immediately. If the lock is
+held by another thread the current thread will suspend until the lock becomes
+free or until the specified timeout period has elapsed.
+
+@param aTimeout The timeout value in microseconds
+
+@return KErrNone if the lock was acquired successfully.
+        KErrTimedOut if the timeout has expired.
+        KErrGeneral if the lock is being reset, i.e. the lock
+        is about to be deleted.
+        KErrArgument if aTimeout is zero or negative;
+        otherwise one of the other system wide error codes.
+*/
+EXPORT_C TInt RFastLock::Wait(TInt aTimeout)
+	{
+	if (aTimeout<=0)
+		return KErrArgument;
+	TInt orig = __e32_atomic_add_acq32(&iCount, TUint32(-1));
+	if (orig == 0)
+		return KErrNone;
+	FOREVER
+		{
+		TInt r = Exec::SemaphoreWait(iHandle, aTimeout);
+		if (r != KErrTimedOut)	// got lock OK or lock deleted
+			return r;
+		// Before we can return KErrTimedOut we must increment iCount (since we
+		// previously decremented it in anticipation of acquiring the lock).
+		// However we must not increment iCount if it would become zero, since
+		// the semaphore will have been signalled (to counterbalance the Wait()
+		// which timed out and thus never happened). This would result in two
+		// threads being able to acquire the lock simultaneously - one by
+		// decrementing iCount from 0 to -1 without looking at the semaphore,
+		// and the other by decrementing iCount from -1 to -2 and then absorbing
+		// the spurious semaphore signal.
+		orig = __e32_atomic_tas_ord32(&iCount, -1, 0, 1);	// don't release lock completely
+		if (orig < -1)
+			return KErrTimedOut;	// count corrected - don't need to touch semaphore
+		// lock is actually free at this point, try again to claim it
+		aTimeout = 1;
+		}
+	}
+#endif
 
 EXPORT_C RCriticalSection::RCriticalSection()
 	: iBlocked(1)
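
With the exec-side changes further down, these additions give RSemaphore, RMutex and RFastLock a non-blocking Poll(), and RMutex and RFastLock a timed Wait(aTimeout). An illustrative usage sketch, with CreateLocal() error checking trimmed:

#include <e32std.h>

// Sketch only: exercises the new Poll()/timed Wait() overloads.
void TimedLockingExample()
	{
	RMutex mutex;
	RSemaphore sem;
	mutex.CreateLocal();
	sem.CreateLocal(1);

	if (sem.Poll() == KErrNone)		// take the semaphore only if a count is free right now
		sem.Signal();

	TInt r = mutex.Wait(500000);	// block for at most 500ms (timeout in microseconds)
	if (r == KErrNone)
		mutex.Signal();				// acquired within the timeout
	// r==KErrTimedOut: not acquired in time; r==KErrGeneral: mutex was reset

	sem.Close();
	mutex.Close();
	}
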
--- a/kernel/eka/include/drivers/smppower/idlehelper.h	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/include/drivers/smppower/idlehelper.h	Wed Jun 23 12:58:21 2010 +0100
@@ -171,7 +171,7 @@
 	static void ClearIdleIPI();
 	static void DoWFI();//puts current CPU in wait for interrupt state
 	static TBool IsIntPending();
-	static TUint32	IntPending();
+	static TInt	IntPending();
 	static TUint32 GetTimerCount();//HW timer can be used for tracing
 	//Atomic checks used to synchronise cores going idle
 	static TBool ClearLocalAndCheckGlobalIdle(TUint32);
--- a/kernel/eka/include/e32cia.h	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/include/e32cia.h	Wed Jun 23 12:58:21 2010 +0100
@@ -39,6 +39,7 @@
 #define CSM_Z15PanicStrayEventv " PanicStrayEvent__Fv"
 #define CSM_ZN8CServer210BadMessageERK9RMessage2 " BadMessage__8CServer2RC9RMessage2"
 #define CSM_ZN10RSemaphore4WaitEv " Wait__10RSemaphore"
+#define CSM_ZN10RSemaphore4WaitEi " Wait__10RSemaphorei"
 #define CSM_Z34PanicCObjectConFindIndexOutOfRangev " PanicCObjectConFindIndexOutOfRange__Fv"
 #define CSM_ZN7TRegion5ClearEv " Clear__7TRegion"
 #define CSM_ZN4User5AllocEi " Alloc__4Useri"
@@ -73,6 +74,7 @@
 #define CSM_Z15PanicStrayEventv " __cpp(PanicStrayEvent)"
 #define CSM_ZN8CServer210BadMessageERK9RMessage2 " __cpp(CServer2::BadMessage)"
 #define CSM_ZN10RSemaphore4WaitEv " __cpp(static_cast<void (RSemaphore::*) ()>(&RSemaphore::Wait))"
+#define CSM_ZN10RSemaphore4WaitEi " __cpp(static_cast<TInt (RSemaphore::*) (TInt)>(&RSemaphore::Wait))"
 #define CSM_Z34PanicCObjectConFindIndexOutOfRangev " __cpp(PanicCObjectConFindIndexOutOfRange)"
 #define CSM_ZN7TRegion5ClearEv " __cpp(TRegion::Clear)"
 #define CSM_ZN4User5AllocEi " __cpp(User::Alloc)"
@@ -106,6 +108,7 @@
 #define CSM_Z15PanicStrayEventv " _Z15PanicStrayEventv"
 #define CSM_ZN8CServer210BadMessageERK9RMessage2 " _ZN8CServer210BadMessageERK9RMessage2"
 #define CSM_ZN10RSemaphore4WaitEv " _ZN10RSemaphore4WaitEv"
+#define CSM_ZN10RSemaphore4WaitEi " _ZN10RSemaphore4WaitEi"
 #define CSM_Z34PanicCObjectConFindIndexOutOfRangev " _Z34PanicCObjectConFindIndexOutOfRangev"
 #define CSM_ZN7TRegion5ClearEv " _ZN7TRegion5ClearEv"
 #define CSM_ZN4User5AllocEi " _ZN4User5AllocEi"
--- a/kernel/eka/include/e32cmn.h	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/include/e32cmn.h	Wed Jun 23 12:58:21 2010 +0100
@@ -2488,6 +2488,7 @@
 	IMPORT_C TInt Open(TInt aArgumentIndex, TOwnerType aType=EOwnerProcess);
 	IMPORT_C void Wait();
 	IMPORT_C TInt Wait(TInt aTimeout);	// timeout in microseconds
+	IMPORT_C TInt Poll();		// acquire the semaphore if possible, but don't block
 	IMPORT_C void Signal();
 	IMPORT_C void Signal(TInt aCount);
 #endif
@@ -2511,6 +2512,8 @@
 	inline RFastLock();
 	IMPORT_C TInt CreateLocal(TOwnerType aType=EOwnerProcess);
 	IMPORT_C void Wait();
+	IMPORT_C TInt Wait(TInt aTimeout);	// timeout in microseconds
+	IMPORT_C TInt Poll();		// acquire the lock if possible, but don't block
 	IMPORT_C void Signal();
 private:
 	TInt iCount;
--- a/kernel/eka/include/e32std.h	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/include/e32std.h	Wed Jun 23 12:58:21 2010 +0100
@@ -3314,6 +3314,8 @@
 	IMPORT_C TInt Open(RMessagePtr2 aMessage,TInt aParam,TOwnerType aType=EOwnerProcess);
 	IMPORT_C TInt Open(TInt aArgumentIndex, TOwnerType aType=EOwnerProcess);
 	IMPORT_C void Wait();
+	IMPORT_C TInt Poll();		// acquire the lock if possible, but don't block
+	IMPORT_C TInt Wait(TInt aTimeout);	// timeout in microseconds
 	IMPORT_C void Signal();
 	IMPORT_C TBool IsHeld();
 	};
@@ -4770,6 +4772,7 @@
     IMPORT_C static void __DbgMarkCheck(TBool aKernel, TBool aCountAll, TInt aCount, const TUint8* aFileName, TInt aLineNum);
     IMPORT_C static TUint32 __DbgMarkEnd(TBool aKernel, TInt aCount);
     IMPORT_C static void __DbgSetAllocFail(TBool aKernel, RAllocator::TAllocFail aFail, TInt aRate);
+    IMPORT_C static RAllocator::TAllocFail __DbgGetAllocFail(TBool aKernel);
     IMPORT_C static void __DbgSetBurstAllocFail(TBool aKernel, RAllocator::TAllocFail aFail, TUint aRate, TUint aBurst);
 	IMPORT_C static TUint __DbgCheckFailure(TBool aKernel);
 	IMPORT_C static void PanicUnexpectedLeave(); /**< @internalComponent */
--- a/kernel/eka/include/e32ver.h	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/include/e32ver.h	Wed Jun 23 12:58:21 2010 +0100
@@ -28,7 +28,7 @@
 
 const TInt KE32MajorVersionNumber=2;
 const TInt KE32MinorVersionNumber=0;
-const TInt KE32BuildVersionNumber=3097;
+const TInt KE32BuildVersionNumber=3098;
 
 const TInt KMachineConfigurationMajorVersionNumber=1;
 const TInt KMachineConfigurationMinorVersionNumber=0;
--- a/kernel/eka/include/kernel/kern_priv.h	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/include/kernel/kern_priv.h	Wed Jun 23 12:58:21 2010 +0100
@@ -745,7 +745,7 @@
 	void ChangePendingThreadPriority(DThread* aThread, TInt aNewPriority);
 	void WakeUpNextThread();
 public:
-	TInt Wait();
+	TInt Wait(TInt aTimeout=0);	// 0 means wait forever, -1 means poll, n>0 means n nanokernel ticks
 	void Signal();
 	void Reset();
 public:
--- a/kernel/eka/include/nkern/nk_cpu.h	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/include/nkern/nk_cpu.h	Wed Jun 23 12:58:21 2010 +0100
@@ -688,6 +688,8 @@
 #error SMP not allowed without thread ID registers
 #endif
 
+#define	__SRATIO_MACHINE_CODED__
+
 #endif	//	end of __CPU_ARM
 
 #if defined(__CPU_X86) && defined(__EPOC32__)
--- a/kernel/eka/include/nkern/nklib.h	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/include/nkern/nklib.h	Wed Jun 23 12:58:21 2010 +0100
@@ -85,6 +85,39 @@
 	};
 
 
+/**
+@internalComponent
+
+Ratio represented = iM*2^iX
+e.g. 1.0 has iM=0x80000000, iX=-31
+*/
+struct SRatio
+	{
+	void Set(TUint32 aInt, TInt aDivisorExp=0);		// set this ratio to aInt/2^aDivisorExp
+	TInt Reciprocal();								// this = 1/this
+	TInt Mult(TUint32& aInt32);						// Multiply aInt32 by this ratio
+//	TInt Mult(TUint64& aInt64);						// Multiply aInt64 by this ratio
+
+	TUint32		iM;		// mantissa, normalised so bit 31=1
+	TInt16		iX;		// -exponent.
+	TUint8		iSpare1;
+	TUint8		iSpare2;
+	};
+
+/**
+@internalComponent
+
+Ratio and inverse ratio
+*/
+struct SRatioInv
+	{
+	void Set(const SRatio* aR);
+
+	SRatio		iR;
+	SRatio		iI;
+	};
+
+
 #if defined(__VC32__) || defined(__CW32__)
 extern "C"
 /** @internalComponent */
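
For orientation, a few concrete encodings under this scheme (they can be checked against the Set() and Reciprocal() implementations added in nkern/nklib.cpp later in this changeset):

	1.0  ->  iM = 0x80000000, iX = -31    (0x80000000 * 2^-31 = 1)
	0.5  ->  iM = 0x80000000, iX = -32    (0x80000000 * 2^-32 = 0.5)
	1/3  ->  iM = 0xAAAAAAAB, iX = -33    (Reciprocal() of Set(3); 0xAAAAAAAB * 2^-33 ~= 0.333333)
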
--- a/kernel/eka/include/nkernsmp/arm/ncern.h	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/include/nkernsmp/arm/ncern.h	Wed Jun 23 12:58:21 2010 +0100
@@ -97,20 +97,6 @@
 
 __ASSERT_COMPILE(sizeof(SPerCpuUncached) <= 8*sizeof(TUint64));
 
-/** Timer frequency specification
-
-Stores a frequency as a fraction of a (separately stored) maximum.
-The frequency must be at least 1/256 of the maximum.
-
-@internalTechnology
-@prototype
-*/
-struct STimerMult
-	{
-	TUint32		iFreq;						// frequency as a fraction of maximum possible, multiplied by 2^32
-	TUint32		iInverse;					// 2^24/(iFreq/2^32) = 2^56/iFreq
-	};
-
 /** Function to power up a CPU
 @publishedPartner
 @prototype
@@ -123,6 +109,12 @@
 */
 typedef void (*TCpuPowerDownFn)(TInt aCpu, SPerCpuUncached* aU);
 
+/** Function to notify changes to system clock frequencies
+@publishedPartner
+@prototype
+*/
+typedef TInt (*TFrequencyChangeFn)();
+
 /** Variant interface block
 @internalTechnology
 @prototype
@@ -138,11 +130,13 @@
 	TLinAddr	iGicCpuIfcAddr;				// address of GIC CPU interface (must be same for all CPUs)
 	TLinAddr	iLocalTimerAddr;			// address of per-CPU timer (must be same for all CPUs)
 	TLinAddr	iGlobalTimerAddr;			// address of global timer if it exists
-	volatile STimerMult*	iTimerMult[KMaxCpus];	// timer[i] frequency / iMaxTimerClock * 2^32
-	volatile TUint32*		iCpuMult[KMaxCpus];		// CPU[i] frequency / iMaxCpuClock * 2^32
+	SRatio*		iTimerFreqR[KMaxCpus];		// timer[i] frequency as a fraction of iMaxTimerClock
+	SRatio*		iCpuFreqR[KMaxCpus];		// CPU[i] frequency as a fraction of iMaxCpuClock
 	UPerCpuUncached*		iUncached[KMaxCpus];	// Pointer to uncached memory for each CPU
 	TCpuPowerUpFn			iCpuPowerUpFn;			// function used to power up a retired CPU (NULL if core control not supported)
 	TCpuPowerDownFn			iCpuPowerDownFn;		// function used to power down a CPU (NULL if power down done within idle handler itself)
+	SRatio*		iGTimerFreqR;				// global timer frequency as a fraction of iMaxTimerClock
+	TFrequencyChangeFn		iFrqChgFn;		// function to notify frequency changes
 	};
 
 // End of file
--- a/kernel/eka/include/nkernsmp/arm/nk_plat.h	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/include/nkernsmp/arm/nk_plat.h	Wed Jun 23 12:58:21 2010 +0100
@@ -66,35 +66,12 @@
 	TLinAddr			iUndStackTop;			// Top of UND stack for this CPU
 	TLinAddr			iFiqStackTop;			// Top of FIQ stack for this CPU
 	TLinAddr			iIrqStackTop;			// Top of IRQ stack for this CPU
-	volatile TUint32	iCpuFreqM;				// CPU frequency / Max CPU frequency (mantissa, bit 31=1) f/fmax=mantissa/2^shift
-	volatile TInt		iCpuFreqS;				// CPU frequency / Max CPU frequency (shift)
-	volatile TUint32	iCpuPeriodM;			// Max CPU frequency / CPU frequency (mantissa, bit 31=1) fmax/f=mantissa/2^shift
-	volatile TInt		iCpuPeriodS;			// Max CPU frequency / CPU frequency (shift)
-	volatile TUint32	iNTimerFreqM;			// Nominal Timer frequency / Max Timer frequency (mantissa, bit 31=1) f/fmax=mantissa/2^shift
-	volatile TInt		iNTimerFreqS;			// Nominal Timer frequency / Max Timer frequency (shift)
-	volatile TUint32	iNTimerPeriodM;			// Nominal Max Timer frequency / Timer frequency (mantissa, bit 31=1) fmax/f=mantissa/2^shift
-	volatile TInt		iNTimerPeriodS;			// Nominal Max Timer frequency / Timer frequency (shift)
-	volatile TUint32	iTimerFreqM;			// Timer frequency / Max Timer frequency (mantissa, bit 31=1) f/fmax=mantissa/2^shift
-	volatile TInt		iTimerFreqS;			// Timer frequency / Max Timer frequency (shift)
-	volatile TUint32	iTimerPeriodM;			// Max Timer frequency / Timer frequency (mantissa, bit 31=1) fmax/f=mantissa/2^shift
-	volatile TInt		iTimerPeriodS;			// Max Timer frequency / Timer frequency (shift)
-	volatile TUint64	iLastSyncTime;			// Timestamp at which last reference check occurred
-	volatile TUint32	iTicksSinceLastSync;	// Local timer ticks between last ref. check and next zero crossing
-	volatile TUint32	iLastTimerSet;			// Value last written to local timer counter
-	volatile TUint32	iGapEstimate;			// 2^16 * estimated gap in ticks whenever local timer counter is read then written
-	volatile TUint32	iGapCount;				// count of local timer counter RMW ops
-	volatile TUint32	iTotalTicks;			// programmed ticks since last sync
-	volatile TUint32	iDitherer;				// PRNG state for dither generation
-	volatile TInt		iFreqErrorEstimate;		// Current frequency offset between local timer and reference
-	volatile TInt		iFreqErrorLimit;		// Saturation level for frequency offset
-	volatile TInt64		iErrorIntegrator;		// Accumulator to integrate time error measurements
-	volatile TUint64	iRefAtLastCorrection;	// Value of reference timer at last correction
-	volatile TUint8		iM;						// Value controlling loop bandwidth (larger->lower loop bandwidth)
-	volatile TUint8		iN;						// Number of timer ticks between corrections = 2^iN
-	volatile TUint8		iD;						// Value controlling loop damping
-	volatile TUint8		iSSXP1;
+	SRatioInv* volatile	iNewCpuFreqRI;			// set when CPU frequency has been changed
+	SRatioInv* volatile	iNewTimerFreqRI;		// set when CPU local timer frequency has been changed
+	SRatioInv			iCpuFreqRI;				// Ratio of CPU frequency to maximum possible CPU frequency
+	SRatioInv			iTimerFreqRI;			// Ratio of CPU local timer frequency to maximum possible
 
-	TUint32				iSSXP2[19];
+	TUint32				iSSXP2[36];
 	TUint64				iSSXP3;					// one 64 bit value to guarantee alignment
 	};
 
@@ -108,7 +85,12 @@
 	GicDistributor*		iGicDistAddr;			// Address of GIC Distributor (also in TSubScheduler)
 	GicCpuIfc*			iGicCpuIfcAddr;			// Address of GIC CPU Interface (also in TSubScheduler)
 	ArmLocalTimer*		iLocalTimerAddr;		// Address of local timer registers (also in TSubScheduler)
-	TUint32				iSXP2[8];
+
+	SRatioInv			iGTimerFreqRI;			// ratio of global timer frequency to maximum possible
+	TUint64				iCount0;				// global timer count at last frequency change
+	TUint64				iTimestamp0;			// timestamp at last frequency change
+
+	TUint32				iSXP2[16];
 	};
 
 
--- a/kernel/eka/include/nkernsmp/nk_priv.h	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/include/nkernsmp/nk_priv.h	Wed Jun 23 12:58:21 2010 +0100
@@ -629,6 +629,7 @@
 __ASSERT_COMPILE(sizeof(TSubScheduler)==(1<<KSubSchedulerShift));	// make it a nice power of 2 size for easy indexing
 
 struct SCoreControlAction;
+struct SVariantInterfaceBlock;
 
 /**
 @internalComponent
@@ -669,6 +670,7 @@
 	static TBool CoreControlSupported();
 	static void CCInitiatePowerUp(TUint32 aCores);
 	static void CCIndirectPowerDown(TAny*);
+	static void DoFrequencyChanged(TAny*);
 public:
 	TLinAddr		iMonitorExceptionHandler;
 	TLinAddr		iProcessHandler;
@@ -727,11 +729,13 @@
 	TDfc			iCCRequestDfc;				// runs when a request is made to change the number of active cores
 	TDfc			iCCPowerDownDfc;			// runs when indirect power down of core(s) is required
 	TDfc			iCCIpiReactIDFC;			// runs when an IPI needs to wake up a core
+	TDfc			iFreqChgDfc;				// runs when frequency changes required
 
 	TSubScheduler*	iPoweringOff;				// CPU last to power off
 	TUint32			iDetachCount;				// detach count before power off
 
-	TUint32			i_Scheduler_Padding[54];
+	SVariantInterfaceBlock* iVIB;
+	TUint32			i_Scheduler_Padding[29];
 	};
 
 __ASSERT_COMPILE(!(_FOFF(TScheduler,iGenIPILock)&7));
@@ -739,7 +743,7 @@
 __ASSERT_COMPILE(!(_FOFF(TScheduler,iIdleBalanceLock)&7));
 __ASSERT_COMPILE(!(_FOFF(TScheduler,iEnumerateLock)&7));
 __ASSERT_COMPILE(!(_FOFF(TScheduler,iBalanceListLock)&7));
-__ASSERT_COMPILE(sizeof(TSchedulerX)==16*4);
+__ASSERT_COMPILE(sizeof(TSchedulerX)==32*4);
 __ASSERT_COMPILE(sizeof(TScheduler)==1024);
 
 extern TScheduler TheScheduler;
--- a/kernel/eka/include/nkernsmp/nkern.h	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/include/nkernsmp/nkern.h	Wed Jun 23 12:58:21 2010 +0100
@@ -1243,7 +1243,7 @@
 class TStopIPI : public TGenericIPI
 	{
 public:
-	void StopCPUs();
+	TUint32 StopCPUs();
 	void ReleaseCPUs();
 	static void Isr(TGenericIPI*);
 public:
--- a/kernel/eka/include/nkernsmp/x86/ncern.h	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/include/nkernsmp/x86/ncern.h	Wed Jun 23 12:58:21 2010 +0100
@@ -36,20 +36,6 @@
 	{
 	};
 
-/** Timer frequency specification
-
-Stores a frequency as a fraction of a (separately stored) maximum.
-The frequency must be at least 1/256 of the maximum.
-
-@internalTechnology
-@prototype
-*/
-struct STimerMult
-	{
-	TUint32		iFreq;						// frequency as a fraction of maximum possible, multiplied by 2^32
-	TUint32		iInverse;					// 2^24/(iFreq/2^32) = 2^56/iFreq
-	};
-
 /** Variant interface block
 @internalTechnology
 @prototype
@@ -59,8 +45,9 @@
 	TUint64		iMaxCpuClock;				// maximum possible CPU clock frequency on this system
 	TUint32		iTimestampFreq;				// rate at which timestamp increments
 	TUint32		iMaxTimerClock;				// maximum possible local timer clock frequency
-	volatile STimerMult* iTimerMult[KMaxCpus];	// timer[i] frequency as a fraction of iMaxTimerClock
-	volatile TUint32* iCpuMult[KMaxCpus];	// CPU[i] frequency / iMaxCpuClock * 2^32
+	SRatio*		iTimerFreqR[KMaxCpus];		// timer[i] frequency as a fraction of iMaxTimerClock
+	SRatio*		iCpuFreqR[KMaxCpus];		// CPU[i] frequency as a fraction of iMaxCpuClock
+	SRatio*		iTimestampFreqR;			// timestamp counter frequency as a fraction of
 	};
 
 // End of file
--- a/kernel/eka/include/nkernsmp/x86/nk_plat.h	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/include/nkernsmp/x86/nk_plat.h	Wed Jun 23 12:58:21 2010 +0100
@@ -42,21 +42,12 @@
 	volatile TInt		iIrqNestCount;			// IRQ nest count for this CPU (starts at -1)
 	TLinAddr			iIrqStackTop;			// Top of IRQ stack for this CPU
 	TX86Tss*			iTss;					// Address of TSS for this CPU
-	volatile TUint32	iCpuFreqM;				// CPU frequency / Max CPU frequency (mantissa, bit 31=1) f/fmax=M/2^(S+32) <=1
-	volatile TInt		iCpuFreqS;				// CPU frequency / Max CPU frequency (shift)
-	volatile TUint32	iCpuPeriodM;			// Max CPU frequency / CPU frequency (mantissa, bit 31=1) fmax/f=M/2^S >=1
-	volatile TInt		iCpuPeriodS;			// Max CPU frequency / CPU frequency (shift)
-	volatile TUint32	iNTimerFreqM;			// Nominal Timer frequency / Max Timer frequency (mantissa, bit 31=1) f/fmax=M/2^(S+32) <=1
-	volatile TInt		iNTimerFreqS;			// Nominal Timer frequency / Max Timer frequency (shift)
-	volatile TUint32	iNTimerPeriodM;			// Nominal Max Timer frequency / Timer frequency (mantissa, bit 31=1) fmax/f=M/2^S >=1
-	volatile TInt		iNTimerPeriodS;			// Nominal Max Timer frequency / Timer frequency (shift)
-	volatile TUint32	iTimerFreqM;			// Timer frequency / Max Timer frequency (mantissa, bit 31=1) f/fmax=M/2^(S+32) <=1
-	volatile TInt		iTimerFreqS;			// Timer frequency / Max Timer frequency (shift)
-	volatile TUint32	iTimerPeriodM;			// Max Timer frequency / Timer frequency (mantissa, bit 31=1) fmax/f=M/2^S >=1
-	volatile TInt		iTimerPeriodS;			// Max Timer frequency / Timer frequency (shift)
+	SRatioInv			iCpuFreqRI;				// Ratio of CPU frequency to maximum possible CPU frequency
+	SRatioInv			iTimerFreqRI;			// Ratio of CPU local timer frequency to maximum possible
+
 	volatile TUint64HL	iTimestampOffset;		// 64 bit value to add to CPU TSC to give NKern::Timestamp()
 
-	TUint32				iSSXP2[32];
+	TUint32				iSSXP2[36];
 	TUint64				iSSXP3;					// one 64 bit value to guarantee alignment
 	};
 
@@ -64,7 +55,7 @@
 struct TSchedulerX
 	{
 	TUint64				iTimerMax;				// Maximum per-CPU timer frequency (after prescaling)
-	TUint32				iSXP[14];
+	TUint32				iSXP[30];
 	};
 
 
--- a/kernel/eka/include/u32std.h	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/include/u32std.h	Wed Jun 23 12:58:21 2010 +0100
@@ -401,7 +401,7 @@
 	};
 
 /** @test */
-enum TKernelHeapDebugFunction {EDbgMarkStart,EDbgMarkCheck,EDbgMarkEnd,EDbgSetAllocFail,EDbgSetBurstAllocFail,EDbgCheckFailure};
+enum TKernelHeapDebugFunction {EDbgMarkStart,EDbgMarkCheck,EDbgMarkEnd,EDbgSetAllocFail,EDbgSetBurstAllocFail,EDbgCheckFailure,EDbgGetAllocFail};
 
 /** @test */
 class TKernelHeapMarkCheckInfo
@@ -508,6 +508,8 @@
 	EKernelConfigSMPUnsafeCPU0   = 1<<13,				// Slow compatibility mode: all SMP-unsafe processes run on CPU 0 only
 	EKernelConfigSMPCrazyInterrupts = 1<<14,			// Enables CPU target rotation for HW Interrupts.
 
+	EKernelConfigSMPLockKernelThreadsCore0 = 1<<15,    // locks all kernel side threads to CPU 0
+
 	EKernelConfigDisableAPs = 1u<<30,
 
 	EKernelConfigTest = 1u<<31,							// Only used by test code for __PLATSEC_UNLOCKED__
--- a/kernel/eka/kernel/execs.txt	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/kernel/execs.txt	Wed Jun 23 12:58:21 2010 +0100
@@ -680,6 +680,8 @@
 
 slow {
 	name = MutexWait
+	return = TInt
+	arg2 = TInt
 	handle = mutex
 }
 
--- a/kernel/eka/kernel/sexec.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/kernel/sexec.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -96,14 +96,26 @@
 	return aChunk->Top();
 	}
 
-void ExecHandler::MutexWait(DMutex* aMutex)
+TInt ExecHandler::MutexWait(DMutex* aMutex, TInt aTimeout)
 //
 // Wait for the mutex.
 //
 	{
-
-//	__KTRACE_OPT(KEXEC,Kern::Printf("Exec::MutexWait"));
-	aMutex->Wait();
+	if (aTimeout)	// 0 means wait forever, -1 means poll
+		{
+		if (aTimeout<-1)
+			{
+			return KErrArgument;
+			}
+		if (aTimeout>0)
+			{
+			// Convert microseconds to NTimer ticks, rounding up
+			TInt ntp = NKern::TickPeriod();
+			aTimeout += ntp-1;
+			aTimeout /= ntp;
+			}
+		}
+	return aMutex->Wait(aTimeout);
 	}
 
 void ExecHandler::MutexSignal(DMutex* aMutex)
@@ -555,20 +567,21 @@
 // Wait for a signal.
 //
 	{
-
 	__KTRACE_OPT(KEXEC,Kern::Printf("Exec::SemaphoreWait"));
-	if (aTimeout)
+	if (aTimeout)	// 0 means wait forever, -1 means poll
 		{
-		if (aTimeout<0)
+		if (aTimeout<-1)
 			{
 			NKern::UnlockSystem();
 			return KErrArgument;
 			}
-
-		// Convert microseconds to NTimer ticks, rounding up
-		TInt ntp = NKern::TickPeriod();
-		aTimeout += ntp-1;
-		aTimeout /= ntp;
+		if (aTimeout>0)
+			{
+			// Convert microseconds to NTimer ticks, rounding up
+			TInt ntp = NKern::TickPeriod();
+			aTimeout += ntp-1;
+			aTimeout /= ntp;
+			}
 		}
 	return aSemaphore->Wait(aTimeout);
 	}
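
Both exec handlers now treat aTimeout==0 as "wait forever" and aTimeout==-1 as "poll", and convert any positive microsecond timeout to nanokernel ticks rounded up, so a short but non-zero timeout never collapses to an immediate poll. A standalone sketch of the conversion (the tick period in microseconds is what NKern::TickPeriod() returns):

// Sketch only: microseconds -> nanokernel ticks, rounding up.
// 0 (wait forever) and -1 (poll) pass through unchanged; values below -1
// are rejected with KErrArgument before this conversion is reached.
TInt TimeoutToTicks(TInt aTimeoutUs, TInt aTickPeriodUs)
	{
	if (aTimeoutUs <= 0)
		return aTimeoutUs;
	return (aTimeoutUs + aTickPeriodUs - 1) / aTickPeriodUs;	// e.g. 1us with a 1000us tick -> 1 tick
	}
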
--- a/kernel/eka/kernel/skernel.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/kernel/skernel.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -74,6 +74,9 @@
 
 // Wait for semaphore with timeout
 // Enter with system locked, return with system unlocked.
+// If aNTicks==0, wait forever
+// If aNTicks==-1, poll (don't block)
+// If aNTicks>0, timeout is aNTicks nanokernel ticks
 TInt DSemaphore::Wait(TInt aNTicks)
 	{
 	__KTRACE_OPT(KSEMAPHORE,Kern::Printf("Semaphore %O Wait %d Timeout %d",this,iCount,aNTicks));
@@ -84,14 +87,23 @@
 		r=KErrGeneral;
 	else if (--iCount<0)
 		{
-		DThread* pC=TheCurrentThread;
-		pC->iMState=DThread::EWaitSemaphore;
-		pC->iWaitObj=this;
-		iWaitQ.Add(pC);
-		BTRACE_KS(BTrace::ESemaphoreBlock, this);
-		r=NKern::Block(aNTicks,NKern::ERelease,SYSTEM_LOCK);
-		__ASSERT_DEBUG(pC->iMState==DThread::EReady,K::Fault(K::ESemWaitBadState));
-		COND_BTRACE_KS(r==KErrNone, BTrace::ESemaphoreAcquire, this);
+		if (aNTicks >= 0)
+			{
+			DThread* pC=TheCurrentThread;
+			pC->iMState=DThread::EWaitSemaphore;
+			pC->iWaitObj=this;
+			iWaitQ.Add(pC);
+			BTRACE_KS(BTrace::ESemaphoreBlock, this);
+			r=NKern::Block(aNTicks,NKern::ERelease,SYSTEM_LOCK);
+			__ASSERT_DEBUG(pC->iMState==DThread::EReady,K::Fault(K::ESemWaitBadState));
+			COND_BTRACE_KS(r==KErrNone, BTrace::ESemaphoreAcquire, this);
+			}
+		else
+			{
+			++iCount;
+			NKern::UnlockSystem();
+			r = KErrTimedOut;	// couldn't acquire semaphore immediately, so fail
+			}
 		return r;
 		}
 #ifdef BTRACE_SYMBIAN_KERNEL_SYNC
@@ -326,10 +338,14 @@
 extern const SNThreadHandlers EpocThreadHandlers;
 #endif
 
-// Enter and return with system locked.
-TInt DMutex::Wait()
+// Wait for mutex with timeout
+// Enter with system locked, return with system unlocked.
+// If aNTicks==0, wait forever
+// If aNTicks==-1, poll (don't block)
+// If aNTicks>0, timeout is aNTicks nanokernel ticks
+TInt DMutex::Wait(TInt aNTicks)
 	{
-	__KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O Wait hold %O hldc=%d wtc=%d",this,iCleanup.iThread,iHoldCount,iWaitCount));
+	__KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O Wait(%d) hold %O hldc=%d wtc=%d",this,aNTicks,iCleanup.iThread,iHoldCount,iWaitCount));
 	__ASSERT_SYSTEM_LOCK;
 	__ASSERT_DEBUG(NCurrentThread()->iHandlers==&EpocThreadHandlers, K::Fault(K::EMutexWaitNotDThread));
 	DThread* pC=TheCurrentThread;
@@ -353,6 +369,8 @@
 			BTRACE_KS(BTrace::EMutexAcquire, this);
 			return KErrNone;
 			}
+		if (aNTicks<0)
+			return KErrTimedOut;	// poll mode - can't get mutex immediately so fail
 		K::PINestLevel=0;
 		pC->iMState=DThread::EWaitMutex;
 		pC->iWaitObj=this;
@@ -375,7 +393,7 @@
 		// return value is set at the point where the thread is released from its wait
 		// condition). However we can still detect this situation since the thread will
 		// have been placed into the EReady state when the mutex was reset.
-		TInt r=NKern::Block(0, NKern::ERelease|NKern::EClaim|NKern::EObstruct, SYSTEM_LOCK);
+		TInt r=NKern::Block(aNTicks, NKern::ERelease|NKern::EClaim|NKern::EObstruct, SYSTEM_LOCK);
 		if (r==KErrNone && pC->iMState==DThread::EReady)
 			r = KErrGeneral;	// mutex has been reset
 		if (r!=KErrNone)		// if we get an error here...
--- a/kernel/eka/kernel/sthread.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/kernel/sthread.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -589,29 +589,28 @@
 	ni.iStackSize=iSupervisorStackSize;
 #ifdef __SMP__
 	TUint32 config = TheSuperPage().KernelConfigFlags();
+	ni.iGroup = 0;
+	ni.iCpuAffinity = KCpuAffinityAny;
 	if (iThreadType==EThreadUser)
 		{
 		// user thread
 		if ((config & EKernelConfigSMPUnsafeCPU0) && iOwningProcess->iSMPUnsafeCount)
 			{
 			ni.iCpuAffinity = 0; // compatibility mode
-			ni.iGroup = 0;
 			}
 		else
 			{
-			ni.iCpuAffinity = KCpuAffinityAny;
 			if ((config & EKernelConfigSMPUnsafeCompat) && iOwningProcess->iSMPUnsafeCount)
 				ni.iGroup = iOwningProcess->iSMPUnsafeGroup;
-			else
-				ni.iGroup = 0;
 			}
-		
 		}
 	else
 		{
-		// kernel thread
-		ni.iCpuAffinity = 0;
-		ni.iGroup = 0;
+		if (config & EKernelConfigSMPLockKernelThreadsCore0) 
+			{
+			// kernel thread
+			ni.iCpuAffinity = 0;
+			}
 		}
 #endif
 	if (iThreadType!=EThreadInitial)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kernel/eka/nkern/arm/nklib.cia	Wed Jun 23 12:58:21 2010 +0100
@@ -0,0 +1,194 @@
+// Copyright (c) 2010-2010 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32\nkern\arm\nklib.cia
+// 
+//
+
+#include <e32atomics.h>
+#include <nklib.h>
+
+#ifdef __SRATIO_MACHINE_CODED__
+__NAKED__ void SRatio::Set(TUint32 /*aInt*/, TInt /*aDivisorExp*/)
+	{
+#ifdef __CPU_ARM_HAS_CLZ
+	CLZ(		3,1);						// r3=31-MSB(r1), 32 if r1=0
+	asm("add	r2, r2, r3 ");				// r2=shift+aDivisorExp
+	asm("movs	r1, r1, lsl r3 ");			// shift r1 left so bit 31=1
+	asm("rsb	r2, r2, #0 ");				// r2 = -shift-aDivisorExp
+	asm("moveq	r2, #0 ");					// if aInt=0, r2=0
+	asm("bicne	r2, r2, #0xff000000 ");		// else clear iSpare fields
+	asm("bicne	r2, r2, #0x00ff0000 ");		//
+#else
+	asm("rsb	r2, r2, #0 ");				// r2 = -aDivisorExp
+	asm("cmp	r1, #0x00010000 ");			// if aInt top 16 bits clear ...
+	asm("movcc	r1, r1, lsl #16 ");			// ... shift 16 bits left ...
+	asm("subcc	r2, r2, #16 ");				// ... and subtract 16 from iX
+	asm("cmp	r1, #0x01000000 ");
+	asm("movcc	r1, r1, lsl #8 ");
+	asm("subcc	r2, r2, #8 ");
+	asm("cmp	r1, #0x10000000 ");
+	asm("movcc	r1, r1, lsl #4 ");
+	asm("subcc	r2, r2, #4 ");
+	asm("cmp	r1, #0x40000000 ");
+	asm("movcc	r1, r1, lsl #2 ");
+	asm("subcc	r2, r2, #2 ");
+	asm("cmp	r1, #0x80000000 ");
+	asm("subcc	r2, r2, #1 ");
+	asm("cmp	r1, #0 ");
+	asm("moveq	r2, #0 ");					// if aInt=0, r2=0
+	asm("bicne	r2, r2, #0xff000000 ");		// else clear iSpare fields
+	asm("bicne	r2, r2, #0x00ff0000 ");		//
+#endif
+	asm("stmia	r0, {r1,r2} ");				// iM in r1, iX in bottom 16 bits of r2
+	__JUMP(,	lr);
+	}
+
+__NAKED__ TInt SRatio::Reciprocal()
+	{
+	asm("ldr	r1, [r0] ");				// r1 = iM
+	asm("ldrsh	r12, [r0, #4] ");			// r12 = iX
+	asm("rsbs	r2, r1, #0 ");
+	asm("beq	0f ");						// divide by zero
+	asm("add	r12, r12, #63 ");
+	asm("rsb	r12, r12, #0 ");			// r12 = -63 - iX
+	asm("addvs	r12, r12, #1 ");			// if iM==0x80000000 r12 = -62 - iX (ratio = 2^(31+iX) so reciprocal = 2^(-31-iX) = 2^(31 + (-62-iX))
+	asm("bvs	1f ");						// iM=0x80000000
+
+	// 2^(32+iX) > r > 2^(31+iX)
+	// 2^(-32-iX) < 1/r < 2^(-31-iX)
+	// 2^(31+(-63-iX)) < 1/r < 2^(31+(-62-iX))
+	asm("mov	r2, #0 ");					// accumulates result
+	asm("mov	r3, #0x80000000 ");			// 33 bit accumulator in C:R3 initialised to 2^32
+	asm("2:		");
+	asm("adds	r3, r3, r3 ");
+	asm("cmpcc	r3, r1 ");
+	asm("subcs	r3, r3, r1 ");				// if C=1 or r3>=r1, r3-=r1
+	asm("adcs	r2, r2, r2 ");				// next result bit
+	asm("bcc	2b ");						// finished when we have 33 bits (when top bit shifted off)
+	asm("movs	r2, r2, lsr #1 ");			// rounding bit into C
+	asm("orr	r2, r2, #0x80000000 ");		// top bit back
+	asm("adcs	r2, r2, #0 ");				// add rounding bit
+	asm("movcs	r2, #0x80000000 ");			// if carry, increment exponent
+	asm("addcs	r12, r12, #1 ");
+
+	asm("1:		");
+	asm("cmp	r12, #-32768 ");
+	asm("blt	9f ");						// underflow
+	asm("cmp	r12, #32768 ");
+	asm("bge	8f ");						// overflow
+	asm("str	r2, [r0] ");				// iM
+	asm("strh	r12, [r0, #4] ");			// iX
+	asm("mov	r0, #0 ");
+	__JUMP(,	lr);
+
+	asm("0:		");
+	asm("mov	r0, #%a0" : : "i" ((TInt)KErrDivideByZero));
+	__JUMP(,	lr);
+
+	asm("8:		");
+	asm("mov	r0, #%a0" : : "i" ((TInt)KErrOverflow));
+	__JUMP(,	lr);
+
+	asm("9:		");
+	asm("mov	r0, #%a0" : : "i" ((TInt)KErrUnderflow));
+	__JUMP(,	lr);
+	}
+
+__NAKED__ TInt SRatio::Mult(TUint32& /*aInt32*/)
+	{
+	asm("ldr	r3, [r0] ");				// r3 = iM
+	asm("mov	r12, r0 ");
+	asm("ldr	r0, [r1] ");				// r0 = aInt32
+	asm("cmp	r3, #0 ");
+	asm("cmpne	r0, #0 ");
+	asm("beq	0f ");						// result zero
+	asm("umull	r2, r3, r0, r3 ");			// r3:r2 = aInt32 * iM (lowest value 0x0000000080000000)
+	asm("ldrsh	r12, [r12, #4] ");			// r12 = iX
+#ifdef __CPU_ARM_HAS_CLZ
+	CLZ(		0, 3);						// r0 = number of leading zeros in r3:r2 (can't be >32)
+#else
+	asm("str	r12, [sp, #-4]! ");
+	asm("movs	r12, r3 ");
+	asm("mov	r0, #0 ");
+	asm("cmp	r12, #0x00010000 ");
+	asm("movcc	r12, r12, lsl #16 ");
+	asm("addcc	r0, r0, #16 ");
+	asm("cmp	r12, #0x01000000 ");
+	asm("movcc	r12, r12, lsl #8 ");
+	asm("addcc	r0, r0, #8 ");
+	asm("cmp	r12, #0x10000000 ");
+	asm("movcc	r12, r12, lsl #4 ");
+	asm("addcc	r0, r0, #4 ");
+	asm("cmp	r12, #0x40000000 ");
+	asm("movcc	r12, r12, lsl #2 ");
+	asm("addcc	r0, r0, #2 ");
+	asm("cmp	r12, #0 ");
+	asm("ldr	r12, [sp], #4 ");			// r12 = iX
+	asm("addgt	r0, r0, #1 ");
+	asm("moveq	r0, #32 ");					// r0 = number of leading zeros in r3:r2 (can't be >32)
+#endif
+	asm("rsb	r0, r0, #63 ");				// bit number of most significant bit
+	asm("add	r0, r0, r12 ");				// bit number of most significant bit after exponent shift
+	asm("cmp	r0, #32 ");
+	asm("bge	8f ");						// overflow
+	asm("cmp	r0, #-1 ");
+	asm("blt	9f ");						// underflow
+	asm("adds	r12, r12, #32 ");			// shift needed to get result into top 32 bits (>0 left, <0 right)
+	asm("beq	1f ");						// no shift
+	asm("blt	2f ");						// right shift
+	asm("rsb	r0, r12, #32 ");
+	asm("mov	r3, r3, lsl r12 ");
+	asm("orr	r3, r3, r2, lsr r0 ");
+	asm("mov	r2, r2, lsl r12 ");			// r3:r2 <<= r12
+	asm("b		1f ");
+	asm("2:		");
+	asm("rsb	r12, r12, #0 ");
+	asm("rsb	r0, r12, #32 ");
+	asm("mov	r2, r2, lsr r12 ");
+	asm("orr	r2, r2, r3, lsl r0 ");
+	asm("mov	r3, r3, lsr r12 ");			// r3:r2 >>= r12
+	asm("1:		");
+	asm("adds	r2, r2, r2 ");				// rounding
+	asm("adcs	r3, r3, #0 ");
+	asm("bcs	8f ");						// overflow
+	asm("beq	9f ");						// underflow
+	asm("mov	r0, #0 ");
+	asm("str	r3, [r1] ");
+	__JUMP(,	lr);
+
+	asm("0:		");
+	asm("mov	r0, #0 ");
+	asm("str	r0, [r1] ");
+	__JUMP(,	lr);
+
+	asm("8:		");
+	asm("mvn	r0, #0 ");
+	asm("str	r0, [r1] ");
+	asm("mov	r0, #%a0" : : "i" ((TInt)KErrOverflow));
+	__JUMP(,	lr);
+
+	asm("9:		");
+	asm("mov	r0, #0 ");
+	asm("str	r0, [r1] ");
+	asm("mov	r0, #%a0" : : "i" ((TInt)KErrUnderflow));
+	__JUMP(,	lr);
+	}
+
+//TInt SRatio::Mult(TUint64& aInt64)
+//	{
+//	}
+
+#endif
+
+
--- a/kernel/eka/nkern/nkern.mmp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkern/nkern.mmp	Wed Jun 23 12:58:21 2010 +0100
@@ -16,12 +16,13 @@
 //
 
 sourcepath				../nkern
-source					nkern.cpp
+source					nkern.cpp nklib.cpp
 #ifdef MARM
 sourcepath				../common/arm
 source					atomics.cia
 sourcepath				../nkern/arm
 source					vectors.cia ncsched.cpp ncsched.cia nctimer.cia ncutilf.cia
+source					nklib.cia
 
 // X86
 #elif defined(X86)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kernel/eka/nkern/nklib.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -0,0 +1,135 @@
+// Copyright (c) 2010-2010 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32\nkern\nklib.cpp
+// 
+//
+
+#include <e32atomics.h>
+#include <nklib.h>
+
+#ifndef __SRATIO_MACHINE_CODED__
+void SRatio::Set(TUint32 aInt, TInt aDivisorExp)
+	{
+	iSpare1 = 0;
+	iSpare2 = 0;
+	iM = aInt;
+	if (iM)
+		{
+		TInt ms1 = __e32_find_ms1_32(iM);
+		TInt shift = 31 - ms1;
+		iM <<= shift;
+		iX = (TInt16)(-shift - aDivisorExp);
+		}
+	else
+		iX = 0;
+	}
+
+TInt SRatio::Reciprocal()
+	{
+	if (iM==0)
+		return KErrDivideByZero;
+	// Calculate 2^32/iM
+	TInt exp=0;
+	if (iM == 0x80000000u)
+		{
+		// ratio = 2^(31+iX) so reciprocal = 2^(-31-iX) = 2^(31 + (-62-iX))
+		exp = -62-iX;
+		}
+	else
+		{
+		// 2^32/iM = 1.xxx
+		TUint64 r64 = MAKE_TUINT64(0u-iM,0);
+		TUint32 q32 = (TUint32)(r64/TUint64(iM));	// next 32 bits of result
+		iM = 0x80000000u | (q32>>1);
+		exp = -63-iX;
+		if (q32 & 1)
+			{
+			if (++iM==0)
+				iM=0x80000000u, ++exp;
+			}
+		}
+	if (exp < -32768)
+		{
+		iM = 0;
+		iX = 0;
+		return KErrUnderflow;
+		}
+	if (exp > 32767)
+		{
+		iM = 0xffffffffu;
+		iX = 32767;
+		return KErrOverflow;
+		}
+	iX = (TInt16)exp;
+	return KErrNone;
+	}
+
+TInt SRatio::Mult(TUint32& aInt32)
+	{
+	TUint64 x = aInt32;
+	x *= TUint64(iM);
+	if (x==0)
+		{
+		aInt32 = 0;
+		return KErrNone;
+		}
+	TInt ms1 = __e32_find_ms1_64(x);
+	TInt ms1b = ms1 + iX;
+	if (ms1b>=32)
+		{
+		aInt32 = ~0u;
+		return KErrOverflow;
+		}
+	if (ms1b<-1)
+		{
+		aInt32 = 0;
+		return KErrUnderflow;
+		}
+	TInt shift = ms1b - ms1 + 31;
+	if (shift > 0)
+		x <<= shift;
+	else if (shift < 0)
+		x >>= (-shift);
+	x += MAKE_TUINT64(0,0x40000000u);
+	if (x >> 63)
+		{
+		aInt32 = ~0u;
+		return KErrOverflow;
+		}
+	aInt32 = (TUint32)(x>>31);
+	return aInt32 ? KErrNone : KErrUnderflow;
+	}
+
+//TInt SRatio::Mult(TUint64& aInt64)
+//	{
+//	}
+#endif
+
+void SRatioInv::Set(const SRatio* a)
+	{
+	if (a)
+		{
+		iR = *a;
+		iI = iR;
+		iI.Reciprocal();
+		}
+	else
+		{
+		iR.Set(1);
+		iI.Set(1);
+		}
+	}
+
+
+
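The new nklib.cpp above implements the generic SRatio fixed-point arithmetic: a ratio is stored as value = iM * 2^iX, with the mantissa iM normalised so that bit 31 is set. The following is a minimal host-side model of Set()/Reciprocal()/Mult() in plain C++ — the types and helpers are re-declared here purely for illustration and are not the kernel definitions:

#include <cstdint>
#include <cstdio>

struct Ratio { uint32_t iM; int16_t iX; };              // value = iM * 2^iX, bit 31 of iM set

static int Ms1_32(uint32_t x)                           // index of most significant set bit
    { int n = -1; while (x) { ++n; x >>= 1; } return n; }

static void Set(Ratio& r, uint32_t aInt, int aDivisorExp = 0)
    {
    r.iM = aInt;
    if (!r.iM) { r.iX = 0; return; }
    int shift = 31 - Ms1_32(r.iM);
    r.iM <<= shift;
    r.iX = (int16_t)(-shift - aDivisorExp);
    }

static void Reciprocal(Ratio& r)                        // assumes r.iM != 0
    {
    int exp;
    if (r.iM == 0x80000000u)
        exp = -62 - r.iX;                               // ratio was an exact power of two
    else
        {
        uint64_t r64 = (uint64_t)(0u - r.iM) << 32;     // (2^32 - iM) * 2^32
        uint32_t q32 = (uint32_t)(r64 / r.iM);          // first 32 fraction bits of 2^32/iM
        r.iM = 0x80000000u | (q32 >> 1);
        exp = -63 - r.iX;
        if ((q32 & 1) && ++r.iM == 0) { r.iM = 0x80000000u; ++exp; }    // round up
        }
    r.iX = (int16_t)exp;                                // clamping to +/-32767 omitted here
    }

static uint32_t Mult(const Ratio& r, uint32_t aInt32)   // aInt32 * ratio, rounded; assumes iM normalised
    {
    uint64_t x = (uint64_t)aInt32 * r.iM;
    if (!x) return 0;
    int ms1 = 63; while (!(x >> ms1)) --ms1;            // most significant set bit of product
    int ms1b = ms1 + r.iX;                              // bit position in the scaled result
    if (ms1b >= 32) return ~0u;                         // overflow
    if (ms1b < -1) return 0;                            // underflow
    int shift = ms1b - ms1 + 31;
    x = shift > 0 ? x << shift : x >> (-shift);
    x += 0x40000000u;                                   // rounding
    if (x >> 63) return ~0u;                            // rounding carried out of range
    return (uint32_t)(x >> 31);
    }

int main()
    {
    Ratio r; Set(r, 3);                                 // r = 3    (iM=0xC0000000, iX=-30)
    Reciprocal(r);                                      // r = 1/3  (iM=0xAAAAAAAB, iX=-33)
    printf("%u\n", Mult(r, 999));                       // prints 333
    }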
--- a/kernel/eka/nkernsmp/arm/nccpu.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/arm/nccpu.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -20,9 +20,6 @@
 #include <arm_scu.h>
 #include <arm_tmr.h>
 
-extern "C" {
-extern SVariantInterfaceBlock* VIB;
-}
 
 struct SAPBootPage : public SFullArmRegSet
 	{
@@ -94,7 +91,7 @@
 
 	KickCpu(&bootPage.iAPBootPtr[a.iCpu], bp_phys);
 
-	TUint32 n = TUint32(VIB->iMaxCpuClock >> 3);
+	TUint32 n = TUint32(TheScheduler.iVIB->iMaxCpuClock >> 3);
 	n = -n;
 	TUint32 b = 0;
 	do	{
--- a/kernel/eka/nkernsmp/arm/ncglob.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/arm/ncglob.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -33,8 +33,6 @@
 //TSubScheduler*	SubSchedulerLookupTable[256];
 TUint32 CrashStateOut;
 SFullArmRegSet DefaultRegSet;
-
-SVariantInterfaceBlock* VIB;
 }
 
 #ifdef __USE_BTRACE_LOCK__
--- a/kernel/eka/nkernsmp/arm/ncmonitor.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/arm/ncmonitor.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -43,22 +43,20 @@
 	m.Printf("i_ScuAddr  %08x i_GicDist  %08x i_GicCpuIf %08x i_LocTmrA  %08x\r\n", x.iScuAddr, x.iGicDistAddr, x.iGicCpuIfcAddr, x.iLocalTimerAddr);
 	m.Printf("i_IrqCount %08x i_IrqNest  %08x i_ExcInfo  %08x i_CrashSt  %08x\r\n", x.iIrqCount, x.iIrqNestCount, x.iExcInfo, x.iCrashState);
 	m.Printf("i_AbtStkTp %08x i_UndSktTp %08x i_FiqStkTp %08x i_IrqStkTp %08x\r\n", x.iAbtStackTop, x.iUndStackTop, x.iFiqStackTop, x.iIrqStackTop);
-	m.Printf("CpuFreqM   %08x CpuFreqS   %08x CpuPeriodM %08x CpuPeriodS %08x\r\n", x.iCpuFreqM, x.iCpuFreqS, x.iCpuPeriodM, x.iCpuPeriodS);
-	m.Printf("NTmrFreqM  %08x NTmrFreqS  %08x NTmPeriodM %08x NTmPeriodS %08x\r\n", x.iNTimerFreqM, x.iNTimerFreqS, x.iNTimerPeriodM, x.iNTimerPeriodS);
-	m.Printf("TmrFreqM   %08x TmrFreqS   %08x TmrPeriodM %08x TmrPeriodS %08x\r\n", x.iTimerFreqM, x.iTimerFreqS, x.iTimerPeriodM, x.iTimerPeriodS);
-	m.Printf("iLastSyncT %08x %08x            TicksSince %08x LastTmrSet %08x\r\n", I64HIGH(x.iLastSyncTime), I64LOW(x.iLastSyncTime), x.iTicksSinceLastSync, x.iLastTimerSet);
-	m.Printf("GapEstimat %08x GapCount   %08x TotalTicks %08x Ditherer   %08x\r\n", x.iGapEstimate, x.iGapCount, x.iTotalTicks, x.iDitherer);
-	m.Printf("FreqErrEst %08x FreqErrLim %08x ErrorInteg %08x %08x\r\n", x.iFreqErrorEstimate, x.iFreqErrorLimit, I64HIGH(x.iErrorIntegrator), I64LOW(x.iErrorIntegrator));
-	m.Printf("RefAtLastC %08x %08x            M=%02x N=%02x D=%02x\r\n", I64HIGH(x.iRefAtLastCorrection), I64LOW(x.iRefAtLastCorrection), x.iM, x.iN, x.iD);
+	m.Printf("CpuFreqM   %08x CpuFreqS   %08x CpuPeriodM %08x CpuPeriodS %08x\r\n", x.iCpuFreqRI.iR.iM, x.iCpuFreqRI.iR.iX, x.iCpuFreqRI.iI.iM, x.iCpuFreqRI.iI.iX);
+	m.Printf("TmrFreqM   %08x TmrFreqS   %08x TmrPeriodM %08x TmrPeriodS %08x\r\n", x.iTimerFreqRI.iR.iM, x.iTimerFreqRI.iR.iX, x.iTimerFreqRI.iI.iM, x.iTimerFreqRI.iI.iX);
 	}
 
 void DisplaySchedulerExt(Monitor& m, TScheduler& s)
 	{
-	volatile TUint32* sx = (volatile TUint32*)&s.iSX;
-	m.Printf("Extras  0: %08x  1: %08x  2: %08x  3: %08x\r\n",sx[0],sx[1],sx[2],sx[3]);
-	m.Printf("Extras  4: %08x  5: %08x  6: %08x  7: %08x\r\n",sx[4],sx[5],sx[6],sx[7]);
-	m.Printf("Extras  8: %08x  9: %08x  A: %08x  B: %08x\r\n",sx[8],sx[9],sx[10],sx[11]);
-	m.Printf("Extras  C: %08x  D: %08x  E: %08x  F: %08x\r\n",sx[12],sx[13],sx[14],sx[15]);
+	TSchedulerX& sx = s.iSX;
+	m.Printf("iTimerMax  %08x %08x\r\n", I64HIGH(sx.iTimerMax), I64LOW(sx.iTimerMax));
+	m.Printf("iGTmrA     %08x  iScuAddr %08x  iGicDistA %08x  iGicCpuIfcA %08x  iLocTmrA %08x\r\n",
+		sx.iGlobalTimerAddr, sx.iScuAddr, sx.iGicDistAddr, sx.iGicCpuIfcAddr, sx.iLocalTimerAddr);
+	m.Printf("iGTFreqM   %08x iGTFreqS   %08x iGTPeriodM %08x iGTPeriodS %08x\r\n",
+		sx.iGTimerFreqRI.iR.iM, sx.iGTimerFreqRI.iR.iX, sx.iGTimerFreqRI.iI.iM, sx.iGTimerFreqRI.iI.iX);
+	m.Printf("iCount0    %08x %08x   iTimestamp0 %08x %08x\r\n",
+		I64HIGH(sx.iCount0), I64LOW(sx.iCount0), I64HIGH(sx.iTimestamp0), I64LOW(sx.iTimestamp0));
 	}
 
 void DumpRegisters(Monitor& m, SFullArmRegSet& a)
--- a/kernel/eka/nkernsmp/arm/ncsched.cia	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/arm/ncsched.cia	Wed Jun 23 12:58:21 2010 +0100
@@ -949,15 +949,18 @@
 	asm("cmp	r12, #0 ");
 	asm("bne	0f ");					// initial (i.e. idle) thread, so skip
 	asm("ldr	r3, [r2, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
-	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iTimerPeriodM));
-	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iTimerPeriodS));
+	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iTimerFreqRI.iI.iM));
+	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iTimerFreqRI.iI.iX));
 	asm("cmp	r3, #0 ");
 	asm("movmi	r0, #0 ");				// if timer count is negative, save zero
 	asm("bmi	1f ");
-	asm("umull	r0, r3, r12, r3 ");		// scale up to max timer clock (R3:R0)
+	asm("mov	r2, r2, lsl #16 ");
+	asm("mov	r2, r2, asr #16 ");
+	asm("umull	r0, r3, r12, r3 ");		// scale up to max timer clock (R3:R0) - need to shift right by -iX
+	asm("rsb	r2, r2, #0 ");
 	asm("rsb	r12, r2, #32 ");
-	asm("movs	r0, r0, lsr r2 ");		// r0 >>= iSSX.iTimerPeriodS, C = last bit shifted off (rounding)
-	asm("orr	r0, r0, r3, lsl r12 ");	// bottom bits from r12 into top bits of r0
+	asm("movs	r0, r0, lsr r2 ");		// r0 >>= iSSX.iTimerFreqRI.iI.iX, C = last bit shifted off (rounding)
+	asm("orr	r0, r0, r3, lsl r12 ");	// bottom bits from r3 into top bits of r0
 	asm("adcs	r0, r0, #0 ");			// round using last bit shifted off
 	asm("1:		");
 	asm("str	r0, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
@@ -971,103 +974,7 @@
 #error Use of local timer for NKern::Timestamp() no longer supported
 #else
 
-/*	Update aOld's execution time and set up the timer for aNew
-	Update this CPU's timestamp value
-
-	if (!aOld) aOld=iInitialThread
-	if (!aNew) aNew=iInitialThread
-	newcount = aNew->iTime>0 ? Max(aNew->iTime*iSSX.iTimerFreqM/2^(32+iTimerFreqS), 1) : 2^31-1
-	cli()
-	oldcount = timer count
-	if (oldcount<=0 || aOld!=aNew)
-		{
-		timer count = newcount
-		iSSX.iLastTimerSet = newcount
-		if (aOld!=aNew)
-			{
-			TUint64 now = NKern::Timestamp();
-			elapsed = iLastTimestamp -= now;
-			iLastTimestamp = now;
-			aOld->iTotalCpuTime.i64 += elapsed;
-			if (!aOld->iActiveState)
-				aOld->iTotalActiveTime.i64 += (now - aOld->iLastActivationTime.i64);
-			++iReschedCount.i64;
-			++aNew->iRunCount.i64;
-			}
-		}
-	sti()
- */
-__NAKED__ void TSubScheduler::UpdateThreadTimes(NThreadBase* /*aOld*/, NThreadBase* /*aNew*/)
-	{
-	asm("cmp	r2, #0 ");
-	asm("ldreq	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
-	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iTimerFreqM));
-	asm("cmp	r1, #0 ");
-	asm("ldreq	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
-	asm("ldr	r3, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
-	asm("stmfd	sp!, {r4-r7} ");
-	asm("cmp	r1, r2 ");
-	asm("beq	2f ");
-	asm("ldr	r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount.i32[0]));
-	asm("ldr	r7, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount.i32[1]));
-	asm("adds	r6, r6, #1 ");
-	asm("str	r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount.i32[0]));
-	asm("ldr	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount.i32[0]));
-	asm("ldr	r6, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount.i32[1]));
-	asm("adcs	r7, r7, #0 ");
-	asm("str	r7, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount.i32[1]));
-	asm("adds	r4, r4, #1 ");
-	asm("adcs	r6, r6, #0 ");
-	asm("str	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount.i32[0]));
-	asm("str	r6, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount.i32[1]));
-	asm("2:		");
-	asm("ldr	r6, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iLocalTimerAddr));
-	asm("cmp	r3, #1 ");					// aNew->iTime > 0 ?
-	asm("movlt	r3, #0x7fffffff ");			// if not, use 2^31-1
-	asm("blt	3f ");
-	asm("cmp	r1, r2 ");					// different thread?
-	asm("beq	0f ");						// no - finish
-	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iTimerFreqS));
-	asm("umull	r4, r3, r12, r3 ");			// r3:r4 = aNew->iTime * iTimerFreqM
-	asm("adds	r4, r4, r4 ");				// bit 31 into C
-	asm("teq	r5, #0 ");					// check for iTimerFreqS=0 without changing C
-	asm("movnes	r3, r3, lsr r5 ");			// if not, r3>>=iTimerFreqS, last bit shifted out into C
-	asm("adcs	r3, r3, #0 ");				// round using last bit shifted off
-	asm("3:		");
-	asm("str	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iLastTimerSet));
-	asm("str	r3, [r6, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));	// set new timeslice value in timer
-
-	asm("ldr	r6, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp.i32[0]));
-	asm("ldr	r7, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp.i32[1]));
-	asm("stmfd	sp!, {r0-r2,lr} ");
-	asm("bl		Timestamp__5NKern ");		// R1:R0 = current time
-	asm("mov	r4, r0 ");
-	asm("mov	r5, r1 ");					// R5:R4 = current time
-	asm("ldmfd	sp!, {r0-r2,lr} ");
-	asm("str	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp.i32[0]));
-	asm("ldr	r3, [r1, #%a0]!" : : "i" _FOFF(NThreadBase,iTotalCpuTime.i64));
-	asm("ldr	r12, [r1, #4] ");
-	asm("str	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp.i32[1]));
-	asm("stmdb	r1, {r4-r5} ");				// aOld->iLastRunTime
-	asm("ldrb	r2, [r1, #%a0]" : : "i" (_FOFF(NSchedulable,iActiveState)-_FOFF(NThreadBase,iTotalCpuTime.i64)));
-	asm("subs	r6, r4, r6 ");
-	asm("sbcs	r7, r5, r7 ");				// R7:R6 = time since last reschedule
-	asm("adds	r3, r3, r6 ");
-	asm("adcs	r12, r12, r7 ");			// total CPU time of old thread
-	asm("stmia	r1!, {r3,r12} ");			// store, r1=&aOld.iLastActivationTime
-	asm("cmp	r2, #0 ");					// old thread still active?
-	asm("bne	0f ");						// yes - done
-	asm("ldmia	r1!, {r2,r3,r6,r7} ");		// R3:R2 = last activation time, R7:R6=total active time
-	asm("subs	r2, r4, r2 ");
-	asm("sbcs	r3, r5, r3 ");				// R3:R2 = time since last activation
-	asm("adds	r6, r6, r2 ");
-	asm("adcs	r7, r7, r3 ");				// R7:R6 = new total active time
-	asm("stmdb	r1, {r6,r7} ");
-
-	asm("0:		");
-	asm("ldmfd	sp!, {r4-r7} ");
-	__JUMP(,lr);
-	}
+#error UpdateThreadTimes assembler out of date!
 
 #endif
 #endif	// __UTT_MACHINE_CODED__
--- a/kernel/eka/nkernsmp/arm/ncsched.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/arm/ncsched.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -163,26 +163,32 @@
 #if !defined(__UTT_MACHINE_CODED__)
 void TSubScheduler::UpdateThreadTimes(NThreadBase* aOld, NThreadBase* aNew)
 	{
+	/* If necessary update local timer frequency (DVFS) */
+	SRatioInv* pNCF = iSSX.iNewCpuFreqRI;
+	if (pNCF)
+		{
+		iSSX.iCpuFreqRI = *pNCF;
+		__e32_atomic_store_rel_ptr(&iSSX.iNewCpuFreqRI, 0);
+		}
+	SRatioInv* pNTF = iSSX.iNewTimerFreqRI;
+	if (pNTF)
+		{
+		iSSX.iTimerFreqRI = *pNTF;
+		__e32_atomic_store_rel_ptr(&iSSX.iNewTimerFreqRI, 0);
+		}
 	if (!aOld)
 		aOld = iInitialThread;
 	if (!aNew)
 		aNew = iInitialThread;
-	if (aNew!=aOld || aNew->iTime<=0)
+	if (aNew!=aOld || aNew->iTime<=0 || pNTF)
 		{
 		TUint32 tmrval = 0x7fffffffu;
 		if (aNew->iTime > 0)
 			{
-			TUint64 x = TUint64(aNew->iTime) * TUint64(iSSX.iTimerFreqM);
-			tmrval = I64HIGH(x);
-			if (iSSX.iTimerFreqS)
-				{
-				tmrval += ((1u<<iSSX.iTimerFreqS)-1);
-				tmrval >>= iSSX.iTimerFreqS;
-				}
-			else if (I64LOW(x) & 0x80000000u)
-				++tmrval;
+			tmrval = aNew->iTime;	// this will have been computed based on the old timer frequency
+			iSSX.iTimerFreqRI.iR.Mult(tmrval);
 			}
-		iSSX.iLastTimerSet = tmrval;
+//		iSSX.iLastTimerSet = tmrval;
 		iSSX.iLocalTimerAddr->iTimerCount = tmrval;
 		}
 	if (aNew!=aOld)
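The hand-off between the frequency-change code (SFrequencies::Apply() in ncutils.cpp, further down) and this reschedule hook is publish-and-acknowledge: the publisher stores a pointer to the new ratio pair, the next reschedule on the target CPU copies the value and clears the pointer with a release store, and the publisher spins until the pointer reads back as null. A stripped-down host model using std::atomic follows — the names are invented, and the consumer loops here only so the demo terminates; in the kernel the check simply happens on every reschedule:

#include <atomic>
#include <thread>
#include <cstdio>

struct RatioPair { unsigned mant; int exp; };               // stand-in for SRatioInv

std::atomic<const RatioPair*> gNewRatio{nullptr};           // models iSSX.iNewTimerFreqRI
RatioPair gCurrentRatio{0x80000000u, -31};                  // models iSSX.iTimerFreqRI (ratio 1)

void RescheduleHook()                                       // runs on the target CPU
    {
    const RatioPair* p;
    while (!(p = gNewRatio.load(std::memory_order_acquire)))
        std::this_thread::yield();                          // demo only: wait for an update
    gCurrentRatio = *p;                                     // copy before acknowledging
    gNewRatio.store(nullptr, std::memory_order_release);    // acknowledge
    }

void Publish(const RatioPair& aNew)                         // runs on the initiating CPU
    {
    gNewRatio.store(&aNew, std::memory_order_release);
    while (gNewRatio.load(std::memory_order_acquire))
        std::this_thread::yield();                          // "__chill()" in the kernel code
    }

int main()
    {
    RatioPair half{0x80000000u, -32};                       // new ratio = 1/2
    std::thread cpu(RescheduleHook);
    Publish(half);
    cpu.join();
    printf("%#x %d\n", gCurrentRatio.mant, gCurrentRatio.exp);  // 0x80000000 -32
    }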
--- a/kernel/eka/nkernsmp/arm/ncthrd.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/arm/ncthrd.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -132,10 +132,32 @@
 		{
 		NKern::EnableAllInterrupts();
 
+#if defined(__CPU_ARM_HAS_GLOBAL_TIMER_BLOCK) && defined(__NKERN_TIMESTAMP_USE_SCU_GLOBAL_TIMER__)
+
+		if (cpu == 0) 
+			{
+			// start global timer if necessary
+			ArmGlobalTimer& GT = GLOBAL_TIMER;
+			if (!(GT.iTimerCtrl & E_ArmGTmrCtrl_TmrEnb))
+				{
+				// timer not currently enabled
+				GT.iTimerCtrl = 0;
+				__e32_io_completion_barrier();
+				GT.iTimerStatus = E_ArmGTmrStatus_Event;
+				__e32_io_completion_barrier();
+				GT.iTimerCountLow = 0;
+				GT.iTimerCountHigh = 0;
+				__e32_io_completion_barrier();
+				GT.iTimerCtrl = E_ArmGTmrCtrl_TmrEnb;	// enable timer with prescale factor of 1
+				__e32_io_completion_barrier();
+				}
+			}
+		
+#endif
+
 		// start local timer
 		ArmLocalTimer& T = LOCAL_TIMER;
 		T.iTimerCtrl = E_ArmTmrCtrl_IntEn | E_ArmTmrCtrl_Reload | E_ArmTmrCtrl_Enable;
-
 		// Initialise timestamp
 		InitTimestamp(ss, aInfo);
 		}
--- a/kernel/eka/nkernsmp/arm/ncutilf.cia	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/arm/ncutilf.cia	Wed Jun 23 12:58:21 2010 +0100
@@ -20,9 +20,6 @@
 #include <arm_gic.h>
 #include <arm_tmr.h>
 
-extern "C" {
-extern SVariantInterfaceBlock* VIB;
-}
 
 __NAKED__ void Arm::GetUserSpAndLr(TAny*) 
 	{
@@ -304,11 +301,10 @@
 	asm("ldr	r3, __TheScheduler ");
 	asm("mrs	r12, cpsr ");				// r12 = saved interrupt mask
 	asm("stmfd	sp!, {r4-r7} ");
-	asm("ldr	r5, [r3, #%a0]" : : "i" _FOFF(TScheduler,iSub[0]));						// r5->subscheduler for CPU0
 	asm("ldr	r4, [r3, #%a0]" : : "i" _FOFF(TScheduler,iSX.iGlobalTimerAddr));		// r4 points to global timer
 	__ASM_CLI();							// disable all interrupts
-	asm("ldr	r6, [r5, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iTicksSinceLastSync));	// r6 = count value of last frequency change (low)
-	asm("ldr	r7, [r5, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iLastTimerSet));		// r7 = count value of last frequency change (high)
+	asm("ldr	r6, [r3, #%a0]" : : "i" _FOFF(TScheduler,iSX.iCount0));					// r6 = count value of last frequency change (low)
+	asm("ldr	r7, [r3, #%a0]" : : "i" (_FOFF(TScheduler,iSX.iCount0)+4));				// r7 = count value of last frequency change (high)
 	asm("ldr	r2, [r4, #%a0]" : : "i" _FOFF(ArmGlobalTimer,iTimerCountHigh));			// r2 = current timer counter high word
 
 	// To read 64 bit timer value, read high, low, high
@@ -317,21 +313,23 @@
 	asm("mov	r1, r2 ");					// r1 = previous value of timer counter high word
 	asm("ldr	r0, [r4, #%a0]" : : "i" _FOFF(ArmGlobalTimer,iTimerCountLow));			// r0 = current timer counter low word
 	asm("ldr	r2, [r4, #%a0]" : : "i" _FOFF(ArmGlobalTimer,iTimerCountHigh));			// r2 = current timer counter high word
+	asm("mov	r5, r3 ");					// r5 = &TheScheduler
 	asm("cmp	r1, r2 ");					// high word changed?
 	asm("bne	1b ");						// if so, retry
 
 	// Now have R1:R0 = 64 bit global timer count
-	asm("ldr	r3, [r5, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iNTimerPeriodM));		// r3 = period multiplier
-	asm("ldr	r4, [r5, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iNTimerPeriodS));		// r4 = period multiplier shift
+	asm("ldr	r3, [r5, #%a0]" : : "i" _FOFF(TScheduler,iSX.iGTimerFreqRI.iI.iM));		// r3 = period multiplier
+	asm("ldrsh	r4, [r5, #%a0]" : : "i" _FOFF(TScheduler,iSX.iGTimerFreqRI.iI.iX));		// r4 = period multiplier shift
 	asm("subs	r6, r0, r6 ");				// r7:r6 = ticks from last frequency change
 	asm("sbcs	r7, r1, r7 ");
 	asm("umull	r0, r1, r6, r3 ");
 	asm("mov	r2, #0 ");
 	asm("umlal	r1, r2, r7, r3 ");			// r2:r1:r0 = delta * period multiplier
-	asm("rsb	r3, r4, #32 ");
-	asm("ldr	r6, [r5, #%a0]!" : : "i" _FOFF(TSubScheduler,iSSX.iLastSyncTime));		// r6 = timestamp at last freq change (low)
+	asm("ldr	r6, [r5, #%a0]!" : : "i" _FOFF(TScheduler,iSX.iTimestamp0));			// r6 = timestamp at last freq change (low)
 	asm("ldr	r7, [r5, #4] ");														// r7 = timestamp at last freq change (high)
 	asm("msr	cpsr, r12 ");				// restore interrupts
+	asm("rsb	r4, r4, #0 ");
+	asm("rsb	r3, r4, #32 ");
 	asm("movs	r0, r0, lsr r4 ");			// rounding bit into C
 	asm("orr	r0, r0, r1, lsl r3 ");
 	asm("mov	r1, r1, lsr r4 ");
@@ -345,6 +343,58 @@
 	asm(".word	%a0" : : "i" ((TInt)&TheScheduler));
 	}
 
+// Compensate for a change of frequency of the clocking driving the ARM Global Timer
+// Call with interrupts disabled
+__NAKED__ void ArmGlobalTimerFreqChg(const SRatioInv* /*aNewGTimerFreqRI*/)
+	{
+	asm("ldr	r3, __TheScheduler ");
+	asm("stmfd	sp!, {r4-r7} ");
+	asm("ldr	r4, [r3, #%a0]" : : "i" _FOFF(TScheduler,iSX.iGlobalTimerAddr));		// r4 points to global timer
+	asm("ldr	r6, [r3, #%a0]" : : "i" _FOFF(TScheduler,iSX.iCount0));					// r6 = count value of last frequency change (low)
+	asm("ldr	r7, [r3, #%a0]" : : "i" (_FOFF(TScheduler,iSX.iCount0)+4));				// r7 = count value of last frequency change (high)
+	asm("ldr	r12, [r4, #%a0]" : : "i" _FOFF(ArmGlobalTimer,iTimerCountHigh));		// r12 = current timer counter high word
+	asm("mov	r5, r3 ");					// r5 = &TheScheduler
+
+	// To read 64 bit timer value, read high, low, high
+	// If two high values match -> OK, else repeat
+	asm("1:		");
+	asm("mov	r3, r12 ");					// r3 = previous value of timer counter high word
+	asm("ldr	r2, [r4, #%a0]" : : "i" _FOFF(ArmGlobalTimer,iTimerCountLow));			// r0 = current timer counter low word
+	asm("ldr	r12, [r4, #%a0]" : : "i" _FOFF(ArmGlobalTimer,iTimerCountHigh));		// r12 = current timer counter high word
+	asm("cmp	r3, r12 ");					// high word changed?
+	asm("bne	1b ");						// if so, retry
+
+	// Now have R3:R2 = 64 bit global timer count
+	asm("str	r2, [r5, #%a0]" : : "i" _FOFF(TScheduler,iSX.iCount0));					// update count value at last frequency change
+	asm("str	r3, [r5, #%a0]" : : "i" (_FOFF(TScheduler,iSX.iCount0)+4));				// to be equal to current count value
+	asm("subs	r6, r2, r6 ");				// r7:r6 = ticks (at old frequency) from last frequency change
+	asm("sbcs	r7, r3, r7 ");
+	asm("ldr	r3, [r5, #%a0]" : : "i" _FOFF(TScheduler,iSX.iGTimerFreqRI.iI.iM));		// r3 = old period multiplier
+	asm("ldrsh	r4, [r5, #%a0]" : : "i" _FOFF(TScheduler,iSX.iGTimerFreqRI.iI.iX));		// r4 = old period multiplier shift
+	asm("ldmia	r0, {r0,r1,r2,r12} ");		// r1:r0=new frequency multiplier, r12:r2=new period multiplier
+	asm("str	r0, [r5, #%a0]" : : "i" _FOFF(TScheduler,iSX.iGTimerFreqRI.iR.iM));		// update frequency multiplier
+	asm("str	r1, [r5, #%a0]" : : "i" _FOFF(TScheduler,iSX.iGTimerFreqRI.iR.iX));		// update frequency multiplier
+	asm("str	r2, [r5, #%a0]" : : "i" _FOFF(TScheduler,iSX.iGTimerFreqRI.iI.iM));		// update period multiplier
+	asm("str	r12, [r5, #%a0]" : : "i" _FOFF(TScheduler,iSX.iGTimerFreqRI.iI.iX));	// update period multiplier
+	asm("umull	r0, r1, r6, r3 ");
+	asm("mov	r2, #0 ");
+	asm("umlal	r1, r2, r7, r3 ");			// r2:r1:r0 = delta * old period multiplier
+	asm("ldr	r6, [r5, #%a0]!" : : "i" _FOFF(TScheduler,iSX.iTimestamp0));			// r6 = timestamp at last freq change (low)
+	asm("ldr	r7, [r5, #4] ");														// r7 = timestamp at last freq change (high)
+	asm("rsb	r4, r4, #0 ");
+	asm("rsb	r3, r4, #32 ");
+	asm("movs	r0, r0, lsr r4 ");			// rounding bit into C
+	asm("orr	r0, r0, r1, lsl r3 ");
+	asm("mov	r1, r1, lsr r4 ");
+	asm("orr	r1, r1, r2, lsl r3 ");		// r1:r0 = (delta * old period multiplier) >> old period multiplier shift
+	asm("adcs	r0, r0, r6 ");				// scaled delta + timestamp at last freq change
+	asm("adcs	r1, r1, r7 ");
+	asm("stmia	r5, {r0,r1} ");				// timestamp at last freq change = now
+	__DATA_MEMORY_BARRIER_Z__(r12);			/* Ensure all updates visible */
+	asm("ldmfd	sp!, {r4-r7} ");
+	__JUMP(,lr);
+	}
+
 #elif defined(__NKERN_TIMESTAMP_USE_INLINE_BSP_CODE__)
 #define __DEFINE_NKERN_TIMESTAMP_ASM__
 #include <variant_timestamp.h>
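Both NKern::Timestamp() and the new ArmGlobalTimerFreqChg() sample the 64-bit global timer with a high/low/high read, retrying whenever the high word changes mid-read. A hedged C++ sketch of that loop (the register layout here is illustrative only, not the real ArmGlobalTimer definition):

#include <cstdint>
#include <cstdio>

struct GlobalTimerRegs                      // illustrative layout, not the real ArmGlobalTimer
    {
    volatile uint32_t iCountLow;
    volatile uint32_t iCountHigh;
    };

// Read a 64-bit free-running counter that is only accessible 32 bits at a time.
// If the high word changes while the low word is read, a carry happened mid-read,
// so retry; two matching high-word samples bracket a consistent low word.
static uint64_t ReadTimer64(const GlobalTimerRegs& t)
    {
    uint32_t hi, lo;
    do  {
        hi = t.iCountHigh;
        lo = t.iCountLow;
        } while (hi != t.iCountHigh);
    return ((uint64_t)hi << 32) | lo;
    }

int main()
    {
    GlobalTimerRegs fake = { 0x89ABCDEFu, 0x01234567u };
    printf("%#llx\n", (unsigned long long)ReadTimer64(fake));   // 0x123456789abcdef
    }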
--- a/kernel/eka/nkernsmp/arm/ncutils.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/arm/ncutils.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -22,8 +22,6 @@
 #include <nk_irq.h>
 
 extern "C" {
-extern SVariantInterfaceBlock* VIB;
-
 extern TUint KernCoreStats_EnterIdle(TUint aCore);
 extern void KernCoreStats_LeaveIdle(TInt aCookie,TUint aCore);
 
@@ -31,6 +29,7 @@
 extern void send_irq_ipi(TSubScheduler*, TInt);
 }
 
+TInt ClockFrequenciesChanged();
 
 
 /******************************************************************************
@@ -89,66 +88,47 @@
 void NKern::Init0(TAny* a)
 	{
 	__KTRACE_OPT(KBOOT,DEBUGPRINT("VIB=%08x", a));
-	VIB = (SVariantInterfaceBlock*)a;
-	__NK_ASSERT_ALWAYS(VIB && VIB->iVer==0 && VIB->iSize==sizeof(SVariantInterfaceBlock));
-	__KTRACE_OPT(KBOOT,DEBUGPRINT("iVer=%d iSize=%d", VIB->iVer, VIB->iSize));
-	__KTRACE_OPT(KBOOT,DEBUGPRINT("iMaxCpuClock=%08x %08x", I64HIGH(VIB->iMaxCpuClock), I64LOW(VIB->iMaxCpuClock)));
-	__KTRACE_OPT(KBOOT,DEBUGPRINT("iMaxTimerClock=%u", VIB->iMaxTimerClock));
-	__KTRACE_OPT(KBOOT,DEBUGPRINT("iScuAddr=%08x", VIB->iScuAddr));
-	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGicDistAddr=%08x", VIB->iGicDistAddr));
-	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGicCpuIfcAddr=%08x", VIB->iGicCpuIfcAddr));
-	__KTRACE_OPT(KBOOT,DEBUGPRINT("iLocalTimerAddr=%08x", VIB->iLocalTimerAddr));
-	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGlobalTimerAddr=%08x", VIB->iGlobalTimerAddr));
+	SVariantInterfaceBlock* v = (SVariantInterfaceBlock*)a;
+	TheScheduler.iVIB = v;
+	__NK_ASSERT_ALWAYS(v && v->iVer==0 && v->iSize==sizeof(SVariantInterfaceBlock));
+	__KTRACE_OPT(KBOOT,DEBUGPRINT("iVer=%d iSize=%d", v->iVer, v->iSize));
+	__KTRACE_OPT(KBOOT,DEBUGPRINT("iMaxCpuClock=%08x %08x", I64HIGH(v->iMaxCpuClock), I64LOW(v->iMaxCpuClock)));
+	__KTRACE_OPT(KBOOT,DEBUGPRINT("iMaxTimerClock=%u", v->iMaxTimerClock));
+	__KTRACE_OPT(KBOOT,DEBUGPRINT("iScuAddr=%08x", v->iScuAddr));
+	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGicDistAddr=%08x", v->iGicDistAddr));
+	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGicCpuIfcAddr=%08x", v->iGicCpuIfcAddr));
+	__KTRACE_OPT(KBOOT,DEBUGPRINT("iLocalTimerAddr=%08x", v->iLocalTimerAddr));
+	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGlobalTimerAddr=%08x", v->iGlobalTimerAddr));
 
 	TScheduler& s = TheScheduler;
-	s.iSX.iScuAddr = (ArmScu*)VIB->iScuAddr;
-	s.iSX.iGicDistAddr = (GicDistributor*)VIB->iGicDistAddr;
-	s.iSX.iGicCpuIfcAddr = (GicCpuIfc*)VIB->iGicCpuIfcAddr;
-	s.iSX.iLocalTimerAddr = (ArmLocalTimer*)VIB->iLocalTimerAddr;
-	s.iSX.iTimerMax = (VIB->iMaxTimerClock / 1);		// use prescaler value of 1
+	s.iSX.iScuAddr = (ArmScu*)v->iScuAddr;
+	s.iSX.iGicDistAddr = (GicDistributor*)v->iGicDistAddr;
+	s.iSX.iGicCpuIfcAddr = (GicCpuIfc*)v->iGicCpuIfcAddr;
+	s.iSX.iLocalTimerAddr = (ArmLocalTimer*)v->iLocalTimerAddr;
+	s.iSX.iTimerMax = (v->iMaxTimerClock / 1);		// use prescaler value of 1
 #ifdef	__CPU_ARM_HAS_GLOBAL_TIMER_BLOCK
-	s.iSX.iGlobalTimerAddr = (ArmGlobalTimer*)VIB->iGlobalTimerAddr;
+	s.iSX.iGlobalTimerAddr = (ArmGlobalTimer*)v->iGlobalTimerAddr;
+	s.iSX.iGTimerFreqRI.Set(v->iGTimerFreqR);
+	v->iGTimerFreqR = 0;
 #endif
 
 	TInt i;
 	for (i=0; i<KMaxCpus; ++i)
 		{
 		TSubScheduler& ss = TheSubSchedulers[i];
-		ss.iSSX.iCpuFreqM = KMaxTUint32;
-		ss.iSSX.iCpuFreqS = 0;
-		ss.iSSX.iCpuPeriodM = 0x80000000u;
-		ss.iSSX.iCpuPeriodS = 31;
-		ss.iSSX.iNTimerFreqM = KMaxTUint32;
-		ss.iSSX.iNTimerFreqS = 0;
-		ss.iSSX.iNTimerPeriodM = 0x80000000u;
-		ss.iSSX.iNTimerPeriodS = 31;
-		ss.iSSX.iTimerFreqM = KMaxTUint32;
-		ss.iSSX.iTimerFreqS = 0;
-		ss.iSSX.iTimerPeriodM = 0x80000000u;
-		ss.iSSX.iTimerPeriodS = 31;
-		ss.iSSX.iLastSyncTime = 0;
-		ss.iSSX.iTicksSinceLastSync = 0;
-		ss.iSSX.iLastTimerSet = 0;
-		ss.iSSX.iGapEstimate = 10<<16;
-		ss.iSSX.iGapCount = 0;
-		ss.iSSX.iTotalTicks = 0;
-		ss.iSSX.iDitherer = 1;
-		ss.iSSX.iFreqErrorEstimate = 0;
-		ss.iSSX.iFreqErrorLimit = 0x00100000;
-		ss.iSSX.iErrorIntegrator = 0;
-		ss.iSSX.iRefAtLastCorrection = 0;
-		ss.iSSX.iM = 4;
-		ss.iSSX.iN = 18;
-		ss.iSSX.iD = 3;
-		VIB->iTimerMult[i] = 0;
-		VIB->iCpuMult[i] = 0;
-		UPerCpuUncached* u = VIB->iUncached[i];
+		ss.iSSX.iCpuFreqRI.Set(v->iCpuFreqR[i]);
+		ss.iSSX.iTimerFreqRI.Set(v->iTimerFreqR[i]);
+
+		v->iCpuFreqR[i] = 0;
+		v->iTimerFreqR[i] = 0;
+		UPerCpuUncached* u = v->iUncached[i];
 		ss.iUncached = u;
 		u->iU.iDetachCount = 0;
 		u->iU.iAttachCount = 0;
 		u->iU.iPowerOffReq = FALSE;
 		u->iU.iDetachCompleteFn = &DetachComplete;
 		}
+	v->iFrqChgFn = &ClockFrequenciesChanged;
 	__e32_io_completion_barrier();
 	InterruptInit0();
 	}
@@ -374,16 +354,11 @@
 		s.AllCpusIdle();
 	s.iIdleSpinLock.UnlockOnly();
 
-	//TUint cookie = KernCoreStats::EnterIdle((TUint8)ss.iCpuNum);
 	TUint cookie = KernCoreStats_EnterIdle((TUint8)ss.iCpuNum);
 
 	arg |= retire;
 	NKIdle(arg);
 
-	//KernCoreStats::LeaveIdle(cookie, (TUint8)ss.iCpuNum);
-	KernCoreStats_LeaveIdle(cookie, (TUint8)ss.iCpuNum);
-
-
 	// interrupts have not been reenabled
 	s.iIdleSpinLock.LockOnly();
 
@@ -415,6 +390,8 @@
 	if (ci == 0)
 		s.FirstBackFromIdle();
 
+	KernCoreStats_LeaveIdle(cookie, (TUint8)ss.iCpuNum);
+
 	if (retire)
 		{
 		s.iCCReactivateDfc.RawAdd();	// kick load balancer to give us some work
@@ -432,12 +409,12 @@
 
 TBool TScheduler::CoreControlSupported()
 	{
-	return VIB->iCpuPowerUpFn != 0;
+	return TheScheduler.iVIB->iCpuPowerUpFn != 0;
 	}
 
 void TScheduler::CCInitiatePowerUp(TUint32 aCores)
 	{
-	TCpuPowerUpFn pUp = VIB->iCpuPowerUpFn;
+	TCpuPowerUpFn pUp = TheScheduler.iVIB->iCpuPowerUpFn;
 	if (pUp && aCores)
 		{
 		TInt i;
@@ -463,7 +440,7 @@
 
 void TScheduler::CCIndirectPowerDown(TAny*)
 	{
-	TCpuPowerDownFn pDown = VIB->iCpuPowerDownFn;
+	TCpuPowerDownFn pDown = TheScheduler.iVIB->iCpuPowerDownFn;
 	if (pDown)
 		{
 		TInt i;
@@ -555,3 +532,167 @@
 #endif
 	}
 
+/******************************************************************************
+ * Notify frequency changes
+ ******************************************************************************/
+
+struct SFrequencies
+	{
+	void Populate();
+	void Apply();
+	TBool AddToQueue();
+
+	SFrequencies*	iNext;
+	TUint32			iWhich;
+	SRatioInv		iNewCpuRI[KMaxCpus];
+	SRatioInv		iNewTimerRI[KMaxCpus];
+	SRatioInv		iNewGTimerRI;
+	NFastSemaphore*	iSem;
+
+	static SFrequencies* volatile Head;
+	};
+
+SFrequencies* volatile SFrequencies::Head;
+
+TBool SFrequencies::AddToQueue()
+	{
+	SFrequencies* h = Head;
+	do	{
+		iNext = h;
+		} while(!__e32_atomic_cas_rel_ptr(&Head, &h, this));
+	return !h;	// TRUE if list was empty
+	}
+
+
+void SFrequencies::Populate()
+	{
+	TScheduler& s = TheScheduler;
+	TInt cpu;
+	iWhich = 0;
+	SRatio* ri = (SRatio*)__e32_atomic_swp_ord_ptr(&s.iVIB->iGTimerFreqR, 0);
+	if (ri)
+		{
+		iNewGTimerRI.Set(ri);
+		iWhich |= 0x80000000u;
+		}
+	for (cpu=0; cpu<s.iNumCpus; ++cpu)
+		{
+		TSubScheduler& ss = *s.iSub[cpu];
+		ri = (SRatio*)__e32_atomic_swp_ord_ptr(&s.iVIB->iCpuFreqR[cpu], 0);
+		if (ri)
+			{
+			iNewCpuRI[cpu].Set(ri);
+			iWhich |= ss.iCpuMask;
+			}
+		ri = (SRatio*)__e32_atomic_swp_ord_ptr(&s.iVIB->iTimerFreqR[cpu], 0);
+		if (ri)
+			{
+			iNewTimerRI[cpu].Set(ri);
+			iWhich |= (ss.iCpuMask<<8);
+			}
+		}
+	}
+
+#if defined(__NKERN_TIMESTAMP_USE_SCU_GLOBAL_TIMER__)
+extern void ArmGlobalTimerFreqChg(const SRatioInv* /*aNewGTimerFreqRI*/);
+#endif
+
+void SFrequencies::Apply()
+	{
+	if (!iWhich)
+		return;
+	TScheduler& s = TheScheduler;
+	TStopIPI ipi;
+	TUint32 stopped = ipi.StopCPUs();
+	TInt cpu;
+	TUint32 wait = 0;
+	for (cpu=0; cpu<s.iNumCpus; ++cpu)
+		{
+		TSubScheduler& ss = *s.iSub[cpu];
+		TUint32 m = 1u<<cpu;
+		TUint32 m2 = m | (m<<8);
+		if (stopped & m)
+			{
+			// CPU is running so let it update
+			if (iWhich & m2)
+				{
+				if (iWhich & m)
+					ss.iSSX.iNewCpuFreqRI = &iNewCpuRI[cpu];
+				if (iWhich & (m<<8))
+					ss.iSSX.iNewTimerFreqRI = &iNewTimerRI[cpu];
+				ss.iRescheduleNeededFlag = 1;
+				wait |= m;
+				}
+			}
+		else
+			{
+			// CPU is not running so update directly
+			if (iWhich & m)
+				{
+				ss.iSSX.iCpuFreqRI = iNewCpuRI[cpu];
+				}
+			if (iWhich & (m<<8))
+				{
+				ss.iSSX.iTimerFreqRI = iNewTimerRI[cpu];
+				}
+			}
+		}
+#if defined(__NKERN_TIMESTAMP_USE_SCU_GLOBAL_TIMER__)
+	if (iWhich & 0x80000000u)
+		{
+		ArmGlobalTimerFreqChg(&iNewGTimerRI);
+		}
+#endif
+	ipi.ReleaseCPUs();	// this CPU handled here
+	while(wait)
+		{
+		cpu = __e32_find_ls1_32(wait);
+		TSubScheduler& ss = *s.iSub[cpu];
+		if (!ss.iSSX.iNewCpuFreqRI && !ss.iSSX.iNewTimerFreqRI)
+			wait &= ~ss.iCpuMask;
+		__chill();
+		}
+	}
+
+void TScheduler::DoFrequencyChanged(TAny*)
+	{
+	SFrequencies* list = (SFrequencies*)__e32_atomic_swp_ord_ptr(&SFrequencies::Head, 0);
+	if (!list)
+		return;
+	list->Populate();
+	list->Apply();
+	SFrequencies* rev = 0;
+	while (list)
+		{
+		SFrequencies* next = list->iNext;
+		list->iNext = rev;
+		rev = list;
+		list = next;
+		}
+	while (rev)
+		{
+		NFastSemaphore* s = rev->iSem;
+		rev = rev->iNext;
+		NKern::FSSignal(s);
+		}
+	}
+
+TInt ClockFrequenciesChanged()
+	{
+	TScheduler& s = TheScheduler;
+	NFastSemaphore sem(0);
+	SFrequencies f;
+	f.iSem = &sem;
+	NThread* ct = NKern::CurrentThread();
+	NThread* lbt = TScheduler::LBThread();
+	NKern::ThreadEnterCS();
+	TBool first = f.AddToQueue();
+	if (!lbt || lbt == ct)
+		TScheduler::DoFrequencyChanged(&s);
+	else if (first)
+		s.iFreqChgDfc.Enque();
+	NKern::FSWait(&sem);
+	NKern::ThreadLeaveCS();
+	return KErrNone;
+	}
+
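ClockFrequenciesChanged() pushes its SFrequencies record onto a lock-free LIFO with a CAS loop, and only the caller that found the list empty needs to enqueue the DFC; DoFrequencyChanged() then detaches the entire list with a single atomic swap. A stand-alone model of that push/drain pattern (hypothetical names, std::atomic standing in for the e32 atomics):

#include <atomic>
#include <cstdio>

struct Req { Req* iNext; int iId; };                // stand-in for SFrequencies

std::atomic<Req*> Head{nullptr};

// Push one request; returns true if the list was previously empty, i.e. this
// caller should kick the worker DFC, mirroring SFrequencies::AddToQueue().
bool AddToQueue(Req* r)
    {
    Req* h = Head.load(std::memory_order_relaxed);
    do  {
        r->iNext = h;
        } while (!Head.compare_exchange_weak(h, r, std::memory_order_release));
    return h == nullptr;
    }

// Worker side: detach the whole list in one atomic swap, as DoFrequencyChanged() does.
Req* DrainQueue()
    {
    return Head.exchange(nullptr, std::memory_order_acquire);
    }

int main()
    {
    static Req a = { nullptr, 1 }, b = { nullptr, 2 };
    printf("%d\n", AddToQueue(&a));                 // 1: first enqueuer kicks the worker
    printf("%d\n", AddToQueue(&b));                 // 0: worker already pending
    for (Req* p = DrainQueue(); p; p = p->iNext)
        printf("req %d\n", p->iId);                 // drains in LIFO order: 2 then 1
    }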
--- a/kernel/eka/nkernsmp/nk_bal.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/nk_bal.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -135,6 +135,7 @@
 	s.iCCReactivateDfc.SetDfcQ(rbQ);
 	s.iCCRequestDfc.SetDfcQ(rbQ);
 	s.iCCPowerDownDfc.SetDfcQ(rbQ);
+	s.iFreqChgDfc.SetDfcQ(rbQ);
 	NThreadBase* lbt = rbQ->iThread;
 	lbt->iRebalanceAttr = 1;
 	TUint32 f = NKern::CpuTimeMeasFreq();
--- a/kernel/eka/nkernsmp/nkern.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/nkern.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -2846,28 +2846,62 @@
 
 /**	Stop all other CPUs
 
-	Call with kernel locked
+Call with kernel unlocked, returns with kernel locked.
+Returns mask of CPUs halted plus current CPU.
 */
-void TStopIPI::StopCPUs()
+TUint32 TStopIPI::StopCPUs()
 	{
+	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"TStopIPI::StopCPUs()");
+	TScheduler& s = TheScheduler;
 	iFlag = 0;
+	NKern::ThreadEnterCS();
+
+	// Stop any cores powering up or down for now
+	// A core already on the way down will stop just before the transition to SHUTDOWN_FINAL
+	// A core already on the way up will carry on powering up
+	TInt irq = s.iGenIPILock.LockIrqSave();
+	++s.iCCDeferCount;	// stops bits in iIpiAcceptCpus being cleared, but doesn't stop them being set
+						// but iIpiAcceptCpus | s.iCpusComingUp is constant
+	TUint32 act2 = s.iIpiAcceptCpus;		// CPUs still accepting IPIs
+	TUint32 cu = s.iCpusComingUp;			// CPUs powering up
+	s.iGenIPILock.UnlockIrqRestore(irq);
+	TUint32 cores = act2 | cu;
+	if (cu)
+		{
+		// wait for CPUs coming up to start accepting IPIs
+		while (cores & ~s.iIpiAcceptCpus)
+			{
+			__snooze();	// snooze until cores have come up
+			}
+		}
+	NKern::Lock();
 	QueueAllOther(&Isr);	// send IPIs to all other CPUs
 	WaitEntry();			// wait for other CPUs to reach the ISR
+	return cores;
 	}
 
+
+/**	Release the stopped CPUs
+
+Call with kernel locked, returns with kernel unlocked.
+*/
 void TStopIPI::ReleaseCPUs()
 	{
-	iFlag = 1;				// allow other CPUs to proceed
+	__e32_atomic_store_rel32(&iFlag, 1);	// allow other CPUs to proceed
 	WaitCompletion();		// wait for them to finish with this IPI
+	NKern::Unlock();
+	TheScheduler.CCUnDefer();
+	NKern::ThreadLeaveCS();
 	}
 
 void TStopIPI::Isr(TGenericIPI* a)
 	{
 	TStopIPI* s = (TStopIPI*)a;
-	while (!s->iFlag)
+	while (!__e32_atomic_load_acq32(&s->iFlag))
 		{
 		__chill();
 		}
+	__e32_io_completion_barrier();
 	}
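The reworked TStopIPI now enters a critical section, defers core control, waits for any cores still powering up, and returns the mask of halted CPUs; ReleaseCPUs() reverses all of that. Below is a host-side model of the underlying rendezvous only — park the other CPUs on a flag, mutate shared state while holding exclusive access, then release them — with std::threads standing in for CPUs (illustrative, not the kernel code):

#include <atomic>
#include <thread>
#include <vector>
#include <cstdio>

std::atomic<int> Flag{0};
std::atomic<int> Parked{0};
int SharedState = 0;                                        // only touched while others are parked

void OtherCpu()
    {
    Parked.fetch_add(1, std::memory_order_acq_rel);         // seen by the initiator's WaitEntry()
    while (!Flag.load(std::memory_order_acquire))           // TStopIPI::Isr() spin
        std::this_thread::yield();                          // "__chill()"
    }

int main()
    {
    const int kOthers = 3;
    std::vector<std::thread> cpus;
    for (int i = 0; i < kOthers; ++i)
        cpus.emplace_back(OtherCpu);
    while (Parked.load(std::memory_order_acquire) != kOthers)
        std::this_thread::yield();                          // StopCPUs(): wait for all to park
    SharedState = 42;                                       // exclusive update
    Flag.store(1, std::memory_order_release);               // ReleaseCPUs()
    for (auto& t : cpus)
        t.join();
    printf("%d\n", SharedState);                            // prints 42
    }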
 
 
--- a/kernel/eka/nkernsmp/nkern.mmp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/nkern.mmp	Wed Jun 23 12:58:21 2010 +0100
@@ -17,6 +17,8 @@
 
 sourcepath				../nkernsmp
 source					nkern.cpp nkerns.cpp sched.cpp dfcs.cpp nk_timer.cpp nk_irq.cpp nk_bal.cpp
+sourcepath				../nkern
+source					nklib.cpp
 
 #ifdef MARM
 sourcepath				../common/arm
@@ -24,6 +26,8 @@
 sourcepath				../nkernsmp/arm
 source					vectors.cia ncsched.cpp ncsched.cia nctimer.cia ncutilf.cia ncirq.cpp ncirq.cia ncthrd.cia
 source					ncutils.cia ncutils.cpp ncthrd.cpp ncglob.cpp nccpu.cpp nccpu.cia
+sourcepath				../nkern/arm
+source					nklib.cia
 
 
 #elif defined(X86)
--- a/kernel/eka/nkernsmp/nkerns.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/nkerns.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -242,8 +242,8 @@
 		iCurrent = iReady;
 		iCpuAffinity = iLastCpu;
 		iEventState = (iLastCpu<<EEventCpuShift) | (iLastCpu<<EThreadCpuShift);
-		ss.SSAddEntry(this);
-		i_NThread_Initial = TRUE;
+		i_NThread_Initial = TRUE;	// must set initial thread flag before adding to subscheduler
+		ss.SSAddEntry(this);		// in order to get correct ready thread count (i.e. not including the idle thread)
 		iACount = 1;
 		ss.iInitialThread = (NThread*)this;
 		NKern::Unlock();		// now that current thread is defined
@@ -538,6 +538,7 @@
 
 NThread* TScheduler::LBThread()
 	{
-	return (NThread*)(TheScheduler.iRebalanceDfcQ->iThread);
+	TDfcQue* rbQ = TheScheduler.iRebalanceDfcQ;
+	return rbQ ? (NThread*)(rbQ->iThread) : 0;
 	}
 
--- a/kernel/eka/nkernsmp/sched.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/sched.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -60,7 +60,8 @@
 		iCCRequestLevel(1),		// only boot CPU for now
 		iCCRequestDfc(&CCRequestDfcFn, this, 2),
 		iCCPowerDownDfc(&CCIndirectPowerDown, this, 0),
-		iCCIpiReactIDFC(&CCIpiReactivateFn, this)
+		iCCIpiReactIDFC(&CCIpiReactivateFn, this),
+		iFreqChgDfc(&DoFrequencyChanged, this, 6)
 	{
 	TInt i;
 	for (i=0; i<KMaxCpus; ++i)
@@ -1360,4 +1361,3 @@
 	DoIdle();
 	}
 
-
--- a/kernel/eka/nkernsmp/x86/ncglob.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/x86/ncglob.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -32,8 +32,6 @@
 TSubScheduler TheSubSchedulers[KMaxCpus];
 extern "C" {
 TSubScheduler* SubSchedulerLookupTable[256];
-
-SVariantInterfaceBlock* VIB;
 }
 
 #ifdef __USE_BTRACE_LOCK__
--- a/kernel/eka/nkernsmp/x86/ncmonitor.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/x86/ncmonitor.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -35,9 +35,8 @@
 	m.Printf("Extras[ 4] %08x Extras[ 5] %08x Extras[ 6] %08x Extras[ 7] %08x\r\n", x.iSSXP[4], x.iSSXP[5], x.iSSXP[6], x.iSSXP[7]);
 	m.Printf("Extras[ 8] %08x i_IrqCount %08x i_ExcInfo  %08x i_CrashSt  %08x\r\n", x.iSSXP[8], x.iIrqCount, x.iExcInfo, x.iCrashState);
 	m.Printf("i_APICID   %08x i_IrqNestC %08x i_IrqStkTp %08x i_Tss      %08x\r\n", x.iAPICID, x.iIrqNestCount, x.iIrqStackTop, x.iTss);
-	m.Printf("CpuFreqM   %08x CpuFreqS   %08x CpuPeriodM %08x CpuPeriodS %08x\r\n", x.iCpuFreqM, x.iCpuFreqS, x.iCpuPeriodM, x.iCpuPeriodS);
-	m.Printf("NTmrFreqM  %08x NTmrFreqS  %08x NTmPeriodM %08x NTmPeriodS %08x\r\n", x.iNTimerFreqM, x.iNTimerFreqS, x.iNTimerPeriodM, x.iNTimerPeriodS);
-	m.Printf("TmrFreqM   %08x TmrFreqS   %08x TmrPeriodM %08x TmrPeriodS %08x\r\n", x.iTimerFreqM, x.iTimerFreqS, x.iTimerPeriodM, x.iTimerPeriodS);
+	m.Printf("CpuFreqM   %08x CpuFreqS   %08x CpuPeriodM %08x CpuPeriodS %08x\r\n", x.iCpuFreqRI.iR.iM, x.iCpuFreqRI.iR.iX, x.iCpuFreqRI.iI.iM, x.iCpuFreqRI.iI.iX);
+	m.Printf("TmrFreqM   %08x TmrFreqS   %08x TmrPeriodM %08x TmrPeriodS %08x\r\n", x.iTimerFreqRI.iR.iM, x.iTimerFreqRI.iR.iX, x.iTimerFreqRI.iI.iM, x.iTimerFreqRI.iI.iX);
 	m.Printf("TmstampOff %08x %08x            iSSXP2[0]  %08x iSSXP2[1]  %08x\r\n", I64HIGH(x.iTimestampOffset.i64), I64LOW(x.iTimestampOffset.i64), x.iSSXP2[0], x.iSSXP2[1]);
 	}
 
--- a/kernel/eka/nkernsmp/x86/ncsched.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/x86/ncsched.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -99,11 +99,8 @@
 	{
 	if (aT->iTime>0 && !aT->i_NThread_Initial)
 		{
-		TUint32 remain32 = read_apic_reg(CURRCNT);
-		TUint64 x(remain32);
-		x *= TUint32(iSSX.iTimerPeriodM);
-		x += 1u<<(iSSX.iTimerPeriodS-1);
-		x >>= iSSX.iTimerPeriodS;
+		TUint32 x = read_apic_reg(CURRCNT);
+		iSSX.iTimerFreqRI.iI.Mult(x);
 		aT->iTime = (TInt)x;
 		}
 	write_apic_reg(INITCNT, 0);
@@ -128,13 +125,9 @@
 		aNew = iInitialThread;
 	if (aNew->iTime>0)
 		{
-		TUint32 remain32 = (TUint32)aNew->iTime;
-		TUint64 x(remain32);
-		x *= TUint32(iSSX.iTimerFreqM);
-		x += TUint64(0x80000000u)<<iSSX.iTimerFreqS;
-		x >>= (32+iSSX.iTimerFreqS);
-		write_apic_reg(LVTTMR, TIMESLICE_VECTOR);
-		write_apic_reg(INITCNT, (TUint32)x);
+		TUint32 x = (TUint32)aNew->iTime;
+		iSSX.iTimerFreqRI.iR.Mult(x);
+		write_apic_reg(INITCNT, x);
 		}
 	if (aNew!=aOld)
 		{
--- a/kernel/eka/nkernsmp/x86/ncutilf.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/x86/ncutilf.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -15,11 +15,8 @@
 // 
 //
 
-#include <nkern.h>
+#include <nk_priv.h>
 
-extern "C" {
-extern SVariantInterfaceBlock* VIB;
-}
 
 /******************************************************************************
  * Spin lock
@@ -65,6 +62,6 @@
 */
 EXPORT_C TUint32 NKern::TimestampFrequency()
 	{
-	return VIB->iTimestampFreq;
+	return TheScheduler.iVIB->iTimestampFreq;
 	}
 
--- a/kernel/eka/nkernsmp/x86/ncutils.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/nkernsmp/x86/ncutils.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -17,10 +17,6 @@
 
 #include <x86.h>
 
-extern "C" {
-extern SVariantInterfaceBlock* VIB;
-}
-
 //#define __DBG_MON_FAULT__
 //#define __RAM_LOADED_CODE__
 //#define __EARLY_DEBUG__
@@ -325,33 +321,25 @@
 void NKern::Init0(TAny* a)
 	{
 	__KTRACE_OPT(KBOOT,DEBUGPRINT("VIB=%08x", a));
-	VIB = (SVariantInterfaceBlock*)a;
-	__NK_ASSERT_ALWAYS(VIB && VIB->iVer==0 && VIB->iSize==sizeof(SVariantInterfaceBlock));
-	__KTRACE_OPT(KBOOT,DEBUGPRINT("iVer=%d iSize=%d", VIB->iVer, VIB->iSize));
-	__KTRACE_OPT(KBOOT,DEBUGPRINT("iMaxCpuClock=%08x %08x", I64HIGH(VIB->iMaxCpuClock), I64LOW(VIB->iMaxCpuClock)));
-	__KTRACE_OPT(KBOOT,DEBUGPRINT("iTimestampFreq=%u", VIB->iTimestampFreq));
-	__KTRACE_OPT(KBOOT,DEBUGPRINT("iMaxTimerClock=%u", VIB->iMaxTimerClock));
+	SVariantInterfaceBlock* v = (SVariantInterfaceBlock*)a;
+	TheScheduler.iVIB = v;
+	__NK_ASSERT_ALWAYS(v && v->iVer==0 && v->iSize==sizeof(SVariantInterfaceBlock));
+	__KTRACE_OPT(KBOOT,DEBUGPRINT("iVer=%d iSize=%d", v->iVer, v->iSize));
+	__KTRACE_OPT(KBOOT,DEBUGPRINT("iMaxCpuClock=%08x %08x", I64HIGH(v->iMaxCpuClock), I64LOW(v->iMaxCpuClock)));
+	__KTRACE_OPT(KBOOT,DEBUGPRINT("iTimestampFreq=%u", v->iTimestampFreq));
+	__KTRACE_OPT(KBOOT,DEBUGPRINT("iMaxTimerClock=%u", v->iMaxTimerClock));
 	TInt i;
 	for (i=0; i<KMaxCpus; ++i)
 		{
 		TSubScheduler& ss = TheSubSchedulers[i];
-		ss.iSSX.iCpuFreqM = KMaxTUint32;
-		ss.iSSX.iCpuFreqS = 0;
-		ss.iSSX.iCpuPeriodM = 0x80000000u;
-		ss.iSSX.iCpuPeriodS = 31;
-		ss.iSSX.iNTimerFreqM = KMaxTUint32;
-		ss.iSSX.iNTimerFreqS = 0;
-		ss.iSSX.iNTimerPeriodM = 0x80000000u;
-		ss.iSSX.iNTimerPeriodS = 31;
-		ss.iSSX.iTimerFreqM = KMaxTUint32;
-		ss.iSSX.iTimerFreqS = 0;
-		ss.iSSX.iTimerPeriodM = 0x80000000u;
-		ss.iSSX.iTimerPeriodS = 31;
+		ss.iSSX.iCpuFreqRI.Set(v->iCpuFreqR[i]);
+		ss.iSSX.iTimerFreqRI.Set(v->iTimerFreqR[i]);
+
 		ss.iSSX.iTimestampOffset.i64 = 0;
-		VIB->iTimerMult[i] = 0;
-		VIB->iCpuMult[i] = 0;
+		v->iCpuFreqR[i] = 0;
+		v->iTimerFreqR[i] = 0;
 		}
-	TheScheduler.iSX.iTimerMax = (VIB->iMaxTimerClock / 128);
+	TheScheduler.iSX.iTimerMax = (v->iMaxTimerClock / 128);
 	InitFpu();
 	InterruptInit0();
 	}
@@ -400,3 +388,6 @@
 	{
 	}
 
+void TScheduler::DoFrequencyChanged(TAny*)
+	{
+	}
--- a/kernel/eka/release.txt	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/release.txt	Wed Jun 23 12:58:21 2010 +0100
@@ -1,3 +1,12 @@
+Version 2.00.3098
+=================
+(Made by vfebvre 18/06/2010)
+
+1.	garciato
+	1.	RP520151
+		REQ:417-52765 - SMP: Product Quality SMP Kernel for Bridge
+
+
 Version 2.00.3097
 =================
 (Made by vfebvre 16/06/2010)
--- a/kernel/eka/rombuild/kernel.hby	Wed Jun 23 12:52:28 2010 +0100
+++ b/kernel/eka/rombuild/kernel.hby	Wed Jun 23 12:58:21 2010 +0100
@@ -17,10 +17,12 @@
 #define EKernelConfigSMPUnsafeCompat	12
 #define EKernelConfigSMPUnsafeCPU0		13
 #define EKernelConfigSMPCrazyInterrupts	14
+#define EKernelConfigSMPLockKernelThreadsCore0 15
 #define EKernelConfigDisableAPs			30
 
 #define CRAZYSCHEDULING(state)	kernelconfig EKernelConfigCrazyScheduling state
 #define SMPUNSAFECOMPAT(state)	kernelconfig EKernelConfigSMPUnsafeCompat state
 #define SMPUNSAFECPU0(state)	kernelconfig EKernelConfigSMPUnsafeCPU0 state
 #define CRAZYINTERRUPTS(state)	kernelconfig EKernelConfigSMPCrazyInterrupts state
+#define SMPLOCKKERNELTHREADSCPU0(state)	kernelconfig EKernelConfigSMPLockKernelThreadsCore0 state
 #define	SMP_USE_BP_ONLY(state)	kernelconfig EKernelConfigDisableAPs state
--- a/kerneltest/e32test/group/bld.inf	Wed Jun 23 12:52:28 2010 +0100
+++ b/kerneltest/e32test/group/bld.inf	Wed Jun 23 12:58:21 2010 +0100
@@ -83,6 +83,7 @@
  positive check ( #ifdef SMP ), however these binaries will not be included in BTB 
  autotest images for SMP platforms. Refer to DTW-KHS BTB00055 for more details.
  ******************************************************************************/
+d_timestamp					support
 d_kerncorestats				support
 d_implicit					support
 d_emitest					support
@@ -711,6 +712,9 @@
 t_domain_slave  support
 domainPolicyTest support
 t_switchoff
+t_frqchg
+// /E32TEST/TIMESTAMP test
+t_timestamp
 
 // /E32TEST/PRIME tests
 t_kern      support
@@ -1064,7 +1068,4 @@
 
 //pci tests
 t_pci
-// timestamp or fastcounter test
-t_timestamp
-d_timestamp		support
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kerneltest/e32test/group/d_frqchg.mmh	Wed Jun 23 12:58:21 2010 +0100
@@ -0,0 +1,48 @@
+// Copyright (c) 2010-2010 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32test/group/d_frqchg.mmh
+// 
+//
+
+// NOTE : include your variant.mmh before this file
+
+#ifdef EPOC32
+target			VariantTarget(d_frqchg,ldd)
+#else
+target			d_frqchg.ldd
+#endif
+#include		"kernel/kern_ext.mmh"
+
+targettype		ldd
+romtarget		d_frqchg.ldd
+
+sourcepath		../power
+source			d_frqchg.cpp
+
+sourcepath		../../../kernel/eka/nkern
+source			nklib.cpp
+
+#ifdef MARM
+sourcepath		../../../kernel/eka/nkern/arm
+source			nklib.cia
+#endif
+
+
+epocallowdlldata
+
+vendorid		0x70000001
+capability		all
+
+macro CPU_AFFINITY_ANY
+smpsafe
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kerneltest/e32test/group/t_frqchg.mmp	Wed Jun 23 12:58:21 2010 +0100
@@ -0,0 +1,27 @@
+// Copyright (c) 2010-2010 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32test\group\t_frqchg.mmp
+// 
+//
+
+target			t_frqchg.exe
+targettype		exe
+sourcepath		../power
+source			t_frqchg.cpp
+library			euser.lib hal.lib
+OS_LAYER_SYSTEMINCLUDE_SYMBIAN
+capability		all
+vendorid		0x70000001
+epocheapsize	0x00001000 0x00100000
+smpsafe
--- a/kerneltest/e32test/group/t_semutx.mmp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kerneltest/e32test/group/t_semutx.mmp	Wed Jun 23 12:58:21 2010 +0100
@@ -19,7 +19,7 @@
 TARGETTYPE     EXE
 SOURCEPATH	../prime
 SOURCE         t_semutx.cpp
-LIBRARY        euser.lib
+LIBRARY        euser.lib hal.lib
 OS_LAYER_SYSTEMINCLUDE_SYMBIAN
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kerneltest/e32test/power/d_frqchg.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -0,0 +1,421 @@
+// Copyright (c) 2010-2010 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32test\power\d_frqchg.cpp
+// LDD for testing frequency changing
+// 
+//
+
+#include <kernel/kernel.h>
+#include "d_frqchg.h"
+
+#if defined(__EPOC32__) && defined(__SMP__) && defined(__MARM__)
+#define __SUPPORT_LOCAL_TIMER_PRESCALE__
+
+#include <nk_priv.h>
+#include <arm_tmr.h>
+#endif
+
+
+#ifdef __PLATFORM_SUPPORTS_DVFS__
+/**
+  The baseport needs to supply this function to disable DVFS while the test is running.
+  The test relies on changing the prescalers of the local and global timers directly rather
+  than actually changing the clock frequency, so DVFS must be disabled for the duration of
+  the test.
+
+  This function is called when the driver is loaded.
+  @return KErrNone if successful
+ */
+extern TInt DisableDvfs();
+
+/**
+   If the platform supports DVFS, this function is called when the driver is unloaded.
+ */
+extern void RestoreDvfs();
+#endif
+
+
+
+#if defined(__SUPPORT_LOCAL_TIMER_PRESCALE__)
+TInt Multiply(SRatio& aDest, const SRatio& aSrc)
+	{
+	TUint64 x = aDest.iM;
+	TUint64 y = aSrc.iM;
+	x *= y;
+	if (x==0)
+		{
+		aDest.iM = 0;
+		aDest.iX = 0;
+		return KErrNone;
+		}
+	TInt exp = aDest.iX + aSrc.iX + 32;
+	if (TInt64(x) >= 0)
+		x<<=1, --exp;
+	aDest.iM = I64HIGH(x);
+	if (I64LOW(x) & 0x80000000u)
+		{
+		if (++aDest.iM == 0)
+			aDest.iM = 0x80000000u, ++exp;
+		}
+	if (exp > 32767)
+		{
+		aDest.iM = 0xffffffffu;
+		aDest.iX = 32767;
+		return KErrOverflow;
+		}
+	if (exp < -32768)
+		{
+		aDest.iM = 0;
+		aDest.iX = 0;
+		return KErrUnderflow;
+		}
+	aDest.iX = (TInt16)exp;
+	return KErrNone;
+	}
+
+// Calculate frequency ratio for specified prescale value
+// Ratio = (default+1)/(current+1)
+void PrescaleRatio(SRatio& aR, TInt aDefault, TInt aCurrent)
+	{
+	SRatio df;
+	df.Set(TUint32(aDefault+1));
+	aR.Set(TUint32(aCurrent+1));
+	aR.Reciprocal();
+	Multiply(aR, df);
+	}
+#endif
+
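Multiply() above composes two ratios by multiplying the normalised mantissas and renormalising, so PrescaleRatio() can form (default+1)/(current+1) as reciprocal(current+1) times (default+1). A small host-side model of the mantissa/exponent multiply (overflow/underflow clamping omitted; values and names are illustrative only):

#include <cstdint>
#include <cstdio>

// value = m * 2^x, with m normalised so bit 31 is set (as in SRatio)
struct R { uint32_t m; int x; };

static R Multiply(R a, R b)
    {
    uint64_t p = (uint64_t)a.m * b.m;                   // 62/63-bit product
    int exp = a.x + b.x + 32;
    if (!(p >> 63)) { p <<= 1; --exp; }                 // renormalise so bit 63 is set
    uint32_t m = (uint32_t)(p >> 32);
    if (((uint32_t)p & 0x80000000u) && ++m == 0)        // round to nearest
        { m = 0x80000000u; ++exp; }
    return R{ m, exp };
    }

int main()
    {
    R quarter = { 0x80000000u, -33 };                   // 1/4 (e.g. prescaler 0 -> 3)
    R three   = { 0xC0000000u, -30 };                   // 3
    R r = Multiply(quarter, three);
    printf("%#x %d\n", r.m, r.x);                       // 0xc0000000 -32, i.e. 3/4
    }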
+class DFrqChgFactory : public DLogicalDevice
+//
+// Test LDD factory
+//
+	{
+public:
+	DFrqChgFactory();
+	virtual ~DFrqChgFactory();
+	virtual TInt Install(); 					//overriding pure virtual
+	virtual void GetCaps(TDes8& aDes) const;	//overriding pure virtual
+	virtual TInt Create(DLogicalChannelBase*& aChannel); 	//overriding pure virtual
+	};
+
+class DFrqChg : public DLogicalChannelBase
+//
+// Test logical channel
+//
+	{
+public:
+	virtual ~DFrqChg();
+protected:
+	virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
+	virtual TInt Request(TInt aReqNo, TAny* a1, TAny* a2);
+#if defined(__SUPPORT_LOCAL_TIMER_PRESCALE__)
+	void PopulateDefaultPrescaleList();
+	void SetLocalTimerPrescaler(TUint32 aCpus, TInt aPrescale);
+	TScheduler* iS;
+	TInt iDefaultPrescale[KMaxCpus];
+#if defined(__CPU_ARM_HAS_GLOBAL_TIMER_BLOCK) && defined( __NKERN_TIMESTAMP_USE_SCU_GLOBAL_TIMER__)
+	void SetGlobalTimerPrescaler(TInt aPrescale);
+	TInt iDefaultGTPrescale;
+#endif
+#endif
+
+	};
+
+
+
+DECLARE_STANDARD_LDD()
+	{
+	return new DFrqChgFactory;
+	}
+
+//
+// Constructor
+//
+DFrqChgFactory::DFrqChgFactory()
+	{
+	}
+
+//
+// Destructor, called on unload
+//
+DFrqChgFactory::~DFrqChgFactory()
+	{
+#ifdef __PLATFORM_SUPPORTS_DVFS__
+	RestoreDvfs();
+#endif
+	}
+
+
+
+//
+// Create new channel
+//
+TInt DFrqChgFactory::Create(DLogicalChannelBase*& aChannel)
+	{
+	aChannel=new DFrqChg;
+	return aChannel?KErrNone:KErrNoMemory;
+	}
+
+
+//
+// Install the LDD - overriding pure virtual
+//
+TInt DFrqChgFactory::Install()
+	{
+#ifdef __PLATFORM_SUPPORTS_DVFS__
+	TInt r = DisableDvfs();
+	if (KErrNone != r) return r;
+#endif
+	return SetName(&KLddName);
+	}
+
+
+//
+// Get capabilities - overriding pure virtual
+//
+void DFrqChgFactory::GetCaps(TDes8& /*aDes*/) const
+	{
+	}
+
+
+//
+// Create channel
+//
+TInt DFrqChg::DoCreate(TInt /*aUnit*/, const TDesC8* /*aInfo*/, const TVersion& /*aVer*/)
+	{
+#if defined(__SUPPORT_LOCAL_TIMER_PRESCALE__)
+	iS = (TScheduler*)TScheduler::Ptr();
+	PopulateDefaultPrescaleList();
+#endif
+	return KErrNone;
+	}
+
+
+//
+// Destructor
+//
+DFrqChg::~DFrqChg()
+	{
+#if defined(__SUPPORT_LOCAL_TIMER_PRESCALE__)
+	// restore prescalers
+	SetLocalTimerPrescaler((TUint32) -1, -1);
+#if defined(__CPU_ARM_HAS_GLOBAL_TIMER_BLOCK) && defined( __NKERN_TIMESTAMP_USE_SCU_GLOBAL_TIMER__)
+	SetGlobalTimerPrescaler(-1);
+#endif
+#endif
+	}
+
+
+TInt DFrqChg::Request(TInt aReqNo, TAny* a1, TAny* a2)
+	{
+	SRatioInv ri;
+	TInt r = KErrNone;
+	switch (aReqNo)
+		{
+		case RFrqChg::EControl_RatioSet:
+			{
+			kumemget32(&ri.iR, a1, sizeof(SRatio));
+			ri.iR.Set(ri.iR.iM, (TUint32)a2);
+			kumemput32(a1, &ri.iR, sizeof(SRatio));
+			break;
+			}
+		case RFrqChg::EControl_RatioReciprocal:
+			{
+			kumemget32(&ri.iR, a1, sizeof(SRatio));
+			r = ri.iR.Reciprocal();
+			kumemput32(a1, &ri.iR, sizeof(SRatio));
+			break;
+			}
+		case RFrqChg::EControl_RatioMult:
+			{
+			kumemget32(&ri.iR, a1, sizeof(SRatio));
+			kumemget32(&ri.iI.iM, a2, sizeof(TUint32));
+			r = ri.iR.Mult(ri.iI.iM);
+			kumemput32(a2, &ri.iI.iM, sizeof(TUint32));
+			break;
+			}
+		case RFrqChg::EControl_RatioInvSet:
+			{
+			SRatio ratio;
+			const SRatio* p = 0;
+			if (a2)
+				{
+				kumemget32(&ratio, a2, sizeof(SRatio));
+				p = &ratio;
+				}
+			ri.Set(p);
+			kumemput32(a1, &ri, sizeof(SRatioInv));
+			break;
+			}
+#if defined(__EPOC32__) && defined(__SMP__) && defined(__MARM__)
+		case RFrqChg::EControl_FrqChgTestPresent:
+			break;
+		case RFrqChg::EControl_SetCurrentThreadPriority:
+			NKern::ThreadSetPriority(NKern::CurrentThread(), (TInt)a1);
+			break;
+		case RFrqChg::EControl_SetCurrentThreadCpu:
+			{
+			TUint32 old = 0;
+			old =  NKern::ThreadSetCpuAffinity(NKern::CurrentThread(), (TUint32)a1);
+			if (a2) 
+				{
+				kumemput32(a2, &old, sizeof(TUint32));
+				}
+			
+			old =  NKern::ThreadSetCpuAffinity(NKern::CurrentThread(), (TUint32)a1);
+			}
+			
+			break;
+		case RFrqChg::EControl_SetCurrentThreadTimeslice:
+			{
+			TInt ts = NKern::TimesliceTicks((TUint32)a1);
+			NKern::ThreadSetTimeslice(NKern::CurrentThread(), ts);
+			NKern::YieldTimeslice();
+			break;
+			}
+#endif
+#if defined(__SUPPORT_LOCAL_TIMER_PRESCALE__)
+		case RFrqChg::EControl_SetLocalTimerPrescaler:
+			{
+			TUint32 cpus = (TUint32)a1;
+			TInt prescale = (TInt)a2;
+			SetLocalTimerPrescaler(cpus, prescale);
+			break;
+			}
+#if defined(__CPU_ARM_HAS_GLOBAL_TIMER_BLOCK) && defined( __NKERN_TIMESTAMP_USE_SCU_GLOBAL_TIMER__)
+		case RFrqChg::EControl_ReadGlobalTimerAndTimestamp:
+			{
+			ArmGlobalTimer* tmr = iS->iSX.iGlobalTimerAddr;
+			TUint32 highlow[2];
+			do
+				{
+				highlow[1] = tmr->iTimerCountHigh;
+				highlow[0] = tmr->iTimerCountLow;
+				} while(highlow[1]!=tmr->iTimerCountHigh);
+			TUint64 ts = NKern::Timestamp();
+			kumemput32(a1,&highlow[0],sizeof(TUint64));
+			kumemput32(a2,&ts,sizeof(TUint64));
+			break;
+			}
+		case RFrqChg::EControl_SetGlobalTimerPrescaler:
+			{
+			SetGlobalTimerPrescaler((TInt)a1);
+			break;
+			}
+#endif
+#endif
+		default:
+			r = KErrNotSupported;
+			break;
+		}
+	return r;
+	}
+
+
+#if defined(__SUPPORT_LOCAL_TIMER_PRESCALE__)
+void DFrqChg::PopulateDefaultPrescaleList()
+	{
+	TInt nc = NKern::NumberOfCpus();
+	NThread* nt = NKern::CurrentThread();
+	TUint32 aff0 = NKern::ThreadSetCpuAffinity(nt, 0);
+	TInt i;
+	for (i=0; i<nc; ++i)
+		{
+		NKern::ThreadSetCpuAffinity(nt, i);
+		ArmLocalTimer* tmr = (ArmLocalTimer*)iS->iSX.iLocalTimerAddr;
+		TInt pv = (tmr->iTimerCtrl & E_ArmTmrCtrl_PrescaleMask) >> E_ArmTmrCtrl_PrescaleShift;
+		iDefaultPrescale[i] = pv;
+		}
+	NKern::ThreadSetCpuAffinity(nt, aff0);
+#if defined(__CPU_ARM_HAS_GLOBAL_TIMER_BLOCK) && defined( __NKERN_TIMESTAMP_USE_SCU_GLOBAL_TIMER__)
+	ArmGlobalTimer* tmr = iS->iSX.iGlobalTimerAddr;
+	TInt pv = (tmr->iTimerCtrl & E_ArmGTmrCtrl_PrescaleMask) >> E_ArmGTmrCtrl_PrescaleShift;
+	iDefaultGTPrescale = pv;
+#endif
+	}
+
+void DFrqChg::SetLocalTimerPrescaler(TUint32 aCpus, TInt aPrescale)
+	{
+	TInt nc = NKern::NumberOfCpus();
+	NThread* nt = NKern::CurrentThread();
+	TUint32 aff0 = NKern::ThreadSetCpuAffinity(nt, 0);
+	TInt i;
+	for (i=0; i<nc; ++i)
+		{
+		NKern::ThreadSetCpuAffinity(nt, i);
+		}
+	for (i=0; i<nc; ++i)
+		{
+		NKern::ThreadSetCpuAffinity(nt, i);
+		TInt pv = aPrescale;
+		if (pv < 0)
+			pv = iDefaultPrescale[i];
+		if (aCpus & (1u<<i))
+			{
+			TInt irq = NKern::DisableAllInterrupts();
+			ArmLocalTimer* tmr = (ArmLocalTimer*)iS->iSX.iLocalTimerAddr;
+			tmr->iTimerCtrl = (tmr->iTimerCtrl &~ E_ArmTmrCtrl_PrescaleMask) | ((pv << E_ArmTmrCtrl_PrescaleShift) & E_ArmTmrCtrl_PrescaleMask);
+			__e32_io_completion_barrier();
+			NKern::RestoreInterrupts(irq);
+			}
+		}
+	NKern::ThreadSetCpuAffinity(nt, aff0);
+	if (aCpus & 0x80000000u)
+		{
+		// notify nanokernel of frequency changes
+		SVariantInterfaceBlock* vib = iS->iVIB;
+		SRatio ratio[KMaxCpus];
+		for (i=0; i<nc; ++i)
+			{
+			if (aCpus & (1u<<i))
+				{
+				if (aPrescale<0)
+					ratio[i].Set(1);
+				else
+					PrescaleRatio(ratio[i], iDefaultPrescale[i], aPrescale);
+				vib->iTimerFreqR[i] = &ratio[i];
+				}
+			}
+		(*vib->iFrqChgFn)();
+		}
+	}
+
+#if defined(__CPU_ARM_HAS_GLOBAL_TIMER_BLOCK) && defined( __NKERN_TIMESTAMP_USE_SCU_GLOBAL_TIMER__)
+void DFrqChg::SetGlobalTimerPrescaler(TInt aPrescale)
+	{
+	TInt pv = aPrescale;
+	if (pv <= 0)
+		pv = iDefaultGTPrescale;
+
+	ArmGlobalTimer* tmr = iS->iSX.iGlobalTimerAddr;
+	// TInt irq = NKern::DisableAllInterrupts(); 
+	tmr->iTimerCtrl = (tmr->iTimerCtrl &~ E_ArmGTmrCtrl_PrescaleMask) | ((pv << E_ArmGTmrCtrl_PrescaleShift) & E_ArmGTmrCtrl_PrescaleMask);
+	__e32_io_completion_barrier();
+	// NKern::RestoreInterrupts(irq);
+
+	// notify nanokernel of frequency changes
+	SVariantInterfaceBlock* vib = iS->iVIB;
+	SRatio ratio;
+	
+	if (aPrescale<=0)
+		ratio.Set(1);
+	else
+		PrescaleRatio(ratio, iDefaultGTPrescale, aPrescale);
+
+	vib->iGTimerFreqR = &ratio;
+	(*vib->iFrqChgFn)();
+	}
+
+#endif
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kerneltest/e32test/power/d_frqchg.h	Wed Jun 23 12:58:21 2010 +0100
@@ -0,0 +1,107 @@
+// Copyright (c) 2010-2010 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32test\power\d_frqchg.h
+// 
+//
+
+#if !defined(__D_FRQCHG_H__)
+#define __D_FRQCHG_H__
+
+#include <e32cmn.h>
+
+struct SRatio;
+struct SRatioInv;
+
+#ifndef __KERNEL_MODE__
+#include <e32std.h>
+
+struct SRatio
+	{
+	TUint32		iM;		// mantissa, normalised so bit 31=1
+	TInt16		iX;		// -exponent.
+	TUint8		iSpare1;
+	TUint8		iSpare2;
+	};
+
+struct SRatioInv
+	{
+	SRatio		iR;
+	SRatio		iI;
+	};
+#endif
+
+_LIT(KLddName,"D_FRQCHG.LDD");
+
+class RFrqChg : public RBusLogicalChannel
+	{
+public:
+	enum TControl
+		{
+		EControl_RatioSet,
+		EControl_RatioReciprocal,
+		EControl_RatioMult,
+		EControl_RatioInvSet,
+		EControl_FrqChgTestPresent,
+		EControl_SetCurrentThreadPriority,
+		EControl_SetCurrentThreadCpu,
+		EControl_SetCurrentThreadTimeslice,
+		EControl_SetLocalTimerPrescaler,
+		EControl_ReadGlobalTimerAndTimestamp,
+		EControl_SetGlobalTimerPrescaler,
+		ENumControls
+		};
+
+public:
+	inline TInt Open();
+	inline TInt RatioSet(SRatio& aRatio, TUint32 aInt, TInt aDivisorExp=0);
+	inline TInt RatioReciprocal(SRatio& aRatio);
+	inline TInt RatioMult(SRatio& aRatio, TUint32& aInt32);
+	inline TInt RatioInvSet(SRatioInv& aRI, const SRatio* aR);
+	inline TInt FrqChgTestPresent();
+	inline TInt SetCurrentThreadPriority(TInt aPri);
+	inline TInt SetCurrentThreadCpu(TUint32 aCpu, TUint32* aOldAffinity = NULL);
+	inline TInt SetCurrentThreadTimeslice(TInt aSlice);
+	inline TInt SetLocalTimerPrescaler(TUint32 aCpus, TInt aPrescale);
+	inline TInt ReadGlobalTimerAndTimestamp(TUint64& aTimerValue, TUint64& aTimestamp);
+	inline TInt SetGlobalTimerPrescaler(TInt aPrescale);
+	};
+
+#ifndef __KERNEL_MODE__
+inline TInt RFrqChg::Open()
+	{ return DoCreate(KLddName,TVersion(0,1,1),KNullUnit,NULL,NULL); }
+inline TInt RFrqChg::RatioSet(SRatio& aR, TUint32 aInt, TInt aDivisorExp)
+	{ aR.iM=aInt; return DoControl(EControl_RatioSet, (TAny*)&aR, (TAny*)aDivisorExp); }
+inline TInt RFrqChg::RatioReciprocal(SRatio& aR)
+	{ return DoControl(EControl_RatioReciprocal, (TAny*)&aR); }
+inline TInt RFrqChg::RatioMult(SRatio& aR, TUint32& aInt32)
+	{ return DoControl(EControl_RatioMult, (TAny*)&aR, (TAny*)&aInt32); }
+inline TInt RFrqChg::RatioInvSet(SRatioInv& aRI, const SRatio* aR)
+	{ return DoControl(EControl_RatioInvSet, (TAny*)&aRI, (TAny*)aR); }
+inline TInt RFrqChg::FrqChgTestPresent()
+	{ return DoControl(EControl_FrqChgTestPresent); }
+inline TInt RFrqChg::SetCurrentThreadPriority(TInt aPri)
+	{ return DoControl(EControl_SetCurrentThreadPriority, (TAny*)aPri); }
+inline TInt RFrqChg::SetCurrentThreadCpu(TUint32 aCpu, TUint32* aOldAffinity)
+	{ return DoControl(EControl_SetCurrentThreadCpu, (TAny*)aCpu, (TAny*) aOldAffinity); }
+inline TInt RFrqChg::SetCurrentThreadTimeslice(TInt aSlice)
+	{ return DoControl(EControl_SetCurrentThreadTimeslice, (TAny*)aSlice); }
+inline TInt RFrqChg::SetLocalTimerPrescaler(TUint32 aCpus, TInt aPrescale)
+	{ return DoControl(EControl_SetLocalTimerPrescaler, (TAny*)aCpus, (TAny*)aPrescale); }
+inline TInt RFrqChg::ReadGlobalTimerAndTimestamp(TUint64& aTimerValue, TUint64& aTimestamp)
+	{ return DoControl(EControl_ReadGlobalTimerAndTimestamp, (TAny*)&aTimerValue, (TAny*) &aTimestamp); }
+inline TInt RFrqChg::SetGlobalTimerPrescaler(TInt aPrescale)
+	{ return DoControl(EControl_SetGlobalTimerPrescaler, (TAny*)aPrescale); }
+#endif
+
+#endif   //__D_FRQCHG_H__
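
The SRatio structure above carries a frequency scaling factor as a 32-bit mantissa (normalised so bit 31 is set) plus a signed binary exponent; as implied by the RealToRatio()/RatioToReal() helpers in t_frqchg.cpp below, the encoded value is iM * 2^iX. A minimal stand-alone sketch of that decoding (editor's illustration only, not part of this changeset; plain C++ rather than EPOC types for brevity):

#include <math.h>
#include <stdio.h>

// Decode an SRatio-style (mantissa, exponent) pair into a double,
// assuming value = m * 2^x as used by the test's conversion helpers.
static double RatioToDouble(unsigned m, int x)
	{
	return (double)m * ldexp(1.0, x);	// m * 2^x
	}

int main()
	{
	// 0xb504f334 with exponent -32 encodes 0xb504f334 / 2^32 ~= 0.7071,
	// matching the Test1(0xb504f334u,32) case in TestRatios() below.
	printf("%f\n", RatioToDouble(0xb504f334u, -32));
	return 0;
	}
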
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kerneltest/e32test/power/t_frqchg.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -0,0 +1,775 @@
+// Copyright (c) 2010-2010 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32test\power\t_frqchg.cpp
+//
+//
+
+#define __E32TEST_EXTENSION__
+#include <e32test.h>
+#include <e32math.h>
+#include <e32atomics.h>
+#include <hal.h>
+#include "d_frqchg.h"
+#include <e32svr.h>
+#include "u32std.h"
+
+RFrqChg Driver;
+RTest test(_L("T_FRQCHG"));
+
+// test will fail if the measured slice is > (expected + KSliceDeltaPercent% of expected)
+// or < (expected - KSliceDeltaPercent% of expected)
+const TInt KSliceDeltaPercent = 5;
+// test will fail for global timer based timestamps if the measured interval
+// is > (expected + KTimeStampDeltaPercent% of expected)
+// or < (expected - KTimeStampDeltaPercent% of expected)
+const TInt KTimeStampDeltaPercent = 5;
+
+TInt RealToRatio(SRatio& aRatio, const TRealX& aReal)
+	{
+	aRatio.iSpare1 = 0;
+	aRatio.iSpare2 = 0;
+	if (aReal.iSign || aReal.IsZero() || aReal.IsNaN())
+		{
+		aRatio.iM = 0;
+		aRatio.iX = 0;
+		return (aReal.IsZero()) ? KErrNone : KErrNotSupported;
+		}
+	TRealX rx(aReal);
+	TRealX rr(rx);
+	rr.iExp -= 32;
+	rr.iMantLo = 0;
+	rr.iMantHi = 0x80000000u;
+	rx += rr;	// rounding
+	TInt exp = rx.iExp - 32767 - 31;
+	if (exp < -32768)
+		{
+		aRatio.iM = 0;
+		aRatio.iX = 0;
+		return KErrUnderflow;
+		}
+	if (exp > 32767)
+		{
+		aRatio.iM = 0xffffffffu;
+		aRatio.iX = 32767;
+		return KErrOverflow;
+		}
+	aRatio.iM = rx.iMantHi;
+	aRatio.iX = (TInt16)exp;
+	return KErrNone;
+	}
+
+TInt RatioToReal(TRealX& a, const SRatio& aRatio)
+	{
+	a.iSign = 0;
+	a.iFlag = 0;
+	a.iMantLo = 0;
+	a.iMantHi = aRatio.iM;
+	if (!aRatio.iM)
+		{
+		a.SetZero();
+		return KErrNone;
+		}
+	TInt exp = aRatio.iX + 31 + 32767;
+	if (exp > 65534)
+		{
+		a.SetInfinite(EFalse);
+		}
+	else
+		{
+		a.iExp = (TUint16)exp;
+		}
+	return KErrNone;
+	}
+
+TInt RatioSetValue(TRealX& a, TUint32 aInt, TInt aDivisorExp)
+	{
+	a.Set(TUint(aInt));
+	TInt exp = a.iExp;
+	exp -= aDivisorExp;
+	if (exp<1)
+		{
+		a.SetZero();
+		return KErrUnderflow;
+		}
+	if (exp>65534)
+		{
+		a.SetInfinite(EFalse);
+		return KErrOverflow;
+		}
+	a.iExp = (TInt16)exp;
+	return KErrNone;
+	}
+
+TInt RatioReciprocal(SRatio& aRatio)
+	{
+	TRealX rx;
+	TInt r = RatioToReal(rx, aRatio);
+	if (r != KErrNone)
+		return r;
+	rx = TRealX(1) / rx;
+	return RealToRatio(aRatio, rx);
+	}
+
+TInt RatioMult(const SRatio& aRatio, TUint32& aInt32)
+	{
+	TRealX rx;
+	TInt r = RatioToReal(rx, aRatio);
+	if (r != KErrNone)
+		return r;
+	r = rx.MultEq(TRealX((TUint)aInt32));
+	if (r != KErrNone)
+		return r;
+	if (rx.IsZero())
+		{
+		aInt32 = 0;
+		return KErrNone;
+		}
+	rx.AddEq(TRealX(0.5));
+	if (rx<TRealX(1))
+		{
+		aInt32 = 0;
+		return KErrUnderflow;
+		}
+	if (rx.iExp > 32767+31)
+		{
+		aInt32 = ~0u;
+		return KErrOverflow;
+		}
+	aInt32 = rx.operator TUint();
+	return KErrNone;
+	}
+
+void RatioPrint(const char* aTitle, const SRatio& aRatio)
+	{
+	TPtrC8 t8((const TUint8*)aTitle);
+	TBuf<256> t16;
+	t16.Copy(t8);
+	test.Printf(_L("%S: %08x %04x\n"), &t16, aRatio.iM, TUint16(aRatio.iX));
+	}
+
+void RatioPrint2(const char* aTitle, const SRatio& aR1, const SRatio& aR2)
+	{
+	TPtrC8 t8((const TUint8*)aTitle);
+	TBuf<256> t16;
+	t16.Copy(t8);
+	test.Printf(_L("%S: %08x %04x   %08x %04x\n"), &t16, aR1.iM, TUint16(aR1.iX), aR2.iM, TUint16(aR2.iX));
+	}
+
+void TestEqual(const SRatio& aActual, const SRatio& aExpected)
+	{
+	if (aActual.iM==aExpected.iM && aActual.iX==aExpected.iX)
+		return;
+	RatioPrint("Actual", aActual);
+	RatioPrint("Expected", aExpected);
+	test(0);
+	}
+
+const TUint32 MultTestIntegers[] =
+	{
+	0u, 1u, 2u, 3u, 5u, 7u, 11u, 13u, 17u, 19u, 23u, 29u, 31u, 37u, 41u, 43u, 47u,
+	50u, 51u, 53u, 59u, 61u, 63u, 67u, 71u, 72u, 81u, 100u, 127u, 133u, 187u, 200u,
+	4u, 8u, 16u, 32u, 64u, 128u, 256u, 512u, 1024u, 2048u, 4096u, 8192u, 16384u,
+	32768u, 65536u, 131072u, 262144u, 524288u, 1048576u, 2097152u, 4194304u, 8388608u,
+	16777216u, 33554432u, 67108864u, 134217728u, 268435456u, 536870912u, 1073741824u,
+	2147483648u, 4294967295u,
+	9u, 27u, 243u, 729u, 2187u, 6561u, 19683u, 59049u, 177147u, 531441u, 1594323u,
+	4782969u, 14348907u, 43046721u, 129140163u, 387420489u, 1162261467u, 3486784401u,
+	25u, 125u, 625u, 3125u, 15625u, 78125u, 390625u, 1953125u, 9765625u,
+	48828125u, 244140625u, 1220703125u,
+	49u, 343u, 2401u, 16807u, 117649u, 823543u, 5764801u, 40353607u, 282475249u, 1977326743u
+	};
+
+void Test1M(const SRatio& aRatio)
+	{
+	SRatio ratio = aRatio;
+	const TInt N = sizeof(MultTestIntegers)/sizeof(MultTestIntegers[0]);
+	test.Printf(_L("Testing %d integers\n"), N);
+	TInt i;
+	for (i=0; i<N; ++i)
+		{
+		TUint32 I = MultTestIntegers[i];
+		TUint32 I0 = I;
+		TUint32 I1 = I;
+		TInt r0 = RatioMult(aRatio, I0);
+		TInt r1 = Driver.RatioMult(ratio, I1);
+		if (r0!=KErrNone || r1!=KErrNone)
+			{
+			if (r0!=r1)
+				{
+				test.Printf(_L("Return code mismatch r0=%d r1=%d (I=%08x I0=%08x I1=%08x)\n"), r0, r1, I, I0, I1);
+				test(0);
+				}
+			}
+		else if (I0!=I1)
+			{
+			test.Printf(_L("Result mismatch I=%08x I0=%08x I1=%08x\n"), I, I0, I1);
+			}
+		}
+	}
+
+void Test1(TUint32 aInt, TInt aDivisorExp)
+	{
+	TRealX realx;
+	SRatio r0x;
+	SRatio r0;
+	SRatio r1x;
+	SRatio r1;
+	TInt r;
+	test.Printf(_L("Test1 %08x %d\n"), aInt, aDivisorExp);
+	r = RatioSetValue(realx, aInt, aDivisorExp);
+	test_KErrNone(r);
+	r = RealToRatio(r0x, realx);
+	test_KErrNone(r);
+	r = Driver.RatioSet(r0, aInt, aDivisorExp);
+	RatioPrint2("R0X,R0", r0x, r0);
+	TestEqual(r0, r0x);
+	Test1M(r0);
+	r1x = r0x;
+	r = RatioReciprocal(r1x);
+	test_KErrNone(r);
+	r1 = r0;
+	r = Driver.RatioReciprocal(r1);
+	test_KErrNone(r);
+	RatioPrint2("R1X,R1", r1x, r1);
+	TestEqual(r1, r1x);
+	Test1M(r1);
+	}
+
+void TestRatios()
+	{
+	Test1(1,0);
+	Test1(3,0);
+	Test1(0xb504f334u,32);
+	Test1(0xc90fdaa2u,30);
+	Test1(10,0);
+	Test1(0xcccccccd,35);
+	Test1(100,0);
+	Test1(0xa3d70a3d,38);
+	}
+
+class CircBuf
+	{
+public:
+	static CircBuf* New(TInt aSlots);
+	CircBuf();
+	~CircBuf();
+	TInt TryPut(TUint32 aIn);
+	void Reset();
+public:
+	volatile TUint32* iBufBase;
+	TUint32 iSlotCount;
+	volatile TUint32 iPutIndex;
+	};
+
+CircBuf* CircBuf::New(TInt aSlots)
+	{
+	test(TUint32(aSlots-1)<65536);
+	CircBuf* p = new CircBuf();
+	p->iSlotCount = aSlots;
+	p->iPutIndex = 0;
+	p->iBufBase = (TUint32*)User::Alloc(aSlots*sizeof(TUint32));
+	if (!p->iBufBase)
+		{
+		delete p;
+		p = 0;
+		}
+	__e32_memory_barrier();
+	return p;
+	}
+
+CircBuf::CircBuf()
+	{
+	iBufBase = 0;
+	}
+
+CircBuf::~CircBuf()
+	{
+	User::Free((TAny*)iBufBase);
+	}
+
+TInt CircBuf::TryPut(TUint32 aIn)
+	{
+	TUint32 orig = __e32_atomic_tau_rlx32(&iPutIndex, iSlotCount, 0, 1);
+	if (orig == iSlotCount)
+		return KErrOverflow;
+	iBufBase[orig] = aIn;
+	return KErrNone;
+	}
+
+void CircBuf::Reset()
+	{
+	__e32_atomic_store_ord32(&iPutIndex, 0);
+	}
+
+
+
+class CTimesliceTestThread : public CBase
+	{
+public:
+	CTimesliceTestThread();
+	~CTimesliceTestThread();
+	static CTimesliceTestThread* New(TUint32 aId, TInt aCpu, TInt aSlice, CircBuf* aBuf);
+	void Start();
+	void Wait();
+	TBool Finished();
+	TInt Construct(TUint32 aId, TInt aCpu, TInt aSlice, CircBuf* aBuf);
+	static TInt ThreadFunc(TAny*);
+public:
+	RThread	iThread;
+	TRequestStatus iExitStatus;
+	TUint32 iId;
+	CircBuf* iBuf;
+	TUint32 iFreq;
+	TUint32 iThresh;
+	TUint32 iThresh2;
+	TInt iCpu;
+	TInt iSlice;
+	};
+
+CTimesliceTestThread::CTimesliceTestThread()
+	{
+	iThread.SetHandle(0);
+	}
+
+CTimesliceTestThread::~CTimesliceTestThread()
+	{
+	if (iThread.Handle())
+		{
+		if (iThread.ExitType() == EExitPending)
+			{
+			iThread.Kill(0);
+			Wait();
+			}
+		CLOSE_AND_WAIT(iThread);
+		}
+	}
+
+TInt CTimesliceTestThread::Construct(TUint32 aId, TInt aCpu, TInt aSlice, CircBuf* aBuf)
+	{
+	iId = aId;
+	iCpu = aCpu;
+	iSlice = aSlice;
+	iBuf = aBuf;
+
+	TInt r = HAL::Get(HAL::EFastCounterFrequency, (TInt&)iFreq);
+	if (r!=KErrNone)
+		return r;
+	iThresh = iFreq / 3000;
+	if (iThresh < 10)
+		iThresh = 10;
+	iThresh2 = iFreq;
+	TBuf<16> name = _L("TSThrd");
+	name.AppendNum(iId);
+	r = iThread.Create(name, &ThreadFunc, 0x1000, NULL, this);
+	if (r!=KErrNone)
+		return r;
+	iThread.Logon(iExitStatus);
+	if (iExitStatus != KRequestPending)
+		{
+		iThread.Kill(0);
+		iThread.Close();
+		iThread.SetHandle(0);
+		return iExitStatus.Int();
+		}
+	return KErrNone;
+	}
+
+CTimesliceTestThread* CTimesliceTestThread::New(TUint32 aId, TInt aCpu, TInt aSlice, CircBuf* aBuf)
+	{
+	CTimesliceTestThread* p = new CTimesliceTestThread;
+	if (p)
+		{
+		TInt r = p->Construct(aId, aCpu, aSlice, aBuf);
+		if (r != KErrNone)
+			{
+			delete p;
+			p = 0;
+			}
+		}
+	return p;
+	}
+
+void CTimesliceTestThread::Start()
+	{
+	iThread.Resume();
+	}
+
+TBool CTimesliceTestThread::Finished()
+	{
+	return (KRequestPending!=iExitStatus.Int());
+	}
+
+void CTimesliceTestThread::Wait()
+	{
+	User::WaitForRequest(iExitStatus);
+	}
+
+TInt CTimesliceTestThread::ThreadFunc(TAny* aPtr)
+	{
+	CTimesliceTestThread& a = *(CTimesliceTestThread*)aPtr;
+	Driver.SetCurrentThreadCpu(a.iCpu);
+	Driver.SetCurrentThreadPriority(63);
+	Driver.SetCurrentThreadTimeslice(a.iSlice);
+	User::AfterHighRes(100000);
+	TUint id = a.iId;
+	TUint32 last_interval_begin = User::FastCounter();
+	TUint32 last_seen_time = User::FastCounter();
+	FOREVER
+		{
+		TUint32 nfc = User::FastCounter();
+		TUint32 delta = nfc - last_seen_time;
+		TUint32 interval_length = last_seen_time - last_interval_begin;
+		if (delta > a.iThresh || interval_length > a.iThresh2)
+			{
+			last_interval_begin = nfc;
+			TUint32 x = (id<<30) | (interval_length&0x3fffffffu);
+			TInt r = a.iBuf->TryPut(x);
+			if (r != KErrNone)
+				break;
+			}
+		last_seen_time = nfc;
+		}
+	return KErrNone;
+	}
+
+CircBuf* RunTimesliceTest(TInt aCpu, TInt aSlice, TInt aCount, TInt aInterfere = 0)
+ 	{
+	TUint32 oldaff = 0;
+	TUint32 interfereAffinity = 0; 
+	TUint tellKernel = 0x80000000u;
+	
+	CircBuf* buf = CircBuf::New(aCount);
+	test(buf != 0);
+	CTimesliceTestThread* t0 = CTimesliceTestThread::New(0, aCpu, aSlice, buf);
+	test(t0 != 0);
+	CTimesliceTestThread* t1 = CTimesliceTestThread::New(1, aCpu, aSlice, buf);
+	test(t1 != 0);
+
+	if (aInterfere) 
+		{
+		if (aInterfere < 0) 
+			{
+			tellKernel = 0;
+			}
+		TInt r = UserSvr::HalFunction(EHalGroupKernel, EKernelHalNumLogicalCpus, 0, 0);
+		test(r>0);
+		interfereAffinity = (0x80000000 | ((0x1<<r)-1)) & ~0x2; // all except core 1
+		if (0x80000001 == interfereAffinity) 
+			{
+			interfereAffinity = 0;   // dual-core system (without this the affinity check later fails)
+			}
+		
+		Driver.SetCurrentThreadCpu(interfereAffinity, &oldaff);   // move away from core 1 (doesn't hurt, though it gains little)
+		Driver.SetCurrentThreadPriority(63);                       // changing the prescaler requires running on core 1, so the priority needs to
+		}                                                          // match the test threads
+
+
+	t0->Start();
+	t1->Start();
+	if (aInterfere) 
+		{
+		TInt prescale = 1;
+		while (!t0->Finished() || !t1->Finished()) 
+			{
+			User::AfterHighRes(23000);
+			Driver.SetLocalTimerPrescaler((1u<<1)|tellKernel, prescale);
+			prescale++;
+			if (prescale >  4) 
+				{
+				prescale = 0;
+				}
+			}
+		}
+
+	t0->Wait();
+	t1->Wait();
+	
+	delete t0;
+	delete t1;
+	if (aInterfere) 
+		{
+		TUint32 aff;
+		Driver.SetLocalTimerPrescaler((1u<<1)|0x80000000u, -1);
+		RThread().SetPriority(EPriorityNormal);
+		Driver.SetCurrentThreadCpu(oldaff,&aff);
+		test_Equal(aff,interfereAffinity);
+		}
+	return buf;
+	}
+
+TUint32 ticks_to_us(TUint32 aTicks, TUint32 aF)
+	{
+	TUint64 x = TUint64(aTicks) * TUint64(1000000);
+	TUint64 f64 = aF;
+	x += (f64>>1);
+	x /= f64;
+	return I64LOW(x);
+	}
+
+void DisplayBuffer(CircBuf* aBuf, TUint32 aSlice )
+	{
+	TUint32 f;
+	TInt r = HAL::Get(HAL::EFastCounterFrequency, (TInt&)f);
+	test_KErrNone(r);
+	TUint32* p = (TUint32*)aBuf->iBufBase;
+	TInt c = aBuf->iSlotCount;
+	TInt i;
+	TInt lid = -1;
+	TUint32 min = ~0u;
+	TUint32 max = 0;
+	TUint32 totivus = 0;
+	TBool firstchg = ETrue;
+	for (i=0; i<c; ++i)
+		{
+		TUint32 x = p[i];
+		TUint32 id = x>>30;
+		TUint32 iv = (x<<2)>>2;
+		TUint32 ivus = ticks_to_us(iv,f);
+		if (lid >= 0)
+			{
+			if (lid == (TInt)id)
+				totivus += ivus;
+			else
+				{
+				if (!firstchg)
+					{
+					if (totivus < min)
+						min = totivus;
+					if (totivus > max)
+						max = totivus;
+					}
+				else
+					firstchg = EFalse;
+				totivus = ivus;
+				}
+			}
+		lid = (TInt)id;
+		test.Printf(_L("ID: %1d IV: %10d (=%10dus) TIV %10dus\n"), id, iv, ivus, totivus);
+		}
+
+	if (aSlice > 0)
+		{
+		// check that the measured timeslices were within acceptable ranges
+		TUint32 sliceError = KSliceDeltaPercent*aSlice/100;
+		test_Compare(max,<,aSlice+sliceError);  
+		test_Compare(min,>,aSlice-sliceError);  
+		}
+	test.Printf(_L("RANGE %d-%dus (%dus)\n"), min, max, max-min);
+	}
+
+void TT()
+	{
+	test.Printf(_L("Timeslicing test ...\n"));
+	CircBuf* b = RunTimesliceTest(1, 50000, 100);
+	test.Next(_L("Baseline - expecting normal"));
+	DisplayBuffer(b,50000u);
+	delete b;
+
+	Driver.SetLocalTimerPrescaler(1u<<1, 1);
+	b = RunTimesliceTest(1, 50000, 100);
+	test.Next(_L("expecting double"));
+	DisplayBuffer(b,100000u);
+	delete b;
+
+	Driver.SetLocalTimerPrescaler(1u<<1|0x80000000u, 1);
+	test.Next(_L("expecting normal again"));
+	b = RunTimesliceTest(1, 50000, 100);
+	DisplayBuffer(b,50000u);
+	delete b;
+
+	test.Next(_L("expecting half"));
+	Driver.SetLocalTimerPrescaler(1u<<1, -1);
+	b = RunTimesliceTest(1, 50000, 100);
+	DisplayBuffer(b,25000u);
+	delete b;
+
+	Driver.SetLocalTimerPrescaler(1u<<1|0x80000000u, -1);
+	test.Next(_L("expecting normal again"));
+	b = RunTimesliceTest(1, 50000, 100);
+	DisplayBuffer(b,50000u);
+	delete b;
+
+	b = RunTimesliceTest(1, 50000, 200, -1);
+	test.Next(_L("expecting random"));
+	DisplayBuffer(b,0u);  // timeslices should be fairly random on this run
+	delete b;
+
+	b = RunTimesliceTest(1, 50000, 200, 1);
+	test.Next(_L("expecting normal again"));
+	DisplayBuffer(b,50000u);
+	delete b;
+	}
+
+struct SGTRecord
+	{
+	TUint64 iTSInterval;
+	TUint64 iGTInterval;
+	};
+
+
+SGTRecord* RunGTTest(TInt aCount, TInt aWait)
+	{
+	TUint64 lastgt,lastts,gt,ts;
+
+	SGTRecord* res = new SGTRecord[aCount];
+	test(res!=0);
+
+
+	TInt r = Driver.ReadGlobalTimerAndTimestamp(lastgt,lastts);
+	test_Equal(r,KErrNone);
+
+	for (TInt i = 0; i < aCount; i++) 
+		{
+		User::AfterHighRes(aWait);
+		
+		TInt r = Driver.ReadGlobalTimerAndTimestamp(gt,ts);
+		test_Equal(r,KErrNone);
+		res[i].iGTInterval = gt-lastgt;
+		lastgt = gt;
+		res[i].iTSInterval = ts-lastts;
+		lastts = ts;
+		}
+
+	return res;
+	}
+
+void DisplayGTResults(SGTRecord* aRec, TInt aCount, TUint32 aFreq, TUint64 aExpectedTSInterval, TUint64 aExpectedGTInterval)
+	{
+	SGTRecord max = { 0ul , 0ul };
+	SGTRecord min = { KMaxTUint64 , KMaxTUint64 };
+	
+	TUint64 errgt = (aExpectedGTInterval*KTimeStampDeltaPercent)/100;
+	TUint64 errts = (aExpectedTSInterval*KTimeStampDeltaPercent)/100;
+
+	
+	for (TInt i = 0 ; i < aCount; i++) 
+		{
+		test.Printf(_L("gt interval : %Lu (gtticks) %Lu (us)\n"),
+					aRec[i].iGTInterval,
+					aRec[i].iTSInterval*1000000u/TUint64(aFreq));
+		
+		if (max.iTSInterval < aRec[i].iTSInterval) 
+			{
+			max.iTSInterval = aRec[i].iTSInterval;
+			}
+		if (max.iGTInterval < aRec[i].iGTInterval) 
+			{
+			max.iGTInterval = aRec[i].iGTInterval;
+			}
+		
+		if (min.iTSInterval > aRec[i].iTSInterval) 
+			{
+			min.iTSInterval = aRec[i].iTSInterval;
+			}
+		if (min.iGTInterval > aRec[i].iGTInterval) 
+			{
+			min.iGTInterval = aRec[i].iGTInterval;
+			}
+		}
+	
+	test.Printf(_L("RANGE Global Timer %Lu-%Lu ticks (%Lu ticks)\n"),
+				min.iGTInterval, max.iGTInterval, max.iGTInterval-min.iGTInterval);
+	
+	test.Printf(_L("RANGE Timestamp %Lu-%Lu us (%Lu us)\n"),
+				(1000000u*min.iGTInterval)/TUint64(aFreq), (1000000u*max.iGTInterval)/TUint64(aFreq),
+				(1000000u*max.iGTInterval)/TUint64(aFreq) - (1000000u*min.iGTInterval)/TUint64(aFreq));
+	
+	if (errts) 
+		{
+		test_Compare(max.iTSInterval,<,aExpectedTSInterval+errts);  
+		test_Compare(min.iTSInterval,>,aExpectedTSInterval);  
+		}
+	
+	if (errgt) 
+		{
+		test_Compare(max.iGTInterval,<,aExpectedGTInterval+errgt);  
+		test_Compare(min.iGTInterval,>,aExpectedGTInterval);  
+		}
+	
+	}
+
+void GTT()
+	{
+	test.Printf(_L("Global timer tests ...\n"));
+	TUint64 gt,ts;
+
+	TInt r = Driver.ReadGlobalTimerAndTimestamp(gt,ts);
+	if (KErrNotSupported == r ) 
+		{
+		test.Printf(_L("Global timer not supported on this platform, skipping GT tests\n"));
+		return;
+		}
+
+	TUint32 f;
+	r = HAL::Get(HAL::EFastCounterFrequency, (TInt&)f);
+	test_KErrNone(r);
+	TInt wait = 100000; // 100ms
+	TInt count = 10;
+	
+	TUint64 expectedTs = (TUint64(f)*TUint64(wait))/1000000u;
+	TUint64 expectedGtOrig = expectedTs;
+	
+	SGTRecord* rec;
+	for (TInt i = 0; i < 10; i++)
+		{
+		TUint64 expectedGt = expectedGtOrig/(i+1);
+		r = Driver.SetGlobalTimerPrescaler(i);
+		test_KErrNone(r);
+		rec = RunGTTest(count, wait);
+		test.Printf(_L("expectedTS %Lu expectedGT %Lu\n"),expectedTs,expectedGt);
+		DisplayGTResults(rec,count, f, expectedTs , expectedGt);
+		delete[] rec;	// allocated with new[] in RunGTTest()
+		}
+
+	r = Driver.SetGlobalTimerPrescaler(-1); // back to default
+	test_KErrNone(r);
+	}
+
+void RunTests()
+	{
+	TestRatios();
+	if (Driver.FrqChgTestPresent()!=KErrNone)
+		{
+		test.Printf(_L("Frequency Change not supported on this platform\n"));
+		return;
+		}
+	TT();
+	GTT();
+	}
+
+GLDEF_C TInt E32Main()
+	{
+	test.Title();
+	test.Start(_L("Testing"));
+	TInt r = User::LoadLogicalDevice(KLddName);
+	if (r==KErrNotFound)
+		{
+		test.Printf(_L("Test not supported on this platform\n"));
+		}
+	else 
+		{
+		if (r!=KErrNone)
+			{
+			test_Equal(KErrAlreadyExists, r);
+			}
+		r = Driver.Open();
+		test_KErrNone(r);
+		RunTests();
+		Driver.Close();
+		}
+
+	test.End();
+	r = User::FreeLogicalDevice(KLddName);
+	test_KErrNone(r);
+	return KErrNone;
+	}
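
Both t_frqchg.cpp above and t_semutx.cpp below define a ticks_to_us() helper that converts fast-counter ticks to microseconds, adding half the divisor before dividing so the result rounds to the nearest microsecond instead of truncating. A stand-alone sketch of the same arithmetic (editor's illustration only, not part of this changeset):

#include <stdio.h>

// Same arithmetic as ticks_to_us() in the tests: scale to microseconds,
// add half the frequency, then divide, so the result rounds to nearest.
static unsigned ticks_to_us(unsigned long long aTicks, unsigned long long aF)
	{
	unsigned long long x = aTicks * 1000000ull;
	x += (aF >> 1);
	x /= aF;
	return (unsigned)x;
	}

int main()
	{
	// One tick of a 32768Hz fast counter is ~30.52us: rounding gives 31,
	// plain truncation would give 30.
	printf("%u\n", ticks_to_us(1, 32768));
	return 0;
	}
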
--- a/kerneltest/e32test/prime/t_semutx.cpp	Wed Jun 23 12:52:28 2010 +0100
+++ b/kerneltest/e32test/prime/t_semutx.cpp	Wed Jun 23 12:58:21 2010 +0100
@@ -40,7 +40,9 @@
 
 #define __E32TEST_EXTENSION__
 #include <e32test.h>
-#include <u32std.h>
+#include <hal.h>
+#include <e32atomics.h>
+#include <u32hal.h>
 #include <e32svr.h>
 
 const TInt KMaxBufferSize=10;
@@ -51,13 +53,352 @@
 
 RTest test(_L("T_SEMUTX"));
 RMutex mutex;
-RCriticalSection criticalSn;	
+RCriticalSection criticalSn;
 TInt thread1Count,thread2Count;
 TInt arrayIndex;
-TInt array[KMaxArraySize];  
+TInt array[KMaxArraySize];
 TInt consumerArray[KNumProducerItems];
-RSemaphore slotAvailable,itemAvailable;  
+RSemaphore slotAvailable,itemAvailable;
+TBool doCpuLocking = EFalse;
+
+// Return the number of CPUs in the system
+TInt NumCpus()
+	{
+	TInt r = UserSvr::HalFunction(EHalGroupKernel, EKernelHalNumLogicalCpus, 0, 0);
+	return r;
+	}
+
+
+TInt LockCurrentThreadToCpu0(TBool aCallingIsMainTestThread = EFalse)
+	{
+	if (aCallingIsMainTestThread) 
+		{
+		if (NumCpus() > 1) 
+			{
+			doCpuLocking = ETrue;
+			return UserSvr::HalFunction(EHalGroupKernel, EKernelHalLockThreadToCpu, 0, 0); 
+			}
+		else
+			{
+			return KErrNone;
+			}
+		}
+	return UserSvr::HalFunction(EHalGroupKernel, EKernelHalLockThreadToCpu, 0, 0); 
+	}
+
+TInt UnlockCurrentThreadToCpu0(TBool aCallingIsMainTestThread = EFalse)
+	{
+	if (aCallingIsMainTestThread) 
+		{
+		if (NumCpus() > 1) 
+			{
+			doCpuLocking = EFalse;
+			return UserSvr::HalFunction(EHalGroupKernel, EKernelHalLockThreadToCpu, (TAny*) 0xffffffffu, 0); 
+			}
+		else
+			{
+			return KErrNone;
+			}
+		}
+	return UserSvr::HalFunction(EHalGroupKernel, EKernelHalLockThreadToCpu, (TAny*) 0xffffffffu, 0); 
+	}
+
+
+/******************************************************************************
+ * Random Number Generation
+ ******************************************************************************/
+void Random(TUint64& a)
+	{
+	TInt i;
+	for (i=64; i>0; --i)
+		{
+		TUint64 x = a<<1;
+		TUint64 y = x<<1;
+		x^=y;
+		a = (y>>1) | (x>>63);
+		}
+	}
+
+// Returns 256*log2(a/2^64)
+TInt Log2(TUint64 a)
+	{
+	const TUint64 KBit63 = UI64LIT(0x8000000000000000);
+	TInt n = __e32_find_ms1_64(a);
+	a <<= (63-n);
+	n -= 64;
+	TInt i;
+	for (i=0; i<8; ++i)
+		{
+		a >>= 32;
+		a *= a;
+		n <<= 1;
+		if (a & KBit63)
+			{
+			++n;
+			}
+		else
+			{
+			a <<= 1;
+			}
+		}
+	return n;
+	}
+
+// Returns an exponentially distributed random value with mean aMean, expressed
+// in units of aTick, derived from the 64-bit uniform random input aU.
+TUint32 ExpRV(TUint64 aU, TUint32 aMean, TUint32 aTick)
+	{
+	TInt n = -Log2(aU);
+	TUint64 x = TUint64(n) * TUint64(aMean);
+	x *= TUint64(22713);	// 2^15 * ln2
+	TUint64 p(aTick);
+	p <<= 22;
+	x += p;
+	p += p;
+	x /= p;
+	return I64LOW(x);
+	}
+
+
+
+/*----------------------------------------------------------------------------*/
+class MLock
+	{
+public:
+	enum {EPollable=1, ETimeoutAvail=2, ENestable=4, ELimit1=8, ELooseTimeout=16};
+public:
+	virtual TInt Flags()=0;
+	virtual void Release()=0;
+	virtual void Wait()=0;
+	virtual void Signal()=0;
+	virtual TInt Wait(TInt aTimeout);
+	virtual TInt Poll();
+	};
+
+TInt MLock::Wait(TInt)
+	{ return KErrNotSupported; }
+TInt MLock::Poll()
+	{ return KErrNotSupported; }
+
+/*----------------------------------------------------------------------------*/
+class LockS : public MLock
+	{
+public:
+	LockS();
+	virtual TInt Flags();
+	virtual void Release();
+	virtual void Wait();
+	virtual void Signal();
+	virtual TInt Wait(TInt aTimeout);
+	virtual TInt Poll();
+public:
+	RSemaphore	iT;
+	};
+
+LockS::LockS()
+	{ test_KErrNone(iT.CreateLocal(1)); }
+TInt LockS::Flags()
+	{ return EPollable|ETimeoutAvail; }
+void LockS::Release()
+	{ iT.Close(); }
+void LockS::Wait()
+	{ iT.Wait(); }
+void LockS::Signal()
+	{ iT.Signal(); }
+TInt LockS::Wait(TInt aTimeout)
+	{ return iT.Wait(aTimeout); }
+TInt LockS::Poll()
+	{ return iT.Poll(); }
+
+/*----------------------------------------------------------------------------*/
+class LockM : public MLock
+	{
+public:
+	LockM();
+	virtual TInt Flags();
+	virtual void Release();
+	virtual void Wait();
+	virtual void Signal();
+	virtual TInt Wait(TInt aTimeout);
+	virtual TInt Poll();
+public:
+	RMutex		iT;
+	};
+
+LockM::LockM()
+	{ test_KErrNone(iT.CreateLocal()); }
+TInt LockM::Flags()
+	{ return EPollable|ETimeoutAvail|ENestable|ELimit1; }
+void LockM::Release()
+	{ iT.Close(); }
+void LockM::Wait()
+	{ iT.Wait(); }
+void LockM::Signal()
+	{ iT.Signal(); }
+TInt LockM::Wait(TInt aTimeout)
+	{ return iT.Wait(aTimeout); }
+TInt LockM::Poll()
+	{ return iT.Poll(); }
+
+/*----------------------------------------------------------------------------*/
+
+class LockFL : public MLock
+	{
+public:
+	LockFL();
+	virtual TInt Flags();
+	virtual void Release();
+	virtual void Wait();
+	virtual void Signal();
+	virtual TInt Wait(TInt aTimeout);
+	virtual TInt Poll();
+public:
+	RFastLock	iT;
+	};
+
+LockFL::LockFL()
+	{ test_KErrNone(iT.CreateLocal()); }
+TInt LockFL::Flags()
+	{ return ETimeoutAvail|EPollable|ELimit1|ELooseTimeout; }
+void LockFL::Release()
+	{ iT.Close(); }
+void LockFL::Wait()
+	{ iT.Wait(); }
+void LockFL::Signal()
+	{ iT.Signal(); }
+TInt LockFL::Wait(TInt aTimeout)
+	{ return iT.Wait(aTimeout); }
+TInt LockFL::Poll()
+	{ return iT.Poll(); }
+
+/*----------------------------------------------------------------------------*/
+class LockCS : public MLock
+	{
+public:
+	LockCS();
+	virtual TInt Flags();
+	virtual void Release();
+	virtual void Wait();
+	virtual void Signal();
+public:
+	RCriticalSection iT;
+	};
+
+LockCS::LockCS()
+	{ test_KErrNone(iT.CreateLocal()); }
+TInt LockCS::Flags()
+	{ return ELimit1; }
+void LockCS::Release()
+	{ iT.Close(); }
+void LockCS::Wait()
+	{ iT.Wait(); }
+void LockCS::Signal()
+	{ iT.Signal(); }
+
+
 			 
+/*----------------------------------------------------------------------------*/
+class LFSR
+	{
+public:
+	LFSR(TInt aBits, TInt aTap2, TInt aTap3=0, TInt aTap4=0);
+	~LFSR();
+	void Step();
+	void Step(TInt aSteps);
+	TBool operator==(const LFSR& a) const;
+public:
+	TUint32* iData;
+	TInt iBits;
+	TInt iTap2;
+	TInt iTap3;
+	TInt iTap4;
+	TInt iNW;
+	TInt iSh1;
+	TInt iIx2;
+	TInt iSh2;
+	TInt iIx3;
+	TInt iSh3;
+	TInt iIx4;
+	TInt iSh4;
+	};
+
+LFSR::LFSR(TInt aBits, TInt aTap2, TInt aTap3, TInt aTap4)
+	{
+	iBits = aBits;
+	iTap2 = aTap2;
+	iTap3 = aTap3;
+	iTap4 = aTap4;
+	iNW = (aBits + 31) >> 5;
+	iData = (TUint32*)User::AllocZ(iNW*sizeof(TUint32));
+	test(iData!=0);
+	iData[0] = 1;
+	iSh1 = (aBits-1)&31;
+	iIx2 = (iTap2-1)>>5;
+	iSh2 = (iTap2-1)&31;
+	if (iTap3)
+		{
+		iIx3 = (iTap3-1)>>5;
+		iSh3 = (iTap3-1)&31;
+		}
+	else
+		{
+		iIx3 = -1;
+		iSh3 = 0;
+		}
+	if (iTap4)
+		{
+		iIx4 = (iTap4-1)>>5;
+		iSh4 = (iTap4-1)&31;
+		}
+	else
+		{
+		iIx4 = -1;
+		iSh4 = 0;
+		}
+	}
+
+LFSR::~LFSR()
+	{
+	User::Free(iData);
+	}
+
+void LFSR::Step(TInt aSteps)
+	{
+	while (aSteps--)
+		Step();
+	}
+
+void LFSR::Step()
+	{
+	TUint32 b = iData[iNW-1]>>iSh1;
+	b ^= (iData[iIx2]>>iSh2);
+	if (iIx3>=0)
+		b ^= (iData[iIx3]>>iSh3);
+	if (iIx4>=0)
+		b ^= (iData[iIx4]>>iSh4);
+	b &= 1;
+	TInt i;
+	for (i=0; i<iNW; ++i)
+		{
+		TUint32 bb = iData[i] >> 31;
+		iData[i] = (iData[i]<<1)|b;
+		b = bb;
+		}
+	iData[iNW-1] &= ((2u<<iSh1)-1u);
+	}
+
+TBool LFSR::operator==(const LFSR& a) const
+	{
+	if (iBits!=a.iBits || iTap2!=a.iTap2 || iTap3!=a.iTap3 || iTap4!=a.iTap4 || iNW!=a.iNW)
+		return EFalse;
+	if (iData==a.iData)
+		return ETrue;
+	if (memcompare((const TUint8*)iData, iNW, (const TUint8*)a.iData, a.iNW))
+		return EFalse;
+	return ETrue;
+	}
+
+
+
+/*----------------------------------------------------------------------------*/
 class CStack
 	{
 public:	   
@@ -118,6 +459,7 @@
 // Mutex test thread 1
 //
 	{	
+	TInt n = NumCpus();
 
 	thread1Count=0;
 	TBool running=ETrue;
@@ -133,6 +475,16 @@
 		else
 			running=EFalse;
 		mutex.Signal();
+
+		if (n > 1) 
+			{
+			// When the mutex is signalled, due to priority balancing, the other
+			// thread will be scheduled to run on a CPU other than this one. The delay
+			// in getting that thread to run means that this one can manage to re-claim the
+			// mutex before the other thread gets to run, so we add a small delay here.
+			User::After(100); 
+			}
+
 		} while (running);
 	return(KErrNone);
 	}
@@ -142,6 +494,7 @@
 // Mutex test thread 2
 //
 	{
+	TInt n = NumCpus();
 
 	thread2Count=0;
 	TBool running=ETrue;
@@ -157,6 +510,17 @@
 		else
 			running=EFalse;
 		mutex.Signal();
+
+		if (n > 1) 
+			{
+			// When the mutex is signalled, due to priority balancing, the other
+			// thread will be scheduled to run on a CPU other than this one. The delay
+			// in getting that thread to run means that this one can manage to re-claim the
+			// mutex before the other thread gets to run, so we add a small delay here.
+			User::After(100); 
+			}
+		
+
 		} while (running);
 	return(KErrNone);
 	}
@@ -209,27 +573,80 @@
 	return(KErrNone);
 	}
 
-struct SWaitSem
+
+/*----------------------------------------------------------------------------*/
+struct SWaitLock
 	{
-	RSemaphore iSem;
+	enum {EDummy=-2, EPoll=-1, EInfinite=0};
+
+	static TInt WaitLockThread(TAny*);
+	void Start(RThread& aT, TThreadPriority aP=EPriorityLess);
+	void Wait(RThread& aT, TInt aResult);
+	TInt DoTest2(RThread& aT, TInt aTimeout, TInt aResult, TThreadPriority aP=EPriorityLess);
+	void Test2();
+	void TestSignalled();
+	void TestNotSignalled();
+	void TestState();
+
+
+	MLock* iLock;
 	TInt iTimeout;
 	};
 
-TInt WaitSemThread(TAny* a)
+TInt SWaitLock::WaitLockThread(TAny* a)
 	{
-	SWaitSem& ws = *(SWaitSem*)a;
-	return ws.iSem.Wait(ws.iTimeout);
+	
+	if (doCpuLocking)
+		{
+		TInt r = LockCurrentThreadToCpu0();
+		if (KErrNone!=r) return r;
+		// Rendezvous was requested
+		RThread::Rendezvous(KErrNone);
+		}
+	
+	SWaitLock& w = *(SWaitLock*)a;
+	TInt lfl = w.iLock->Flags();
+	TBool limit1 = lfl & MLock::ELimit1;
+	TInt r;
+	switch (w.iTimeout)
+		{
+		case EDummy:
+			return KErrNone;
+		case EPoll:
+			r = w.iLock->Poll();
+			break;
+		case EInfinite:
+			w.iLock->Wait();
+			r = KErrNone;
+			break;
+		default:
+			r = w.iLock->Wait(w.iTimeout);
+			break;
+		}
+	if (limit1 && r==KErrNone)
+		w.iLock->Signal();
+	return r;
 	}
 
-void StartWaitSemThread(RThread& aT, SWaitSem& aW, TThreadPriority aP=EPriorityLess)
+void SWaitLock::Start(RThread& aT, TThreadPriority aP)
 	{
-	TInt r = aT.Create(KNullDesC, &WaitSemThread, 0x1000, 0x1000, 0x1000, &aW);
+	TRequestStatus st;
+	TInt r = aT.Create(KNullDesC, &WaitLockThread, 0x1000, 0x1000, 0x1000, this);
 	test_KErrNone(r);
 	aT.SetPriority(aP);
+	if (doCpuLocking) 
+		{
+		aT.Rendezvous(st);
+		}
 	aT.Resume();
+	if (doCpuLocking) 
+		{
+		User::WaitForRequest(st);
+		test_KErrNone(st.Int());
+		}
 	}
 
-void WaitForWaitSemThread(RThread& aT, TInt aResult)
+void SWaitLock::Wait(RThread& aT, TInt aResult)
 	{
 	TRequestStatus s;
 	aT.Logon(s);
@@ -240,32 +657,67 @@
 	CLOSE_AND_WAIT(aT);
 	}
 
-TInt DummyThread(TAny*)
+TInt SWaitLock::DoTest2(RThread& aT, TInt aTimeout, TInt aResult, TThreadPriority aP)
 	{
-	return 0;
+	TTime initial;
+	TTime final;
+	iTimeout = aTimeout;
+	initial.HomeTime();
+	Start(aT, aP);
+	Wait(aT, aResult);
+	final.HomeTime();
+	TInt elapsed = I64INT(final.Int64()-initial.Int64());
+	return elapsed;
+	}
+
+void SWaitLock::TestSignalled()
+	{
+	TInt r = iLock->Poll();
+	if (r == KErrNotSupported)
+		r = iLock->Wait(1);
+	test_KErrNone(r);
 	}
 
-void TestSemaphore2()
+void SWaitLock::TestNotSignalled()
+	{
+	TInt r = iLock->Poll();
+	if (r == KErrNotSupported)
+		r = iLock->Wait(1);
+	test_Equal(KErrTimedOut, r);
+	}
+
+void SWaitLock::TestState()
 	{
-	test.Start(_L("Test semaphore wait with timeout"));
-	SWaitSem ws;
+	if (iLock->Flags() & MLock::ELimit1)
+		TestSignalled();	// not signalled afterwards
+	else
+		TestNotSignalled();
+	}
+
+void SWaitLock::Test2()
+	{
+	test.Start(_L("SWaitLock::Test2"));
 	RThread t;
+	RThread t2;
 	TTime initial;
 	TTime final;
-	TInt elapsed=0;
-	TInt r = ws.iSem.CreateLocal(0);
-	test_KErrNone(r);
+	TInt elapsed = 0;
+	TInt r = 0;
+	TInt lfl = iLock->Flags();
+	TBool nestable = lfl & MLock::ENestable;
+	TBool limit1 = lfl & MLock::ELimit1;
+	TBool pollable = lfl & MLock::EPollable;
+	TBool to = lfl & MLock::ETimeoutAvail;
+	TBool lto = lfl & MLock::ELooseTimeout;
 
 	RThread().SetPriority(EPriorityAbsoluteVeryLow);
 	TInt threadcount=0;
+	iTimeout = EDummy;
 	initial.HomeTime();
 	while (elapsed<1000000)
 		{
-		r = t.Create(KNullDesC, &DummyThread, 0x1000, NULL, NULL);
-		test_KErrNone(r);
-		t.SetPriority(EPriorityMore);
-		t.Resume();
-		t.Close();
+		Start(t, EPriorityMore);
+		Wait(t, KErrNone);
 		++threadcount;
 		final.HomeTime();
 		elapsed = I64INT(final.Int64()-initial.Int64());
@@ -275,125 +727,735 @@
 	TInt overhead = 1000000/threadcount;
 	test.Printf(_L("overhead = %dus\n"),overhead);
 
-	ws.iTimeout=1000000;
-	initial.HomeTime();
-	StartWaitSemThread(t, ws);
-	WaitForWaitSemThread(t, KErrTimedOut);
-	final.HomeTime();
-	elapsed = I64INT(final.Int64()-initial.Int64());
-	test.Printf(_L("Time taken = %dus\n"), elapsed);
-	test(elapsed>=900000+overhead && elapsed<1500000+overhead);
+	iLock->Wait();
 
-	ws.iTimeout=-1;
-	initial.HomeTime();
-	StartWaitSemThread(t, ws);
-	WaitForWaitSemThread(t, KErrArgument);
-	final.HomeTime();
-	elapsed = I64INT(final.Int64()-initial.Int64());
-	test.Printf(_L("Time taken = %dus\n"), elapsed);
+	if (to)
+		{
+		elapsed = DoTest2(t, 1000000, KErrTimedOut);
+		test.Printf(_L("Time taken = %dus\n"), elapsed);
+		test(elapsed>=900000+overhead && elapsed<1500000+overhead);
+		elapsed = DoTest2(t, -99, KErrArgument);
+		test.Printf(_L("Time taken = %dus\n"), elapsed);
+		}
 
-	ws.iTimeout=2000000;
-	initial.HomeTime();
-	StartWaitSemThread(t, ws);
-	User::After(1000000);
-	ws.iSem.Signal();
-	WaitForWaitSemThread(t, KErrNone);
-	final.HomeTime();
-	elapsed = I64INT(final.Int64()-initial.Int64());
-	test.Printf(_L("Time taken = %dus\n"), elapsed);
-	test(elapsed>=900000+overhead && elapsed<1500000+overhead);
+	if (pollable)
+		{
+		test.Printf(_L("Testing Poll() function\n"));
+		r = iLock->Poll();
+		test_Equal((nestable ? KErrNone : KErrTimedOut), r);
+		if (nestable)
+			{
+			iTimeout=EPoll;
+			r = iLock->Poll();
+			test_KErrNone(r);
+			iLock->Signal();
+			Start(t, EPriorityMore);
+			Wait(t, KErrTimedOut);
+			}
+		iLock->Signal();
+		if (nestable)
+			{
+			iTimeout=EPoll;
+			r = iLock->Poll();
+			test_KErrNone(r);
+			iLock->Signal();
+			Start(t, EPriorityMore);
+			Wait(t, KErrTimedOut);
+			iLock->Signal();
+			Start(t, EPriorityMore);
+			Wait(t, KErrNone);
+			}
+		r = iLock->Poll();
+		test_KErrNone(r);
+		if (!nestable)
+			{
+			r = iLock->Poll();
+			test_Equal(KErrTimedOut, r);
+			iLock->Signal();
+			if (!limit1)
+				{
+				iLock->Signal();
+				r = iLock->Poll();
+				test_KErrNone(r);
+				}
+			r = iLock->Poll();
+			test_KErrNone(r);
+			r = iLock->Poll();
+			test_Equal(KErrTimedOut, r);
+			}
+		elapsed = DoTest2(t, EPoll, KErrTimedOut);
+		test.Printf(_L("Time taken = %dus\n"), elapsed);
+		test(elapsed<=50000+3*overhead);
+		iLock->Signal();
+		elapsed = DoTest2(t, EPoll, KErrNone);
+		test.Printf(_L("Time taken = %dus\n"), elapsed);
+		test(elapsed<=50000+3*overhead);
+		TestState();
+		iLock->Signal();
+		r = LockCurrentThreadToCpu0(ETrue);
+		test_KErrNone(r);
+		Start(t, EPriorityMuchMore);
+		Start(t2, EPriorityMore);
+		test_Equal(EExitKill, t2.ExitType());
+		test_Equal(EExitKill, t.ExitType());
+		Wait(t2, limit1 ? KErrNone : KErrTimedOut);
+		Wait(t, KErrNone);
+		r = UnlockCurrentThreadToCpu0(ETrue);
+		test_KErrNone(r);
+		TestState();
+		}
+	else
+		{
+		test.Printf(_L("Poll() function not supported\n"));
+		}
 
-	ws.iTimeout=100000;
-	StartWaitSemThread(t, ws, EPriorityMore);
-	t.Suspend();
-	ws.iSem.Signal();
-	User::After(200000);
-	t.Resume();
-	WaitForWaitSemThread(t, KErrTimedOut);
-	test_KErrNone(ws.iSem.Wait(1));
+	if (to)
+		{
+		iTimeout=2000000;
+		initial.HomeTime();
+		Start(t);
+		User::After(1000000);
+		iLock->Signal();
+		Wait(t, KErrNone);
+		final.HomeTime();
+		elapsed = I64INT(final.Int64()-initial.Int64());
+		test.Printf(_L("Time taken = %dus\n"), elapsed);
+		test(elapsed>=900000+overhead && elapsed<1500000+overhead);
+		TestState();
 
-	ws.iTimeout=100000;
-	StartWaitSemThread(t, ws, EPriorityMore);
-	t.Suspend();
-	ws.iSem.Signal();
-	User::After(50000);
-	t.Resume();
-	WaitForWaitSemThread(t, KErrNone);
-	test_Equal(KErrTimedOut, ws.iSem.Wait(1));
+		r = LockCurrentThreadToCpu0(ETrue);
+		test_KErrNone(r);
 
-	RThread t2;
-	ws.iTimeout=100000;
-	StartWaitSemThread(t, ws, EPriorityMuchMore);
-	StartWaitSemThread(t2, ws, EPriorityMore);
-	t.Suspend();
-	ws.iSem.Signal();
-	test_Equal(EExitKill, t2.ExitType());
-	test_Equal(EExitPending, t.ExitType());
-	t.Resume();
-	WaitForWaitSemThread(t, KErrTimedOut);
-	WaitForWaitSemThread(t2, KErrNone);
-	test_Equal(KErrTimedOut, ws.iSem.Wait(1));
+		if (!lto)
+			{
+			iTimeout=100000;
+			Start(t, EPriorityMore);
+			t.Suspend();
+			iLock->Signal();
+			User::After(200000);
+			t.Resume();
+			Wait(t, KErrTimedOut);
+			TestSignalled();
 
-	ws.iTimeout=1000000;
-	initial.HomeTime();
-	StartWaitSemThread(t2, ws, EPriorityMore);
-	StartWaitSemThread(t, ws, EPriorityMuchMore);
-	ws.iSem.Signal();
-	WaitForWaitSemThread(t, KErrNone);
-	final.HomeTime();
-	elapsed = I64INT(final.Int64()-initial.Int64());
-	test.Printf(_L("Time taken = %dus\n"), elapsed);
-	WaitForWaitSemThread(t2, KErrTimedOut);
-	final.HomeTime();
-	elapsed = I64INT(final.Int64()-initial.Int64());
-	test.Printf(_L("Time taken = %dus\n"), elapsed);
-	test(elapsed>=900000+2*overhead && elapsed<1500000+2*overhead);
+			iTimeout=100000;
+			Start(t, EPriorityMore);
+			t.Suspend();
+			iLock->Signal();
+			User::After(50000);
+			t.Resume();
+			Wait(t, KErrNone);
+			TestState();
+
+			iTimeout=100000;
+			Start(t, EPriorityMuchMore);
+			Start(t2, EPriorityMore);
+			t.Suspend();
+			iLock->Signal();
+			test_Equal(EExitKill, t2.ExitType());
+			test_Equal(EExitPending, t.ExitType());
+			t.Resume();
+			Wait(t, limit1 ? KErrNone : KErrTimedOut);
+			Wait(t2, KErrNone);
+			TestState();
+			}
 
-	ws.iTimeout=1000000;
-	initial.HomeTime();
-	StartWaitSemThread(t2, ws, EPriorityMore);
-	StartWaitSemThread(t, ws, EPriorityMuchMore);
-	WaitForWaitSemThread(t, KErrTimedOut);
-	final.HomeTime();
-	elapsed = I64INT(final.Int64()-initial.Int64());
-	test.Printf(_L("Time taken = %dus\n"), elapsed);
-	WaitForWaitSemThread(t2, KErrTimedOut);
-	final.HomeTime();
-	elapsed = I64INT(final.Int64()-initial.Int64());
-	test.Printf(_L("Time taken = %dus\n"), elapsed);
-	test(elapsed>=900000+2*overhead && elapsed<1500000+2*overhead);
+		iTimeout=1000000;
+		initial.HomeTime();
+		Start(t2, EPriorityMore);
+		Start(t, EPriorityMuchMore);
+		iLock->Signal();
+		Wait(t, KErrNone);
+		final.HomeTime();
+		elapsed = I64INT(final.Int64()-initial.Int64());
+		test.Printf(_L("Time taken = %dus\n"), elapsed);
+		Wait(t2, limit1 ? KErrNone : KErrTimedOut);
+		final.HomeTime();
+		elapsed = I64INT(final.Int64()-initial.Int64());
+		test.Printf(_L("Time taken = %dus\n"), elapsed);
+		if (!limit1)
+			{
+			test(elapsed>=900000+2*overhead && elapsed<1500000+2*overhead);
+			}
+		TestState();
+
+		iTimeout=1000000;
+		initial.HomeTime();
+		Start(t2, EPriorityMore);
+		Start(t, EPriorityMuchMore);
+		Wait(t, KErrTimedOut);
+		final.HomeTime();
+		elapsed = I64INT(final.Int64()-initial.Int64());
+		test.Printf(_L("Time taken = %dus\n"), elapsed);
+		Wait(t2, KErrTimedOut);
+		final.HomeTime();
+		elapsed = I64INT(final.Int64()-initial.Int64());
+		test.Printf(_L("Time taken = %dus\n"), elapsed);
+		test(elapsed>=900000+2*overhead && elapsed<1500000+2*overhead);
 
-	ws.iTimeout=1000000;
-	initial.HomeTime();
-	StartWaitSemThread(t2, ws, EPriorityMore);
-	StartWaitSemThread(t, ws, EPriorityMuchMore);
-	t.Kill(299792458);
-	WaitForWaitSemThread(t2, KErrTimedOut);
-	WaitForWaitSemThread(t, 299792458);
-	final.HomeTime();
-	elapsed = I64INT(final.Int64()-initial.Int64());
-	test.Printf(_L("Time taken = %dus\n"), elapsed);
-	test(elapsed>=900000+2*overhead && elapsed<1500000+2*overhead);
+		iTimeout=1000000;
+		initial.HomeTime();
+		Start(t2, EPriorityMore);
+		Start(t, EPriorityMuchMore);
+		t.Kill(299792458);
+		Wait(t2, KErrTimedOut);
+		Wait(t, 299792458);
+		final.HomeTime();
+		elapsed = I64INT(final.Int64()-initial.Int64());
+		test.Printf(_L("Time taken = %dus\n"), elapsed);
+		test(elapsed>=900000+2*overhead && elapsed<1500000+2*overhead);
 
-	ws.iTimeout=1000000;
-	initial.HomeTime();
-	StartWaitSemThread(t, ws, EPriorityMore);
-	StartWaitSemThread(t2, ws, EPriorityMuchMore);
-	test_Equal(EExitPending, t.ExitType());
-	test_Equal(EExitPending, t2.ExitType());
-	ws.iSem.Close();
-	test_Equal(EExitKill, t.ExitType());
-	test_Equal(EExitKill, t2.ExitType());
-	WaitForWaitSemThread(t2, KErrGeneral);
-	WaitForWaitSemThread(t, KErrGeneral);
-	final.HomeTime();
-	elapsed = I64INT(final.Int64()-initial.Int64());
-	test.Printf(_L("Time taken = %dus\n"), elapsed);
-	test(elapsed<=50000+3*overhead);
-
+		iTimeout=1000000;
+		initial.HomeTime();
+		Start(t, EPriorityMore);
+		Start(t2, EPriorityMuchMore);
+		test_Equal(EExitPending, t.ExitType());
+		test_Equal(EExitPending, t2.ExitType());
+		iLock->Release();
+		test_Equal(EExitKill, t.ExitType());
+		test_Equal(EExitKill, t2.ExitType());
+		Wait(t2, KErrGeneral);
+		Wait(t, KErrGeneral);
+		final.HomeTime();
+		elapsed = I64INT(final.Int64()-initial.Int64());
+		test.Printf(_L("Time taken = %dus\n"), elapsed);
+		test(elapsed<=50000+3*overhead);
+		r = UnlockCurrentThreadToCpu0(ETrue);
+		test_KErrNone(r);
+		}
+	else
+		{
+		test.Printf(_L("Timed waits not supported\n"));
+		iLock->Release();
+		}
 	test.End();
 	}
 
+volatile TBool NoRepeat = EFalse;
+void TestPollTimeout()
+	{
+	SWaitLock w;
+	do	{
+		test.Printf(_L("TestPollTimeout - RSemaphore\n"));
+		LockS ls;
+		w.iLock = &ls;
+		w.Test2();	// Release()s ls
+		} while(NoRepeat);
+	do	{
+		test.Printf(_L("TestPollTimeout - RMutex\n"));
+		LockM lm;
+		w.iLock = &lm;
+		w.Test2();	// Release()s lm
+		} while(NoRepeat);
+	do	{
+		test.Printf(_L("TestPollTimeout - RFastLock\n"));
+		LockFL fl;
+		w.iLock = &fl;
+		w.Test2();	// Release()s fl
+		} while(NoRepeat);
+	}
+
+
+/*----------------------------------------------------------------------------*/
+class CMXThreadGrp;
+
+struct SStats
+	{
+	SStats();
+	void Add(TInt aValue);
+	void Add(const SStats& aS);
+	TInt Count() const {return iN;}
+	TInt Min() const;
+	TInt Max() const;
+	TInt Mean() const;
+
+	TInt64	iSum;
+	TInt	iMin;
+	TInt	iMax;
+	TInt	iN;
+	TInt	iSp;
+	};
+
+SStats::SStats()
+	{
+	iSum = 0;
+	iMax = KMinTInt;
+	iMin = ~iMax;
+	iN = 0;
+	iSp = 0;
+	}
+
+void SStats::Add(TInt aValue)
+	{
+	TInt64 v = aValue;
+	iSum += v;
+	++iN;
+	if (aValue > iMax)
+		iMax = aValue;
+	if (aValue < iMin)
+		iMin = aValue;
+	}
+
+void SStats::Add(const SStats& a)
+	{
+	iN += a.iN;
+	iSum += a.iSum;
+	if (a.iMax > iMax)
+		iMax = a.iMax;
+	if (a.iMin < iMin)
+		iMin = a.iMin;
+	}
+
+TInt SStats::Min() const
+	{return iN ? iMin : 0;}
+
+TInt SStats::Max() const
+	{return iN ? iMax : 0;}
+
+TInt SStats::Mean() const
+	{
+	if (iN==0)
+		return 0;
+	return (TInt)(iSum/TInt64(iN));
+	}
+
+TUint32 ticks_to_us(TUint32 aTicks, TUint32 aF)
+	{
+	TUint64 x = aTicks;
+	TUint64 f = aF;
+	x *= TUint64(1000000);
+	x += (f>>1);
+	x /= f;
+	return I64LOW(x);
+	}
+
+class CMXThread : public CBase
+	{
+private:
+	CMXThread();
+	~CMXThread();
+	static CMXThread* New(CMXThreadGrp* aG, TUint32 aId, TUint32 aL, TUint32 aD);
+	void Start();
+	void Wait();
+	TInt Construct(CMXThreadGrp* aG, TUint32 aId, TUint32 aL, TUint32 aD);
+	TInt Steps();
+	TInt Action();
+	TInt Run();
+	static TInt ThreadFunc(TAny*);
+	void PrintStats();
+private:
+	TUint64 iSeed;
+	RThread	iThread;
+	TRequestStatus iExitStatus;
+	CMXThreadGrp* iG;
+	LFSR* iDummyLfsr;
+	TUint32 iId;
+	TUint32 iLambda;
+	TUint32 iDummySteps;
+	TInt iTotalSteps;
+	TInt iIterations;
+	TInt iPolls;
+	TInt iPollFails;
+	SStats iStats;
+	SStats iTimeoutStats;
+private:
+	friend class CMXThreadGrp;
+	};
+
+class CMXThreadGrp : public CBase
+	{
+public:
+	static CMXThreadGrp* New(MLock* aLock, TInt aNThreads, TUint32 aLambda, TUint32 aDummySteps, TUint32 aTime);
+	CMXThreadGrp();
+	~CMXThreadGrp();
+	TBool Run();
+	void PrintStats();
+private:
+	TInt Construct(MLock* aLock, TInt aNThreads, TUint32 aLambda, TUint32 aDummySteps, TUint32 aTime);
+private:
+	TInt iNThreads;
+	CMXThread** iThreads;
+	MLock* iLock;
+	LFSR* iLfsr;
+	LFSR* iLfsr0;
+	TUint32 iNTickPeriod;
+	TUint32 iFCF;
+	TUint32 iNTicks;
+	TInt iTotalSteps;
+	TInt iIterations;
+	TInt iPolls;
+	TInt iPollFails;
+	SStats iStats;
+	SStats iTimeoutStats;
+private:
+	friend class CMXThread;
+	};
+
+CMXThread::CMXThread()
+	{
+	iThread.SetHandle(0);
+	}
+
+CMXThread::~CMXThread()
+	{
+	delete iDummyLfsr;
+	if (iThread.Handle())
+		{
+		if (iThread.ExitType() == EExitPending)
+			{
+			iThread.Kill(0);
+			Wait();
+			}
+		CLOSE_AND_WAIT(iThread);
+		}
+	}
+
+void CMXThread::PrintStats()
+	{
+	test.Printf(_L("Thread %d:\n"), iId);
+	test.Printf(_L(" ST:%10d IT:%10d P:%10d PF:%10d TO:%10d\n"), iTotalSteps, iIterations, iPolls, iPollFails, iTimeoutStats.Count());
+	TUint32 min, max, mean;
+	min = ticks_to_us(iStats.Min(), iG->iFCF);
+	max = ticks_to_us(iStats.Max(), iG->iFCF);
+	mean = ticks_to_us(iStats.Mean(), iG->iFCF);
+	test.Printf(_L(" Lock acquire times MIN %10d MAX %10d AVG %10d\n"), min, max, mean);
+	min = ticks_to_us(iTimeoutStats.Min(), iG->iFCF);
+	max = ticks_to_us(iTimeoutStats.Max(), iG->iFCF);
+	mean = ticks_to_us(iTimeoutStats.Mean(), iG->iFCF);
+	test.Printf(_L(" Lock timeout times MIN %10d MAX %10d AVG %10d\n"), min, max, mean);
+	}
+
+TInt CMXThread::Construct(CMXThreadGrp* aG, TUint32 aId, TUint32 aL, TUint32 aD)
+	{
+	iG = aG;
+	iId = aId;
+	iLambda = aL;
+	iDummySteps = aD;
+	iSeed = iId + 1;
+	iDummyLfsr = new LFSR(785,693);
+	if (!iDummyLfsr)
+		return KErrNoMemory;
+	TBuf<16> name = _L("TSThrd");
+	name.AppendNum(iId);
+	TInt r = iThread.Create(name, &ThreadFunc, 0x1000, NULL, this);
+	if (r!=KErrNone)
+		return r;
+	iThread.Logon(iExitStatus);
+	if (iExitStatus != KRequestPending)
+		{
+		iThread.Kill(0);
+		iThread.Close();
+		iThread.SetHandle(0);
+		return iExitStatus.Int();
+		}
+	iThread.SetPriority(EPriorityLess);
+	return KErrNone;
+	}
+
+CMXThread* CMXThread::New(CMXThreadGrp* aG, TUint32 aId, TUint32 aL, TUint32 aD)
+	{
+	CMXThread* p = new CMXThread;
+	if (p)
+		{
+		TInt r = p->Construct(aG, aId, aL, aD);
+		if (r != KErrNone)
+			{
+			delete p;
+			p = 0;
+			}
+		}
+	return p;
+	}
+
+void CMXThread::Start()
+	{
+	iThread.Resume();
+	}
+
+void CMXThread::Wait()
+	{
+	User::WaitForRequest(iExitStatus);
+	}
+
+TInt CMXThread::ThreadFunc(TAny* aPtr)
+	{
+	CMXThread& a = *(CMXThread*)aPtr;
+	return a.Run();
+	}
+
+TInt CMXThread::Steps()
+	{
+	Random(iSeed);
+	return ExpRV(iSeed, iLambda, 1);
+	}
+
+TInt CMXThread::Action()
+	{
+	Random(iSeed);
+	return I64LOW(iSeed)%3;
+	}
+
+TInt CMXThread::Run()
+	{
+	MLock* lock = iG->iLock;
+	LFSR* lfsr = iG->iLfsr;
+	TInt lfl = lock->Flags();
+	TBool pollable = lfl & MLock::EPollable;
+	TBool to = lfl & MLock::ETimeoutAvail;
+	TUint32 start_time = User::NTickCount();
+	TInt r;
+
+	FOREVER
+		{
+		TUint32 now = User::NTickCount();
+		if (now - start_time >= iG->iNTicks)
+			break;
+		++iIterations;
+		iDummyLfsr->Step(iDummySteps);
+		TInt action = Action();
+		TInt steps = Steps();
+		TUint32 initial = User::FastCounter();
+		if (action==2 && to)
+			{
+			r = lock->Wait(1000);
+			if (r!=KErrNone)
+				{
+				TUint32 final = User::FastCounter();
+				TInt elapsed = TInt(final - initial);
+				iTimeoutStats.Add(elapsed);
+				}
+			}
+		else if (action==1 && pollable)
+			{
+			++iPolls;
+			r = lock->Poll();
+			if (r!=KErrNone)
+				++iPollFails;
+			}
+		else
+			{
+			lock->Wait();
+			r = KErrNone;
+			}
+		if (r == KErrNone)
+			{
+			TUint32 final = User::FastCounter();
+			lfsr->Step(steps);
+			lock->Signal();
+			TInt elapsed = TInt(final - initial);
+			iTotalSteps += steps;
+			iStats.Add(elapsed);
+			}
+		}
+
+	return KErrNone;
+	}
+
+CMXThreadGrp* CMXThreadGrp::New(MLock* aLock, TInt aNThreads, TUint32 aLambda, TUint32 aDummySteps, TUint32 aTime)
+	{
+	CMXThreadGrp* p = new CMXThreadGrp;
+	if (p)
+		{
+		TInt r = p->Construct(aLock, aNThreads, aLambda, aDummySteps, aTime);
+		if (r != KErrNone)
+			{
+			delete p;
+			p = 0;
+			}
+		}
+	return p;
+	}
+
+CMXThreadGrp::CMXThreadGrp()
+	{
+	}
+
+TInt CMXThreadGrp::Construct(MLock* aLock, TInt aNThreads, TUint32 aLambda, TUint32 aDummySteps, TUint32 aTime)
+	{
+	iNThreads = aNThreads;
+	iLock = aLock;
+	TInt r = HAL::Get(HAL::EFastCounterFrequency, (TInt&)iFCF);
+	if (r!=KErrNone)
+		return r;
+	r = HAL::Get(HAL::ENanoTickPeriod, (TInt&)iNTickPeriod);
+	if (r!=KErrNone)
+		return r;
+	iNTicks = (aTime+iNTickPeriod-1)/iNTickPeriod;
+	iLfsr = new LFSR(785,693);
+	iLfsr0 = new LFSR(785,693);
+	if (!iLfsr || !iLfsr0)
+		return KErrNoMemory;
+	iThreads = (CMXThread**)User::AllocZ(iNThreads*sizeof(CMXThread*));
+	if (!iThreads)
+		return KErrNoMemory;
+	TInt i;
+	for (i=0; i<iNThreads; ++i)
+		{
+		iThreads[i] = CMXThread::New(this, i, aLambda, aDummySteps);
+		if (!iThreads[i])
+			return KErrNoMemory;
+		}
+	return KErrNone;
+	}
+
+CMXThreadGrp::~CMXThreadGrp()
+	{
+	delete iLfsr;
+	delete iLfsr0;
+	if (iThreads)
+		{
+		TInt i;
+		for (i=0; i<iNThreads; ++i)
+			delete iThreads[i];
+		}
+	User::Free(iThreads);
+	}
+
+TBool CMXThreadGrp::Run()
+	{
+	TInt i;
+	test.Printf(_L("Starting test with N=%d L=%d D=%d T=%d\n"), iNThreads, iThreads[0]->iLambda, iThreads[0]->iDummySteps, iNTicks);
+	for (i=0; i<iNThreads; ++i)
+		iThreads[i]->Start();
+	for (i=0; i<iNThreads; ++i)
+		iThreads[i]->Wait();
+	for (i=0; i<iNThreads; ++i)
+		{
+		iTotalSteps += iThreads[i]->iTotalSteps;
+		iIterations += iThreads[i]->iIterations;
+		iPolls += iThreads[i]->iPolls;
+		iPollFails += iThreads[i]->iPollFails;
+		iStats.Add(iThreads[i]->iStats);
+		iTimeoutStats.Add(iThreads[i]->iTimeoutStats);
+		}
+	test.Printf(_L("Total LFSR steps %d\n"), iTotalSteps);
+	iLfsr0->Step(iTotalSteps);
+	TBool ok = (*iLfsr == *iLfsr0);
+	return ok;
+	}
+
+void CMXThreadGrp::PrintStats()
+	{
+	TInt i;
+	for (i=0; i<iNThreads; ++i)
+		{
+		iThreads[i]->PrintStats();
+		}
+	test.Printf(_L("TOTALS:\n"));
+	test.Printf(_L(" ST:%10d IT:%10d P:%10d PF:%10d TO:%10d\n"), iTotalSteps, iIterations, iPolls, iPollFails, iTimeoutStats.Count());
+	TUint32 min, max, mean;
+	min = ticks_to_us(iStats.Min(), iFCF);
+	max = ticks_to_us(iStats.Max(), iFCF);
+	mean = ticks_to_us(iStats.Mean(), iFCF);
+	test.Printf(_L(" Lock acquire times MIN %10d MAX %10d AVG %10d\n"), min, max, mean);
+	min = ticks_to_us(iTimeoutStats.Min(), iFCF);
+	max = ticks_to_us(iTimeoutStats.Max(), iFCF);
+	mean = ticks_to_us(iTimeoutStats.Mean(), iFCF);
+	test.Printf(_L(" Lock timeout times MIN %10d MAX %10d AVG %10d\n"), min, max, mean);
+	}
+
+TUint32 Calibrate()
+	{
+	TUint32 fcf;
+	TInt r = HAL::Get(HAL::EFastCounterFrequency, (TInt&)fcf);
+	test_KErrNone(r);
+	LFSR* d = new LFSR(785,693);
+	test(d!=0);
+	TInt steps = 2;
+	TUint32 ticks = fcf/10;
+	TUint32 elapsed;
+	FOREVER
+		{
+		TUint32 h0 = User::FastCounter();
+		d->Step(steps);
+		TUint32 h1 = User::FastCounter();
+		elapsed = h1 - h0;
+		if (elapsed > ticks)
+			break;
+		steps *= 2;
+		}
+	delete d;
+	test.Printf(_L("%d steps in %d fast ticks\n"), steps, elapsed);
+	TUint64 x = elapsed;
+	TUint64 s = steps;
+	TUint64 y = fcf;
+	y /= x;
+	s *= y;	// steps per second
+	TUint32 res = I64LOW(s);
+	test.Printf(_L("%d steps per second\n"), res);
+	return res;
+	}
+
+void DoTMX(MLock* aLock, TInt aNThreads, TUint32 aLambda, TUint32 aDummySteps, TUint32 aTime, TBool aShouldFail=EFalse)
+	{
+	CMXThreadGrp* g = CMXThreadGrp::New(aLock, aNThreads, aLambda, aDummySteps, aTime);
+	test(g!=0);
+	TBool ok = g->Run();
+	if (aShouldFail)
+		{
+		test(!ok);
+		}
+	else
+		{
+		test(ok);
+		}
+	g->PrintStats();
+	delete g;
+	}
+
+void DoTMX(MLock* aLock, TUint32 aLambda, TUint32 aDummySteps, TUint32 aTime)
+	{
+	TInt n;
+	for (n=1; n<=4; ++n)
+		{
+		TUint32 l = (n<2) ? aLambda : (aLambda/(n-1));
+		DoTMX(aLock, n, l, aDummySteps, aTime);
+		}
+	aLock->Release();
+	}
+
+
+void TestMutualExclusion()
+	{
+	TInt ntp;
+	TInt r = HAL::Get(HAL::ENanoTickPeriod, ntp);
+	test_KErrNone(r);
+	test.Printf(_L("Nanokernel tick period = %dus\n"), ntp);
+	TUint32 sps = Calibrate();
+	TUint32 lambda = sps/2000;
+	TUint32 dummy = sps/2000;
+	TUint32 time = 5000000;
+	do	{
+		test.Printf(_L("TestMutualExclusion - RSemaphore\n"));
+		LockS ls;
+		DoTMX(&ls, lambda, dummy, time);
+		} while(NoRepeat);
+	do	{
+		test.Printf(_L("TestMutualExclusion - RSemaphore init=2\n"));
+		LockS ls2;
+		ls2.Signal();	// count=2
+		DoTMX(&ls2, 4, lambda, dummy, time, ETrue);
+		} while(NoRepeat);
+	do	{
+		test.Printf(_L("TestMutualExclusion - RMutex\n"));
+		LockM lm;
+		DoTMX(&lm, lambda, dummy, time);
+		} while(NoRepeat);
+	do	{
+		test.Printf(_L("TestMutualExclusion - RFastLock\n"));
+		LockFL fl;
+		DoTMX(&fl, lambda, dummy, time);
+		} while(NoRepeat);
+	do	{
+		test.Printf(_L("TestMutualExclusion - RCriticalSection\n"));
+		LockCS cs;
+		DoTMX(&cs, lambda, dummy, time);
+		} while(NoRepeat);
+	}
+
+
+
+
+/*----------------------------------------------------------------------------*/
 void TestSemaphore()
 	{
 /*********** TO DO ************/
@@ -417,23 +1479,23 @@
 	test.Next(_L("Producer/Consumer scenario"));
 	// Test Rsemaphore with the producer/consumer scenario	RThread thread1, thread2;
 	TRequestStatus stat1, stat2;
-	test_KErrNone(mutex.CreateLocal());
-	test_KErrNone(slotAvailable.CreateLocal(KMaxBufferSize));
-	test_KErrNone(itemAvailable.CreateLocal(0));
-	test_KErrNone(thread1.Create(_L("Thread1"),Producer,KDefaultStackSize,0x200,0x200,NULL));
-	test_KErrNone(thread2.Create(_L("Thread2"),Consumer,KDefaultStackSize,0x200,0x200,NULL));
+	test(mutex.CreateLocal()==KErrNone);
+	test(slotAvailable.CreateLocal(KMaxBufferSize)==KErrNone);
+	test(itemAvailable.CreateLocal(0)==KErrNone);
+	test(thread1.Create(_L("Thread1"),Producer,KDefaultStackSize,0x200,0x200,NULL)==KErrNone);
+	test(thread2.Create(_L("Thread2"),Consumer,KDefaultStackSize,0x200,0x200,NULL)==KErrNone);
 	thread1.Logon(stat1);
 	thread2.Logon(stat2);
-	test_Equal(KRequestPending, stat1.Int());
-	test_Equal(KRequestPending, stat2.Int());
+	test(stat1==KRequestPending);
+	test(stat2==KRequestPending);
 	thread1.Resume(); 
 	thread2.Resume();
 	User::WaitForRequest(stat1);
 	User::WaitForRequest(stat2);
-	test_KErrNone(stat1.Int());
-	test_KErrNone(stat2.Int());
+	test(stat1==KErrNone);
+	test(stat2==KErrNone);
 	for(TInt jj=0;jj<KNumProducerItems;jj++)
-		test_Equal(jj, consumerArray[jj]);		
+		test(consumerArray[jj]==jj);		
 	
 	test.Next(_L("Close"));
 	mutex.Close();
@@ -446,7 +1508,7 @@
 	{
 	RMutex m;
 	test.Start(_L("Create"));
-	test_KErrNone(m.CreateLocal());
+	test(m.CreateLocal()==KErrNone);
 
 	// Test RMutex::IsHeld()
 	test.Next(_L("IsHeld ?"));
@@ -466,7 +1528,7 @@
 void TestMutex()
 	{
 	test.Start(_L("Create"));
-	test_KErrNone(mutex.CreateLocal());
+	test(mutex.CreateLocal()==KErrNone);
 	
 	test.Next(_L("Threads writing to arrays test"));
 //
@@ -480,19 +1542,19 @@
 //
 	arrayIndex=0;
 	RThread thread1,thread2;	
-	test_KErrNone(thread1.Create(_L("Thread1"),MutexThreadEntryPoint1,KDefaultStackSize,0x2000,0x2000,NULL));
-	test_KErrNone(thread2.Create(_L("Thread2"),MutexThreadEntryPoint2,KDefaultStackSize,0x2000,0x2000,NULL));			 
+	test(thread1.Create(_L("Thread1"),MutexThreadEntryPoint1,KDefaultStackSize,0x2000,0x2000,NULL)==KErrNone);
+	test(thread2.Create(_L("Thread2"),MutexThreadEntryPoint2,KDefaultStackSize,0x2000,0x2000,NULL)==KErrNone);			 
 	TRequestStatus stat1,stat2;
 	thread1.Logon(stat1);
 	thread2.Logon(stat2);
-	test_Equal(KRequestPending, stat1.Int());
-	test_Equal(KRequestPending, stat2.Int());
+	test(stat1==KRequestPending);
+	test(stat2==KRequestPending);
 	thread1.Resume(); 
 	thread2.Resume();
 	User::WaitForRequest(stat1);
 	User::WaitForRequest(stat2);
-	test_KErrNone(stat1.Int());
-	test_KErrNone(stat2.Int()); 
+	test(stat1==KErrNone);
+	test(stat2==KErrNone); 
 	TInt thread1ActualCount=0; 
 	TInt thread2ActualCount=0;
 	TInt ii=0;
@@ -504,11 +1566,11 @@
 			thread2ActualCount++;
 		ii++;
 		}
-	test.Printf(_L("T1 %d T1ACT %d T2 %d T2ACT %d"),thread1Count,thread1ActualCount,thread2Count,thread2ActualCount);
-	test_Equal(thread1Count, thread1ActualCount);
-	test_Equal(thread2Count, thread2ActualCount);
-	test_Equal(thread2Count, thread1Count);
-	test_Equal((KMaxArraySize>>1), thread1Count);
+	test.Printf(_L("T1 %d T1ACT %d T2 %d T2ACT %d\n"),thread1Count,thread1ActualCount,thread2Count,thread2ActualCount);
+	test(thread1ActualCount==thread1Count);
+	test(thread2ActualCount==thread2Count);
+	test(thread1Count==thread2Count);
+	test(thread1Count==(KMaxArraySize>>1));
 	
 	test.Next(_L("Close"));
 	CLOSE_AND_WAIT(thread1);
@@ -524,7 +1586,7 @@
 	{
 	
 	test.Start(_L("Create"));
-	test_KErrNone(criticalSn.CreateLocal());
+	test(criticalSn.CreateLocal()==KErrNone);
 
 /***************** TO DO ***********************
 
@@ -553,20 +1615,21 @@
 // threads think.
 //
 	arrayIndex=0;
+
 	RThread thread1,thread2;	
-	test_KErrNone(thread1.Create(_L("Thread1"),CriticalSnThreadEntryPoint1,KDefaultStackSize,0x2000,0x2000,NULL));
-	test_KErrNone(thread2.Create(_L("Thread2"),CriticalSnThreadEntryPoint2,KDefaultStackSize,0x2000,0x2000,NULL));			 
+	test(thread1.Create(_L("Thread1"),CriticalSnThreadEntryPoint1,KDefaultStackSize,0x2000,0x2000,NULL)==KErrNone);
+	test(thread2.Create(_L("Thread2"),CriticalSnThreadEntryPoint2,KDefaultStackSize,0x2000,0x2000,NULL)==KErrNone);			 
 	TRequestStatus stat1,stat2;
 	thread1.Logon(stat1);
 	thread2.Logon(stat2);
-	test_Equal(KRequestPending, stat1.Int());
-	test_Equal(KRequestPending, stat2.Int());
+	test(stat1==KRequestPending);
+	test(stat2==KRequestPending);
 	thread1.Resume(); 
 	thread2.Resume();
 	User::WaitForRequest(stat1);
 	User::WaitForRequest(stat2);
-	test_KErrNone(stat1.Int());
-	test_KErrNone(stat2.Int()); 
+	test(stat1==KErrNone);
+	test(stat2==KErrNone); 
 	TInt thread1ActualCount=0; 
 	TInt thread2ActualCount=0;
 	TInt ii=0;
@@ -578,10 +1641,10 @@
 			thread2ActualCount++;
 		ii++;
 		}
-	test_Equal(thread1Count, thread1ActualCount);
-	test_Equal(thread2Count, thread2ActualCount);
-	test_Equal(thread2Count, thread1Count);
-	test_Equal((KMaxArraySize>>1), thread1Count);
+	test(thread1ActualCount==thread1Count);
+	test(thread2ActualCount==thread2Count);
+	test(thread1Count==thread2Count);
+	test(thread1Count==(KMaxArraySize>>1));
 
 	test.Next(_L("Close"));
 	CLOSE_AND_WAIT(thread1);
@@ -593,22 +1656,13 @@
 
 GLDEF_C TInt E32Main()
 	{	
-	TInt cpus = UserSvr::HalFunction(EHalGroupKernel, EKernelHalNumLogicalCpus, 0, 0);
-	if (cpus != 1)
-		{
-		test(cpus>1);
-		// This test will require compatibility mode (and probably other changes)
-		// to work on SMP - it depends on explicit scheduling order.
-		test.Printf(_L("T_SEMUTX skipped, does not work on SMP\n"));
-		return KErrNone;
-		}	
-	
 
 	test.Title();
  	__UHEAP_MARK;
+	TestMutualExclusion();
+	TestPollTimeout();
 	test.Start(_L("Test RSemaphore"));
 	TestSemaphore();
-	TestSemaphore2();
 	test.Next(_L("Test RMutex"));
 	TestMutex();
 	TestMutex2();