kernel/eka/nkernsmp/arm/ncsched.cia
branch      RCL_3
changeset   256:c1f20ce4abcf
parent      31:56f325a607ea
child       257:3e88ff8f41d5
comparison  249:a179b74831c9 -> 256:c1f20ce4abcf
@@ -52 +52 @@
 
 //#define __DEBUG_BAD_ADDR
 
 extern "C" void NewThreadTrace(NThread* a);
 extern "C" void send_accumulated_resched_ipis();
+extern "C" void wake_up_for_ipi(TSubScheduler*, TInt);
 
 
 __NAKED__ void TScheduler::Reschedule()
 	{
 	//
@@ -69 +70 @@
 	GET_RWNO_TID(,r0)						// r0->TSubScheduler
 	asm("stmfd	sp!, {r2,lr} ");			// save original SP/resched flag, return address
 	__ASM_CLI();							// interrupts off
 	asm("ldr	r1, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iDfcPendingFlag)&~3));	// check iDfcPendingFlag and iExIDfcPendingFlag
 	asm("mov	r11, r0 ");					// r11->TSubScheduler
-	asm("ldr	r10, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));	// r10->CPU local timer
+	asm("ldr	r10, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iLocalTimerAddr));	// r10->CPU local timer
 
 	asm("start_resched: ");
 	asm("movs	r1, r1, lsr #16 ");			// check if IDFCs or ExIDFCs pending
 
 	asm("blne "	CSM_ZN13TSubScheduler9QueueDfcsEv);		// queue any pending DFCs
@@ -827 +828 @@
 	asm("msr cpsr, r1 ");					// restore interrupts
 	__JUMP(,lr);
 	}
 
 
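Note on the prologue above: the `_FOFF(...)&~3` rounds the flag offset down to a word boundary, so one LDR fetches the word holding both pending flags, and `movs r1, r1, lsr #16` tests them together. A minimal sketch of that test, assuming (as the shift implies) the two byte flags occupy the upper halfword of the loaded word; AnyDfcsPending is a hypothetical helper, not a kernel function:

	// Models "ldr r1, [r0, #offset&~3]" followed by "movs r1, r1, lsr #16".
	inline TBool AnyDfcsPending(TUint32 aFlagsWord)
		{
		// iDfcPendingFlag / iExIDfcPendingFlag assumed to sit in bytes 2 and 3
		return (aFlagsWord >> 16) != 0;	// nonzero => IDFCs or ExIDFCs pending
		}
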
@@ -832 +833 @@
-extern "C" __NAKED__ void send_irq_ipi(TSubScheduler*)
+extern "C" __NAKED__ void send_irq_ipi(TSubScheduler*, TInt)
 	{
+	asm("tst	r1, #%a0" : : "i" ((TInt)EQueueEvent_WakeUp) );
+	asm("bne "	CSM_CFUNC(wake_up_for_ipi));
 	__DATA_SYNC_BARRIER_Z__(r3);			// need DSB before sending any IPI
 	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
-	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
+	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
 	asm("mov	r1, #%a0" : : "i" ((TInt)TRANSFERRED_IRQ_VECTOR));
 	asm("orr	r1, r1, r3, lsl #16 ");
 	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
 	__JUMP(,lr);
 	}
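Rendered as C++, the amended routine takes the extra TInt and diverts to wake_up_for_ipi() before touching the GIC. A hedged sketch: the _sketch name is hypothetical, and __e32_memory_barrier() merely stands in for the __DATA_SYNC_BARRIER_Z__ macro:

	extern "C" void send_irq_ipi_sketch(TSubScheduler* aS, TInt aFlags)
		{
		if (aFlags & EQueueEvent_WakeUp)
			{
			wake_up_for_ipi(aS, aFlags);	// "bne wake_up_for_ipi" - target CPU may need waking first
			return;							// the assembler tail-branches, so it never falls through
			}
		__e32_memory_barrier();				// need DSB before sending any IPI
		TUint32 mask = aS->iCpuMask;
		aS->iSSX.iGicDistAddr->iSoftIrq = (mask << 16) | TRANSFERRED_IRQ_VECTOR;	// trigger IPIs
		}
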
@@ -843 +846 @@
 // Send any outstanding reschedule IPIs when the kernel is unlocked on this CPU.
 // Call with interrupts disabled, R0->TSubScheduler, R12=R0->iReschedIPIs
 // Return with R0 unaltered.
 extern "C" __NAKED__ void send_accumulated_resched_ipis()
 	{
-	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));
+	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iScheduler));
+	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));
+	asm("ldr	r1, [r3, #%a0]" : : "i" _FOFF(TScheduler, iThreadAcceptCpus));
+	asm("bics	r1, r12, r1 ");
+	asm("bne	2f ");
+	asm("1:		");
 	asm("mov	r1, #0 ");
 	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
 	__DATA_SYNC_BARRIER__(r1);				// need DSB before sending any IPI
 	asm("mov	r1, r12, lsl #16 ");
 //	asm("orr	r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
 	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
 	__JUMP(,lr);
+
+	asm("2:		");
+	asm("stmfd	sp!, {r0,lr} ");
+	asm("mov	r0, r3 ");
+	asm("mov	r1, r12 ");
+	asm("bl		ReschedInactiveCpus__10TSchedulerUl ");
+	asm("mov	r12, r0 ");
+	asm("ldmfd	sp!, {r0,lr} ");
+	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));
+	asm("b		1b ");
 	}
 
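The new head of this routine carries the behavioural change: IPIs aimed at CPUs currently excluded from iThreadAcceptCpus are first filtered through TScheduler::ReschedInactiveCpus(), whose mask-in/mask-out shape is inferred here from the mangled symbol ReschedInactiveCpus__10TSchedulerUl. A hedged C++ rendering (the _sketch name is hypothetical; the mask arrives in R12 in the real calling convention):

	extern "C" void send_accumulated_resched_ipis_sketch(TSubScheduler* aS, TUint32 aIpiMask)
		{
		TScheduler* s = aS->iScheduler;
		if (aIpiMask & ~s->iThreadAcceptCpus)				// "bics r1, r12, r1" / "bne 2f"
			aIpiMask = s->ReschedInactiveCpus(aIpiMask);	// signature assumed from the mangled name
		aS->iReschedIPIs = 0;
		__e32_memory_barrier();								// need DSB before sending any IPI
		aS->iSSX.iGicDistAddr->iSoftIrq = aIpiMask << 16;	// RESCHED_IPI_VECTOR==0
		}
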
@@ -858 +876 @@
 // Send a reschedule IPI to the specified CPU
 extern "C" __NAKED__ void send_resched_ipi(TInt /*aCpu*/)
 	{
 	GET_RWNO_TID(,r3);
 	__DATA_SYNC_BARRIER_Z__(r2);			// need DSB before sending any IPI
-	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
+	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
 	ASM_DEBUG1(SendReschedIPI,r0);
 	asm("mov	r1, #0x10000 ");
 	asm("mov	r1, r1, lsl r0 ");	// 0x10000<<aCpu
 //	asm("orr	r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
 	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
@@ -873 +891 @@
 // *** DON'T DO ANY TRACING OR INSTRUMENTATION ***
 extern "C" __NAKED__ void send_self_resched_ipi()
 	{
 	GET_RWNO_TID(,r3);
 	__DATA_SYNC_BARRIER_Z__(r2);			// need DSB before sending any IPI
-	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
+	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
 	asm("mov	r1, #0x02000000 ");			// target = requesting CPU only
 //	asm("orr	r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
 	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPI
 	__JUMP(,lr);
 	}
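All of these routines end by writing the GIC distributor's software-interrupt register, and the magic constants come from its encoding: SGI vector number in the low bits, CPU target list in bits 16-23, and a target filter in bits 24-25 where 0b10 (hence 0x02000000 above) means "requesting CPU only". As a sketch (SendSgiToMask is a hypothetical helper, not kernel code):

	inline void SendSgiToMask(GicDistributor* aGic, TUint32 aCpuMask, TInt aVector)
		{
		aGic->iSoftIrq = (aCpuMask << 16) | aVector;	// e.g. mask 0x1<<aCpu, vector 0 for resched
		}
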
@@ -884 +902 @@
-
-extern "C" __NAKED__ void send_resched_ipis(TUint32 aMask)
-	{
-	ASM_DEBUG1(SendReschedIPIs,r0);
-	__DATA_SYNC_BARRIER_Z__(r2);			// need DSB before sending any IPI
-	asm("cmp	r0, #0 ");		// any bits set in aMask?
-	GET_RWNO_TID(ne,r3);
-	asm("ldrne	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
-	asm("movne	r0, r0, lsl #16 ");
-//	asm("orrne	r0, r0, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
-	asm("strne	r0, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs if any
-	__JUMP(,lr);
-	}
-
 
 extern "C" __NAKED__ void send_resched_ipi_and_wait(TInt /*aCpu*/)
 	{
 	asm("ldr	r1, __TheSubSchedulers ");
 	asm("mov	r2, #0x10000 ");
 	asm("mov	r2, r2, lsl r0 ");	// 0x10000<<aCpu
 	ASM_DEBUG1(SendReschedIPIAndWait,r0);
-	asm("add	r0, r1, r0, lsl #9 ");	// sizeof(TSubScheduler)=512
-	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
-	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
+	asm("add	r0, r1, r0, lsl #%a0 " : : "i" ((TInt)KSubSchedulerShift));
+	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
+	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iIrqCount));
 	__DATA_SYNC_BARRIER_Z__(r1);		// make sure i_IrqCount is read before IPI is sent
 //	asm("orr	r2, r2, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
 	asm("str	r2, [r3, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
 	__DATA_SYNC_BARRIER__(r1);			// make sure IPI has been sent
 	asm("1: ");
 	asm("ldrb	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
-	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
-	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
+	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iIrqNestCount));
+	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iIrqCount));
 	asm("cmp	r1, #0 ");
 	asm("beq	0f ");					// iRescheduleNeededFlag not set -> wait
 	asm("cmp	r2, #0 ");
 	asm("bge	2f ");					// if other CPU is in an ISR, finish
 	asm("cmp	r3, r12 ");				// if not, has i_IrqCount changed?
@@ -928 +932 @@
 
 	asm("__TheSubSchedulers: ");
 	asm(".word TheSubSchedulers ");
 	}
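The polling loop at label 1 watches three fields of the target CPU's TSubScheduler. A hedged C++ paraphrase follows; the exit paths at labels 0 and 2 fall in lines elided from this hunk, so the treatment of a changed iIrqCount is an assumption based only on the "has i_IrqCount changed?" comment:

	TSubScheduler& ss = TheSubSchedulers[aCpu];
	TUint32 seenIrqs = ss.iSSX.iIrqCount;				// sampled before the IPI is sent
	ss.iSSX.iGicDistAddr->iSoftIrq = 0x10000u << aCpu;	// RESCHED_IPI_VECTOR==0
	for (;;)
		{
		if (!ss.iRescheduleNeededFlag)
			continue;						// "iRescheduleNeededFlag not set -> wait"
		if (ss.iSSX.iIrqNestCount >= 0)
			break;							// "if other CPU is in an ISR, finish"
		if (ss.iSSX.iIrqCount != seenIrqs)
			break;							// assumed: target has taken an interrupt -> finish
		}
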
@@ -932 +936 @@
 
+
 /*	If the current thread is subject to timeslicing, update its remaining time
 	from the current CPU's local timer. Don't stop the timer.
 	If the remaining time is negative, save it as zero.
  */
 __NAKED__ void TSubScheduler::SaveTimesliceTimer(NThreadBase* /*aThread*/)
 	{
 	asm("ldr	r3, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
 	asm("ldrb	r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,i_NThread_Initial));
-	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));
+	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iLocalTimerAddr));
 	asm("cmp	r3, #0 ");
 	asm("ble	0f ");					// thread isn't timesliced or timeslice already expired so skip
 	asm("cmp	r12, #0 ");
 	asm("bne	0f ");					// initial (i.e. idle) thread, so skip
 	asm("ldr	r3, [r2, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
-	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));
+	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iTimerFreqRI.iI.iM));
+	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iTimerFreqRI.iI.iX));
 	asm("cmp	r3, #0 ");
 	asm("movmi	r0, #0 ");				// if timer count is negative, save zero
 	asm("bmi	1f ");
-	asm("umull	r0, r3, r12, r3 ");		// scale up to max timer clock
-	asm("adds	r0, r0, #0x00800000 ");
-	asm("adcs	r3, r3, #0 ");
-	asm("mov	r0, r0, lsr #24 ");
-	asm("orr	r0, r0, r3, lsl #8 ");
+	asm("mov	r2, r2, lsl #16 ");
+	asm("mov	r2, r2, asr #16 ");
+	asm("umull	r0, r3, r12, r3 ");		// scale up to max timer clock (R3:R0) - need to shift right by -iX
+	asm("rsb	r2, r2, #0 ");
+	asm("rsb	r12, r2, #32 ");
+	asm("movs	r0, r0, lsr r2 ");		// r0 >>= iSSX.iTimerFreqRI.iI.iX, C = last bit shifted off (rounding)
+	asm("orr	r0, r0, r3, lsl r12 ");	// bottom bits from r3 into top bits of r0
+	asm("adcs	r0, r0, #0 ");			// round using last bit shifted off
 	asm("1:		");
 	asm("str	r0, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
 	asm("0:		");
 	__JUMP(,lr);
 	}
 
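The scaling change is the point of this hunk: the old code multiplied the timer count by a fixed 32-bit factor and shifted right by a constant 24, while the new code keeps the ratio as a rate-invariant mantissa/exponent pair, iSSX.iTimerFreqRI.iI.iM and .iX, where iX is a signed 16-bit exponent (hence the lsl/asr #16 sign-extension) that is negative here, so the 64-bit product is shifted right by -iX and rounded to nearest using the last bit shifted out. A hedged C++ model (ScaleTimerCount is a hypothetical helper; it assumes -32 < aX < 0, as the register usage implies):

	static TUint32 ScaleTimerCount(TUint32 aCount, TUint32 aM, TInt aX)
		{
		TUint64 p = TUint64(aCount) * aM;			// "umull r0, r3, r12, r3"
		TInt shift = -aX;							// need to shift right by -iX
		TUint32 r = TUint32(p >> shift);
		TUint32 lastBitOut = TUint32(p >> (shift - 1)) & 1;
		return r + lastBitOut;						// "adcs r0, r0, #0" - round to nearest
		}
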
@@ -962 +971 @@
-/*	Update aOld's execution time and set up the timer for aNew
-	Update this CPU's timestamp value
-
-	if (!aOld) aOld=iInitialThread
-	if (!aNew) aNew=iInitialThread
-	newcount = aNew->iTime>0 ? Max(aNew->iTime*i_TimerMultF/2^32, 1) : 2^31-1
-	cli()
-	oldcount = timer count
-	if (oldcount<=0 || aOld!=aNew)
-		{
-		timer count = newcount
-		elapsed = i_LastTimerSet - oldcount
-		i_LastTimerSet = newcount
-		elapsed = elapsed * i_TimerMultI / 2^24
-		aOld->iTotalCpuTime64 += elapsed
-		correction = i_TimestampError;
-		if (correction > i_MaxCorrection)
-			correction = i_MaxCorrection
-		else if (correction < -i_MaxCorrection)
-			correction = -i_MaxCorrection
-		i_TimestampError -= correction
-		i_LastTimestamp += elapsed + i_TimerGap - correction
-		}
-	sti()
- */
-__NAKED__ void TSubScheduler::UpdateThreadTimes(NThreadBase* /*aOld*/, NThreadBase* /*aNew*/)
-	{
-	asm("cmp	r2, #0 ");
-	asm("ldreq	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
-	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultF));
-	asm("cmp	r1, #0 ");
-	asm("ldreq	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
-	asm("ldr	r3, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
-	asm("stmfd	sp!, {r4-r7} ");
-	asm("ldr	r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
-	asm("ldr	r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
-	asm("cmp	r1, r2 ");
-	asm("beq	2f ");
-	asm("adds	r6, r6, #1 ");
-	asm("str	r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
-	asm("ldr	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
-	asm("ldr	r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
-	asm("adcs	r7, r7, #0 ");
-	asm("str	r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
-	asm("adds	r4, r4, #1 ");
-	asm("adcs	r6, r6, #0 ");
-	asm("str	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
-	asm("str	r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
-	asm("2:		");
-	asm("cmp	r3, #1 ");					// aNew->iTime > 0 ?
-	asm("umullge r4, r3, r12, r3 ");
-	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));
-	asm("movlt	r3, #0x7fffffff ");
-	asm("addges	r3, r3, r4, lsr #31 ");		// round up top 32 bits if bit 31 set
-	asm("moveq	r3, #1 ");					// if result zero, limit to 1
-	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
-	__ASM_CLI();
-	asm("ldr	r4, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
-	asm("cmp	r1, r2 ");
-	asm("bne	1f ");
-	asm("cmp	r4, #0 ");
-	asm("bgt	0f ");						// same thread, timeslice not expired -> leave timer alone
-	asm("1:		");
-	asm("str	r3, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));	// set new timeslice value in timer
-	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));
-	asm("str	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
-	asm("sub	r12, r12, r4 ");			// r12 = elapsed (actual timer ticks)
-	asm("umull	r4, r5, r12, r5 ");
-	asm("ldr	r3, [r1, #%a0]!" : : "i" _FOFF(NThreadBase,iTotalCpuTime64));
-	asm("ldr	r12, [r1, #4] ");
-	asm("adds	r4, r4, #0x00800000 ");
-	asm("adcs	r5, r5, #0 ");
-	asm("mov	r4, r4, lsr #24 ");
-	asm("orr	r4, r4, r5, lsl #8 ");		// r4 = elapsed
-	asm("adds	r3, r3, r4 ");
-	asm("adcs	r12, r12, #0 ");
-	asm("stmia	r1, {r3,r12} ");			// aOld->iTotalCpuTime64 += elapsed
-	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
-	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_MaxCorrection));
-	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
-	asm("ldr	r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
-	asm("mov	r12, r3 ");
-	asm("cmp	r3, r5 ");
-	asm("movgt	r3, r5 ");					// if (correction>i_MaxCorrection) correction=i_MaxCorrection
-	asm("cmn	r3, r5 ");
-	asm("rsblt	r3, r5, #0 ");				// if (correction+i_MaxCorrection<0) correction=-i_MaxCorrection
-	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerGap));
-	asm("sub	r12, r12, r3 ");
-	asm("str	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
-	asm("add	r4, r4, r5 ");				// r4 = elapsed + i_TimerGap
-	asm("adds	r1, r1, r4 ");
-	asm("adcs	r2, r2, #0 ");				// iLastTimestamp64 + (elapsed + i_TimerGap)
-	asm("subs	r1, r1, r3 ");
-	asm("sbcs	r1, r1, r3, asr #32 ");		// iLastTimestamp64 + (elapsed + i_TimerGap - correction)
-	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
-	asm("str	r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
-	asm("0:		");
-	__ASM_STI();
-	asm("ldmfd	sp!, {r4-r7} ");
-	__JUMP(,lr);
-	}
+
+#if defined(__UTT_MACHINE_CODED__)
+#if defined(__NKERN_TIMESTAMP_USE_LOCAL_TIMER__)
+#error Use of local timer for NKern::Timestamp() no longer supported
+#else
+
+#error UpdateThreadTimes assembler out of date!
+
+#endif
+#endif	// __UTT_MACHINE_CODED__
 
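The deleted pseudocode block remains the clearest description of what this assembler did. For reference, its timestamp-correction core transliterated into plain C++; this is not the replacement implementation (which presumably now lives in C++ elsewhere, given the "#error UpdateThreadTimes assembler out of date!" guard), just the deleted comment restated:

	// elapsed = (i_LastTimerSet - oldcount) * i_TimerMultI / 2^24, per the deleted comment
	aOld->iTotalCpuTime64 += elapsed;
	TInt correction = i_TimestampError;
	if (correction > i_MaxCorrection)
		correction = i_MaxCorrection;
	else if (correction < -i_MaxCorrection)
		correction = -i_MaxCorrection;
	i_TimestampError -= correction;
	iLastTimestamp64 += TInt64(elapsed) + i_TimerGap - correction;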