kernel/eka/nkernsmp/arm/ncsched.cia
changeset 90 947f0dc9f7a8
parent 31 56f325a607ea
child 177 a232af6b0b1f
child 184 0e2270015475
comparing 52:2d65c2f76d7b with 90:947f0dc9f7a8

--- a/kernel/eka/nkernsmp/arm/ncsched.cia
+++ b/kernel/eka/nkernsmp/arm/ncsched.cia
@@ -52,10 +52,11 @@
 
 //#define __DEBUG_BAD_ADDR
 
 extern "C" void NewThreadTrace(NThread* a);
 extern "C" void send_accumulated_resched_ipis();
+extern "C" void wake_up_for_ipi(TSubScheduler*, TInt);
 
 
 __NAKED__ void TScheduler::Reschedule()
 	{
 	//
@@ -69,12 +70,12 @@
 	GET_RWNO_TID(,r0)						// r0->TSubScheduler
 	asm("stmfd	sp!, {r2,lr} ");			// save original SP/resched flag, return address
 	__ASM_CLI();							// interrupts off
 	asm("ldr	r1, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iDfcPendingFlag)&~3));	// check iDfcPendingFlag and iExIDfcPendingFlag
 	asm("mov	r11, r0 ");					// r11->TSubScheduler
-	asm("ldr	r10, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));	// r10->CPU local timer
+	asm("ldr	r10, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iLocalTimerAddr));	// r10->CPU local timer
 
 	asm("start_resched: ");
 	asm("movs	r1, r1, lsr #16 ");			// check if IDFCs or ExIDFCs pending
 
 	asm("blne "	CSM_ZN13TSubScheduler9QueueDfcsEv);		// queue any pending DFCs
@@ -827,5 +828,5 @@
 	asm("msr cpsr, r1 ");					// restore interrupts
 	__JUMP(,lr);
 	}
 
 
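The iDfcPendingFlag load in the hunk above reads a whole aligned word (note the `&~3` on the offset), and the later `movs r1, r1, lsr #16` then tests two byte-sized flags with a single load and shift. A minimal C++ sketch of that idiom, assuming the two pending flags sit at byte offsets 2 and 3 of the word on a little-endian ARM (the real offsets are fixed by the TSubScheduler layout, not shown in this file):

	#include <stdint.h>

	// Assumed packing for illustration: four byte flags share one aligned
	// word, with iDfcPendingFlag at byte 2 and iExIDfcPendingFlag at byte 3.
	// On little-endian ARM those two bytes occupy bits 16..31 of the word.
	union TPendingFlagsWord
		{
		uint32_t iWhole;
		struct
			{
			uint8_t iOther0;			// e.g. iRescheduleNeededFlag
			uint8_t iOther1;
			uint8_t iDfcPending;		// IDFCs queued
			uint8_t iExIDfcPending;		// extra IDFCs queued
			} iBytes;
		};

	// Equivalent of "ldr r1,[flags word]" + "movs r1, r1, lsr #16":
	// one word load tests both pending flags at once.
	inline bool DfcsPending(const TPendingFlagsWord& aFlags)
		{
		return (aFlags.iWhole >> 16) != 0;
		}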
@@ -832,10 +833,12 @@
-extern "C" __NAKED__ void send_irq_ipi(TSubScheduler*)
+extern "C" __NAKED__ void send_irq_ipi(TSubScheduler*, TInt)
 	{
+	asm("tst	r1, #%a0" : : "i" ((TInt)EQueueEvent_WakeUp) );
+	asm("bne "	CSM_CFUNC(wake_up_for_ipi));
 	__DATA_SYNC_BARRIER_Z__(r3);			// need DSB before sending any IPI
 	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
-	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
+	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
 	asm("mov	r1, #%a0" : : "i" ((TInt)TRANSFERRED_IRQ_VECTOR));
 	asm("orr	r1, r1, r3, lsl #16 ");
 	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
 	__JUMP(,lr);
 	}
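The added prologue gives send_irq_ipi() a second parameter and diverts to wake_up_for_ipi() when the EQueueEvent_WakeUp bit is set; the `bne` is a tail-branch, so no SGI is raised on that path. A sketch of the resulting control flow, with the flag and vector values assumed (only the names the tst/bne act on come from the hunk), and the DSB and GIC access reduced to a plain volatile store:

	#include <stdint.h>

	struct TSubScheduler;						// opaque in this sketch

	// Stand-in for the real wake_up_for_ipi(); its actual behaviour lives elsewhere.
	static void wake_up_for_ipi(TSubScheduler* /*aS*/, int /*aFlags*/)
		{
		}

	const int KQueueEvent_WakeUp = 0x01;		// assumed value of EQueueEvent_WakeUp
	const uint32_t KTransferredIrqVector = 1;	// assumed value of TRANSFERRED_IRQ_VECTOR

	// Control flow of send_irq_ipi(aS, aFlags) after this change.
	void send_irq_ipi_sketch(TSubScheduler* aS, int aFlags,
							 uint32_t aCpuMask, volatile uint32_t& aGicSoftIrq)
		{
		if (aFlags & KQueueEvent_WakeUp)
			{
			wake_up_for_ipi(aS, aFlags);		// "tst r1 / bne": tail-call, no IPI here
			return;
			}
		// __DATA_SYNC_BARRIER_Z__(r3) in the real code: order prior stores first
		aGicSoftIrq = (aCpuMask << 16) | KTransferredIrqVector;	// GICD_SGIR-style write
		}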
@@ -843,14 +846,29 @@
 // Send any outstanding reschedule IPIs when the kernel is unlocked on this CPU.
 // Call with interrupts disabled, R0->TSubScheduler, R12=R0->iReschedIPIs
 // Return with R0 unaltered.
 extern "C" __NAKED__ void send_accumulated_resched_ipis()
 	{
-	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));
+	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iScheduler));
+	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));
+	asm("ldr	r1, [r3, #%a0]" : : "i" _FOFF(TScheduler, iThreadAcceptCpus));
+	asm("bics	r1, r12, r1 ");
+	asm("bne	2f ");
+	asm("1:		");
 	asm("mov	r1, #0 ");
 	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
 	__DATA_SYNC_BARRIER__(r1);				// need DSB before sending any IPI
 	asm("mov	r1, r12, lsl #16 ");
 //	asm("orr	r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
 	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
 	__JUMP(,lr);
+
+	asm("2:		");
+	asm("stmfd	sp!, {r0,lr} ");
+	asm("mov	r0, r3 ");
+	asm("mov	r1, r12 ");
+	asm("bl		ReschedInactiveCpus__10TSchedulerUl ");
+	asm("mov	r12, r0 ");
+	asm("ldmfd	sp!, {r0,lr} ");
+	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));
+	asm("b		1b ");
 	}
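The rewritten routine now filters the accumulated mask in R12 against TScheduler::iThreadAcceptCpus: if any target CPU is not currently accepting threads, it first calls TScheduler::ReschedInactiveCpus() (the mangled ReschedInactiveCpus__10TSchedulerUl above) to obtain an adjusted mask, then clears iReschedIPIs and raises the SGIs. A C++ sketch of that flow with placeholder types; RESCHED_IPI_VECTOR is 0, so the SGI word is just the mask shifted into bits 16-23:

	#include <stdint.h>

	// Placeholder declarations; the real classes are far larger.
	struct TSchedulerSketch
		{
		uint32_t iThreadAcceptCpus;				// mask of CPUs currently accepting threads
		uint32_t ReschedInactiveCpus(uint32_t aMask)
			{
			return aMask;						// placeholder body; real logic elsewhere
			}
		};

	struct TSubSchedulerSketch
		{
		TSchedulerSketch* iScheduler;
		uint32_t iReschedIPIs;					// accumulated reschedule targets
		volatile uint32_t* iGicSoftIrq;			// stands in for iSSX.iGicDistAddr->iSoftIrq
		};

	// Sketch of send_accumulated_resched_ipis(); aMask mirrors R12 on entry.
	void send_accumulated_resched_ipis_sketch(TSubSchedulerSketch* aS, uint32_t aMask)
		{
		if (aMask & ~aS->iScheduler->iThreadAcceptCpus)		// "bics r1,r12,r1 / bne 2f"
			aMask = aS->iScheduler->ReschedInactiveCpus(aMask);
		aS->iReschedIPIs = 0;
		// __DATA_SYNC_BARRIER__(r1) in the real code, then the GIC soft-IRQ write:
		*aS->iGicSoftIrq = aMask << 16;						// RESCHED_IPI_VECTOR = 0
		}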
@@ -858,11 +876,11 @@
 // Send a reschedule IPI to the specified CPU
 extern "C" __NAKED__ void send_resched_ipi(TInt /*aCpu*/)
 	{
 	GET_RWNO_TID(,r3);
 	__DATA_SYNC_BARRIER_Z__(r2);			// need DSB before sending any IPI
-	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
+	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
 	ASM_DEBUG1(SendReschedIPI,r0);
 	asm("mov	r1, #0x10000 ");
 	asm("mov	r1, r1, lsl r0 ");	// 0x10000<<aCpu
 //	asm("orr	r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
 	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
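All of these routines signal through the same store to GicDistributor::iSoftIrq, which on a GICv1/v2 distributor is the software-generated-interrupt register (GICD_SGIR): bits 16-23 select the target CPUs, bits 0-3 the interrupt ID, and bits 25:24 a target-list filter. Two helpers showing the encodings used here, as a sketch (the GicDistributor struct itself is defined elsewhere in the kernel):

	#include <stdint.h>

	// GICD_SGIR bit layout (GICv2 naming):
	//   [25:24] target list filter: 0 = use list, 2 = requesting CPU only
	//   [23:16] CPU target list (one bit per core)
	//   [3:0]   SGI interrupt ID
	inline uint32_t SgiToList(uint32_t aCpuMask, uint32_t aVector)
		{
		return ((aCpuMask & 0xffu) << 16) | (aVector & 0xfu);
		}

	inline uint32_t SgiToSelf(uint32_t aVector)
		{
		return 0x02000000u | (aVector & 0xfu);	// the constant send_self_resched_ipi() uses
		}

	// send_resched_ipi(aCpu) stores SgiToList(1u << aCpu, 0);   RESCHED_IPI_VECTOR = 0
	// send_irq_ipi(...)     stores SgiToList(iCpuMask, TRANSFERRED_IRQ_VECTOR)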
@@ -873,25 +891,11 @@
 // *** DON'T DO ANY TRACING OR INSTRUMENTATION ***
 extern "C" __NAKED__ void send_self_resched_ipi()
 	{
 	GET_RWNO_TID(,r3);
 	__DATA_SYNC_BARRIER_Z__(r2);			// need DSB before sending any IPI
-	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
+	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
 	asm("mov	r1, #0x02000000 ");			// target = requesting CPU only
 //	asm("orr	r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
 	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPI
 	__JUMP(,lr);
 	}
-
-extern "C" __NAKED__ void send_resched_ipis(TUint32 aMask)
-	{
-	ASM_DEBUG1(SendReschedIPIs,r0);
-	__DATA_SYNC_BARRIER_Z__(r2);			// need DSB before sending any IPI
-	asm("cmp	r0, #0 ");		// any bits set in aMask?
-	GET_RWNO_TID(ne,r3);
-	asm("ldrne	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
-	asm("movne	r0, r0, lsl #16 ");
-//	asm("orrne	r0, r0, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
-	asm("strne	r0, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs if any
-	__JUMP(,lr);
-	}
-
@@ -898,23 +902,23 @@
 
 extern "C" __NAKED__ void send_resched_ipi_and_wait(TInt /*aCpu*/)
 	{
 	asm("ldr	r1, __TheSubSchedulers ");
 	asm("mov	r2, #0x10000 ");
 	asm("mov	r2, r2, lsl r0 ");	// 0x10000<<aCpu
 	ASM_DEBUG1(SendReschedIPIAndWait,r0);
-	asm("add	r0, r1, r0, lsl #9 ");	// sizeof(TSubScheduler)=512
-	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
-	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
+	asm("add	r0, r1, r0, lsl #%a0 " : : "i" ((TInt)KSubSchedulerShift));
+	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
+	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iIrqCount));
 	__DATA_SYNC_BARRIER_Z__(r1);		// make sure i_IrqCount is read before IPI is sent
 //	asm("orr	r2, r2, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
 	asm("str	r2, [r3, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
 	__DATA_SYNC_BARRIER__(r1);			// make sure IPI has been sent
 	asm("1: ");
 	asm("ldrb	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
-	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
-	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
+	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iIrqNestCount));
+	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iIrqCount));
 	asm("cmp	r1, #0 ");
 	asm("beq	0f ");					// iRescheduleNeededFlag not set -> wait
 	asm("cmp	r2, #0 ");
 	asm("bge	2f ");					// if other CPU is in an ISR, finish
 	asm("cmp	r3, r12 ");				// if not, has i_IrqCount changed?
@@ -928,5 +932,6 @@
 
 	asm("__TheSubSchedulers: ");
 	asm(".word TheSubSchedulers ");
 	}
 
+
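After raising the SGI, send_resched_ipi_and_wait() spins on three fields of the target's TSubScheduler: iRescheduleNeededFlag, the IRQ nest count, and iSSX.iIrqCount (snapshotted into R12 before the IPI was sent). A sketch of the visible part of that loop; the loop tail falls outside this hunk, so the wait path and exact exit ordering are assumptions:

	#include <stdint.h>

	// Placeholder view of the polled per-CPU state.
	struct TSubSchedulerView
		{
		volatile uint8_t  iRescheduleNeededFlag;	// target has a pending reschedule
		volatile int32_t  iIrqNestCount;			// -1 outside ISRs, >= 0 inside one
		volatile uint32_t iIrqCount;				// counts interrupts taken
		};

	void wait_for_resched_sketch(TSubSchedulerView& aTarget, uint32_t aIrqCountAtSend)
		{
		for (;;)
			{
			if (!aTarget.iRescheduleNeededFlag)
				continue;							// "beq 0f": not seen yet -> keep waiting
			if (aTarget.iIrqNestCount >= 0)
				break;								// "bge 2f": target inside an ISR -> done
			if (aTarget.iIrqCount != aIrqCountAtSend)
				break;								// i_IrqCount moved -> target took an interrupt
			}
		}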
@@ -933,28 +938,29 @@
 /*	If the current thread is subject to timeslicing, update its remaining time
 	from the current CPU's local timer. Don't stop the timer.
 	If the remaining time is negative, save it as zero.
  */
 __NAKED__ void TSubScheduler::SaveTimesliceTimer(NThreadBase* /*aThread*/)
 	{
 	asm("ldr	r3, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
 	asm("ldrb	r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,i_NThread_Initial));
-	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));
+	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iLocalTimerAddr));
 	asm("cmp	r3, #0 ");
 	asm("ble	0f ");					// thread isn't timesliced or timeslice already expired so skip
 	asm("cmp	r12, #0 ");
 	asm("bne	0f ");					// initial (i.e. idle) thread, so skip
 	asm("ldr	r3, [r2, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
-	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));
+	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iTimerPeriodM));
+	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iTimerPeriodS));
 	asm("cmp	r3, #0 ");
 	asm("movmi	r0, #0 ");				// if timer count is negative, save zero
 	asm("bmi	1f ");
-	asm("umull	r0, r3, r12, r3 ");		// scale up to max timer clock
-	asm("adds	r0, r0, #0x00800000 ");
-	asm("adcs	r3, r3, #0 ");
-	asm("mov	r0, r0, lsr #24 ");
-	asm("orr	r0, r0, r3, lsl #8 ");
+	asm("umull	r0, r3, r12, r3 ");		// scale up to max timer clock (R3:R0)
+	asm("rsb	r12, r2, #32 ");
+	asm("movs	r0, r0, lsr r2 ");		// r0 >>= iSSX.iTimerPeriodS, C = last bit shifted off (rounding)
+	asm("orr	r0, r0, r3, lsl r12 ");	// bottom bits from r12 into top bits of r0
+	asm("adcs	r0, r0, #0 ");			// round using last bit shifted off
 	asm("1:		");
 	asm("str	r0, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
 	asm("0:		");
 	__JUMP(,lr);
 	}
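The scaling in SaveTimesliceTimer() changes from a fixed multiply-and-shift-by-24 (with the 0x00800000 rounding constant) to a general multiplier/shift pair, iSSX.iTimerPeriodM and iSSX.iTimerPeriodS, rounding with the last bit shifted out (the final `adcs`). A C++ sketch of the new sequence, assuming 1 <= S <= 31 as the register-shift idiom requires:

	#include <stdint.h>

	// remaining = round((count * M) / 2^S): mirrors "umull; rsb; movs lsr; orr; adcs".
	inline uint32_t ScaleTimerCount(uint32_t aCount, uint32_t aPeriodM, uint32_t aPeriodS)
		{
		uint64_t product = (uint64_t)aCount * aPeriodM;		// "umull r0,r3,r12,r3" -> R3:R0
		uint32_t result = (uint32_t)(product >> aPeriodS);	// the lsr/orr pair recombines this 64-bit shift
		uint32_t roundBit = (uint32_t)(product >> (aPeriodS - 1)) & 1u;	// C flag from "movs"
		return result + roundBit;							// "adcs r0, r0, #0"
		}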
@@ -961,102 +967,104 @@
 
+
+#if defined(__UTT_MACHINE_CODED__)
+#if defined(__NKERN_TIMESTAMP_USE_LOCAL_TIMER__)
+#error Use of local timer for NKern::Timestamp() no longer supported
+#else
+
 /*	Update aOld's execution time and set up the timer for aNew
 	Update this CPU's timestamp value
 
 	if (!aOld) aOld=iInitialThread
 	if (!aNew) aNew=iInitialThread
-	newcount = aNew->iTime>0 ? Max(aNew->iTime*i_TimerMultF/2^32, 1) : 2^31-1
+	newcount = aNew->iTime>0 ? Max(aNew->iTime*iSSX.iTimerFreqM/2^(32+iTimerFreqS), 1) : 2^31-1
 	cli()
 	oldcount = timer count
 	if (oldcount<=0 || aOld!=aNew)
 		{
 		timer count = newcount
-		elapsed = i_LastTimerSet - oldcount
-		i_LastTimerSet = newcount
-		elapsed = elapsed * i_TimerMultI / 2^24
-		aOld->iTotalCpuTime64 += elapsed
-		correction = i_TimestampError;
-		if (correction > i_MaxCorrection)
-			correction = i_MaxCorrection
-		else if (correction < -i_MaxCorrection)
-			correction = -i_MaxCorrection
-		i_TimestampError -= correction
-		i_LastTimestamp += elapsed + i_TimerGap - correction
+		iSSX.iLastTimerSet = newcount
+		if (aOld!=aNew)
+			{
+			TUint64 now = NKern::Timestamp();
+			elapsed = now - iLastTimestamp;
+			iLastTimestamp = now;
+			aOld->iTotalCpuTime.i64 += elapsed;
+			if (!aOld->iActiveState)
+				aOld->iTotalActiveTime.i64 += (now - aOld->iLastActivationTime.i64);
+			++iReschedCount.i64;
+			++aNew->iRunCount.i64;
+			}
 		}
 	sti()
  */
 __NAKED__ void TSubScheduler::UpdateThreadTimes(NThreadBase* /*aOld*/, NThreadBase* /*aNew*/)
 	{
 	asm("cmp	r2, #0 ");
 	asm("ldreq	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
-	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultF));
+	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iTimerFreqM));
 	asm("cmp	r1, #0 ");
 	asm("ldreq	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
 	asm("ldr	r3, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
 	asm("stmfd	sp!, {r4-r7} ");
-	asm("ldr	r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
-	asm("ldr	r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
 	asm("cmp	r1, r2 ");
 	asm("beq	2f ");
+	asm("ldr	r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount.i32[0]));
+	asm("ldr	r7, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount.i32[1]));
 	asm("adds	r6, r6, #1 ");
-	asm("str	r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
-	asm("ldr	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
-	asm("ldr	r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
+	asm("str	r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount.i32[0]));
+	asm("ldr	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount.i32[0]));
+	asm("ldr	r6, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount.i32[1]));
 	asm("adcs	r7, r7, #0 ");
-	asm("str	r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
+	asm("str	r7, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount.i32[1]));
 	asm("adds	r4, r4, #1 ");
 	asm("adcs	r6, r6, #0 ");
-	asm("str	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
-	asm("str	r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
+	asm("str	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount.i32[0]));
+	asm("str	r6, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount.i32[1]));
 	asm("2:		");
+	asm("ldr	r6, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iLocalTimerAddr));
 	asm("cmp	r3, #1 ");					// aNew->iTime > 0 ?
-	asm("umullge r4, r3, r12, r3 ");
-	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));
-	asm("movlt	r3, #0x7fffffff ");
-	asm("addges	r3, r3, r4, lsr #31 ");		// round up top 32 bits if bit 31 set
-	asm("moveq	r3, #1 ");					// if result zero, limit to 1
-	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
-	__ASM_CLI();
-	asm("ldr	r4, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
-	asm("cmp	r1, r2 ");
-	asm("bne	1f ");
-	asm("cmp	r4, #0 ");
-	asm("bgt	0f ");						// same thread, timeslice not expired -> leave timer alone
-	asm("1:		");
-	asm("str	r3, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));	// set new timeslice value in timer
-	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));
-	asm("str	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
-	asm("sub	r12, r12, r4 ");			// r12 = elapsed (actual timer ticks)
-	asm("umull	r4, r5, r12, r5 ");
-	asm("ldr	r3, [r1, #%a0]!" : : "i" _FOFF(NThreadBase,iTotalCpuTime64));
+	asm("movlt	r3, #0x7fffffff ");			// if not, use 2^31-1
+	asm("blt	3f ");
+	asm("cmp	r1, r2 ");					// different thread?
+	asm("beq	0f ");						// no - finish
+	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iTimerFreqS));
+	asm("umull	r4, r3, r12, r3 ");			// r3:r4 = aNew->iTime * iTimerFreqM
+	asm("adds	r4, r4, r4 ");				// bit 31 into C
+	asm("teq	r5, #0 ");					// check for iTimerFreqS=0 without changing C
+	asm("movnes	r3, r3, lsr r5 ");			// if not, r3>>=iTimerFreqS, last bit shifted out into C
+	asm("adcs	r3, r3, #0 ");				// round using last bit shifted off
+	asm("3:		");
+	asm("str	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iLastTimerSet));
+	asm("str	r3, [r6, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));	// set new timeslice value in timer
+
+	asm("ldr	r6, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp.i32[0]));
+	asm("ldr	r7, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp.i32[1]));
+	asm("stmfd	sp!, {r0-r2,lr} ");
+	asm("bl		Timestamp__5NKern ");		// R1:R0 = current time
+	asm("mov	r4, r0 ");
+	asm("mov	r5, r1 ");					// R5:R4 = current time
+	asm("ldmfd	sp!, {r0-r2,lr} ");
+	asm("str	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp.i32[0]));
+	asm("ldr	r3, [r1, #%a0]!" : : "i" _FOFF(NThreadBase,iTotalCpuTime.i64));
 	asm("ldr	r12, [r1, #4] ");
-	asm("adds	r4, r4, #0x00800000 ");
-	asm("adcs	r5, r5, #0 ");
-	asm("mov	r4, r4, lsr #24 ");
-	asm("orr	r4, r4, r5, lsl #8 ");		// r4 = elapsed
-	asm("adds	r3, r3, r4 ");
-	asm("adcs	r12, r12, #0 ");
-	asm("stmia	r1, {r3,r12} ");			// aOld->iTotalCpuTime64 += elapsed
-	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
-	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_MaxCorrection));
-	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
-	asm("ldr	r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
-	asm("mov	r12, r3 ");
-	asm("cmp	r3, r5 ");
-	asm("movgt	r3, r5 ");					// if (correction>i_MaxCorrection) correction=i_MaxCorrection
-	asm("cmn	r3, r5 ");
-	asm("rsblt	r3, r5, #0 ");				// if (correction+i_MaxCorrection<0) correction=-i_MaxCorrection
-	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerGap));
-	asm("sub	r12, r12, r3 ");
-	asm("str	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
-	asm("add	r4, r4, r5 ");				// r4 = elapsed + i_TimerGap
-	asm("adds	r1, r1, r4 ");
-	asm("adcs	r2, r2, #0 ");				// iLastTimestamp64 + (elapsed + i_TimerGap)
-	asm("subs	r1, r1, r3 ");
-	asm("sbcs	r1, r1, r3, asr #32 ");		// iLastTimestamp64 + (elapsed + i_TimerGap - correction)
-	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
-	asm("str	r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
+	asm("str	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp.i32[1]));
+	asm("stmdb	r1, {r4-r5} ");				// aOld->iLastRunTime
+	asm("ldrb	r2, [r1, #%a0]" : : "i" (_FOFF(NSchedulable,iActiveState)-_FOFF(NThreadBase,iTotalCpuTime.i64)));
+	asm("subs	r6, r4, r6 ");
+	asm("sbcs	r7, r5, r7 ");				// R7:R6 = time since last reschedule
+	asm("adds	r3, r3, r6 ");
+	asm("adcs	r12, r12, r7 ");			// total CPU time of old thread
+	asm("stmia	r1!, {r3,r12} ");			// store, r1=&aOld.iLastActivationTime
+	asm("cmp	r2, #0 ");					// old thread still active?
+	asm("bne	0f ");						// yes - done
+	asm("ldmia	r1!, {r2,r3,r6,r7} ");		// R3:R2 = last activation time, R7:R6=total active time
+	asm("subs	r2, r4, r2 ");
+	asm("sbcs	r3, r5, r3 ");				// R3:R2 = time since last activation
+	asm("adds	r6, r6, r2 ");
+	asm("adcs	r7, r7, r3 ");				// R7:R6 = new total active time
+	asm("stmdb	r1, {r6,r7} ");
+
 	asm("0:		");
-	__ASM_STI();
 	asm("ldmfd	sp!, {r4-r7} ");
 	__JUMP(,lr);
 	}
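On the aNew side, the updated pseudocode's newcount = Max(aNew->iTime*iSSX.iTimerFreqM/2^(32+iTimerFreqS), 1) is implemented by the umull / adds (bit 31 into carry) / lsr / adcs run above. A C++ rendering of that arithmetic; the Max(...,1) clamp comes from the pseudocode rather than from an instruction visible in this hunk:

	#include <stdint.h>

	// Convert a remaining timeslice to local-timer ticks, rounding to nearest.
	inline uint32_t TimesliceToTicks(int32_t aTime, uint32_t aFreqM, uint32_t aFreqS)
		{
		if (aTime < 1)
			return 0x7fffffffu;					// "movlt r3,#0x7fffffff": no timeslicing
		uint64_t product = (uint64_t)(uint32_t)aTime * aFreqM;	// "umull r4,r3,r12,r3"
		uint32_t ticks = (uint32_t)(product >> (32 + aFreqS));
		uint32_t roundBit = (uint32_t)(product >> (31 + aFreqS)) & 1u;	// carry in the asm
		ticks += roundBit;						// "adcs r3,r3,#0"
		return ticks ? ticks : 1;				// Max(..., 1) per the pseudocode
		}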
@@ -1063,2 +1071,3 @@
 
-
+#endif
+#endif	// __UTT_MACHINE_CODED__