// Send any outstanding reschedule IPIs when the kernel is unlocked on this CPU.
// Call with interrupts disabled, R0->TSubScheduler, R12=R0->iReschedIPIs
// Return with R0 unaltered.
extern "C" __NAKED__ void send_accumulated_resched_ipis()
	{
	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iScheduler));
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));
	asm("ldr r1, [r3, #%a0]" : : "i" _FOFF(TScheduler, iThreadAcceptCpus));
	asm("bics r1, r12, r1 ");		// any pending IPIs for CPUs not currently accepting threads?
	asm("bne 2f ");					// if so, let the scheduler deal with those first
	asm("1: ");
	asm("mov r1, #0 ");
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
	__DATA_SYNC_BARRIER__(r1);		// need DSB before sending any IPI
	asm("mov r1, r12, lsl #16 ");	// accumulated CPU mask -> SGI target list field
//	asm("orr r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	// not needed since RESCHED_IPI_VECTOR=0
	asm("str r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
	__JUMP(,lr);

	asm("2: ");
	asm("stmfd sp!, {r0,lr} ");
	asm("mov r0, r3 ");				// this = TScheduler pointer
	asm("mov r1, r12 ");			// argument = accumulated IPI mask
	asm("bl ReschedInactiveCpus__10TSchedulerUl ");	// TScheduler::ReschedInactiveCpus()
	asm("mov r12, r0 ");			// returned mask is the set of IPIs still to send
	asm("ldmfd sp!, {r0,lr} ");
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));
	asm("b 1b ");
	}
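
/*	What send_accumulated_resched_ipis() does, sketched in the informal
	pseudocode style used for UpdateThreadTimes() later in this file
	("SGIR" below is shorthand for the GicDistributor::iSoftIrq register):

	mask = iReschedIPIs							// passed in R12
	if (mask &~ iScheduler->iThreadAcceptCpus)	// some targets not accepting threads?
		mask = iScheduler->ReschedInactiveCpus(mask)
	iReschedIPIs = 0
	DSB
	SGIR = mask << 16							// reschedule SGI (vector 0) to each CPU in mask
*/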

// Send a reschedule IPI to the specified CPU
extern "C" __NAKED__ void send_resched_ipi(TInt /*aCpu*/)
	{
	GET_RWNO_TID(,r3);
	__DATA_SYNC_BARRIER_Z__(r2);	// need DSB before sending any IPI
	asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));	// we assume the GIC distributor address is the same for all CPUs
	ASM_DEBUG1(SendReschedIPI,r0);
	asm("mov r1, #0x10000 ");
	asm("mov r1, r1, lsl r0 ");		// 0x10000<<aCpu = target list bit for aCpu
//	asm("orr r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	// not needed since RESCHED_IPI_VECTOR=0
	asm("str r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPI
	__JUMP(,lr);
	}

// Send a reschedule IPI to the current CPU only
// *** DON'T DO ANY TRACING OR INSTRUMENTATION ***
extern "C" __NAKED__ void send_self_resched_ipi()
	{
	GET_RWNO_TID(,r3);
	__DATA_SYNC_BARRIER_Z__(r2);	// need DSB before sending any IPI
	asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));	// we assume the GIC distributor address is the same for all CPUs
	asm("mov r1, #0x02000000 ");	// target = requesting CPU only
//	asm("orr r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	// not needed since RESCHED_IPI_VECTOR=0
	asm("str r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPI
	__JUMP(,lr);
	}
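
/*	For reference: the words written to GicDistributor::iSoftIrq above assume a
	GICv1/v2-style software generated interrupt register layout (an assumption
	based on the encodings used in this file, not something stated in it):
		bits  3:0	SGI vector number (RESCHED_IPI_VECTOR, which is 0 here)
		bits 23:16	CPU target list, one bit per CPU
		bits 25:24	target filter (00 = use the target list, 10 = requesting CPU only)
	Hence 0x10000<<aCpu addresses a single CPU, aMask<<16 addresses a set of CPUs,
	and 0x02000000 addresses only the CPU executing the store.
*/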
|
// Send a reschedule IPI to each CPU set in aMask
extern "C" __NAKED__ void send_resched_ipis(TUint32 aMask)
	{
	ASM_DEBUG1(SendReschedIPIs,r0);
	__DATA_SYNC_BARRIER_Z__(r2);	// need DSB before sending any IPI
	asm("cmp r0, #0 ");				// any bits set in aMask?
	GET_RWNO_TID(ne,r3);
	asm("ldrne r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
	asm("movne r0, r0, lsl #16 ");	// aMask -> SGI target list field
//	asm("orrne r0, r0, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	// not needed since RESCHED_IPI_VECTOR=0
	asm("strne r0, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs if any
	__JUMP(,lr);
	}
|
extern "C" __NAKED__ void send_resched_ipi_and_wait(TInt /*aCpu*/)
	{
	asm("ldr r1, __TheSubSchedulers ");
	asm("mov r2, #0x10000 ");
	asm("mov r2, r2, lsl r0 ");		// 0x10000<<aCpu
	ASM_DEBUG1(SendReschedIPIAndWait,r0);
	asm("add r0, r1, r0, lsl #%a0 " : : "i" ((TInt)KSubSchedulerShift));	// r0 -> TSubScheduler for aCpu
	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));	// we assume the GIC distributor address is the same for all CPUs
	asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iIrqCount));
	__DATA_SYNC_BARRIER_Z__(r1);	// make sure iIrqCount is read before IPI is sent
//	asm("orr r2, r2, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	// not needed since RESCHED_IPI_VECTOR=0
	asm("str r2, [r3, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPI
	__DATA_SYNC_BARRIER__(r1);		// make sure IPI has been sent
	asm("1: ");
	asm("ldrb r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iIrqNestCount));
	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iIrqCount));
	asm("cmp r1, #0 ");
	asm("beq 0f ");					// iRescheduleNeededFlag not set -> wait
	asm("cmp r2, #0 ");
	asm("bge 2f ");					// if other CPU is in an ISR, finish
	asm("cmp r3, r12 ");			// if not, has iIrqCount changed?

	asm("__TheSubSchedulers: ");
	asm(".word TheSubSchedulers ");
	}
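
/*	Notes on send_resched_ipi_and_wait(), inferred from the code in this extract:
	- The target CPU's TSubScheduler is found by indexing the contiguous
	  TheSubSchedulers array: TheSubSchedulers + (aCpu << KSubSchedulerShift),
	  which requires sizeof(TSubScheduler) to be the matching power of two
	  (an older version of this routine hard-coded the shift as 9, i.e. 512 bytes).
	- iSSX.iIrqCount is sampled before the SGI is written, with a DSB in between,
	  so the polling loop can tell that the target CPU has taken an interrupt
	  since the IPI was sent (its count changed, or iIrqNestCount >= 0 shows it
	  is currently inside an ISR).
*/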

/*	If the current thread is subject to timeslicing, update its remaining time
	from the current CPU's local timer. Don't stop the timer.
	If the remaining time is negative, save it as zero.
*/
__NAKED__ void TSubScheduler::SaveTimesliceTimer(NThreadBase* /*aThread*/)
	{
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
	asm("ldrb r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,i_NThread_Initial));
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iLocalTimerAddr));
	asm("cmp r3, #0 ");
	asm("ble 0f ");					// thread isn't timesliced or timeslice already expired so skip
	asm("cmp r12, #0 ");
	asm("bne 0f ");					// initial (i.e. idle) thread, so skip
	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
	asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iTimerFreqRI.iI.iM));
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iTimerFreqRI.iI.iX));
	asm("cmp r3, #0 ");
	asm("movmi r0, #0 ");			// if timer count is negative, save zero
	asm("bmi 1f ");
	asm("mov r2, r2, lsl #16 ");	// sign-extend the 16-bit exponent iX ...
	asm("mov r2, r2, asr #16 ");	// ... (it is negative here)
	asm("umull r0, r3, r12, r3 ");	// scale up to max timer clock (R3:R0) - need to shift right by -iX
	asm("rsb r2, r2, #0 ");			// r2 = -iX = right shift count
	asm("rsb r12, r2, #32 ");		// r12 = 32 - shift count
	asm("movs r0, r0, lsr r2 ");	// r0 >>= -iX, C = last bit shifted off (rounding)
	asm("orr r0, r0, r3, lsl r12 ");	// bottom bits from r3 into top bits of r0
	asm("adcs r0, r0, #0 ");		// round using last bit shifted off
	asm("1: ");
	asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
	asm("0: ");
	__JUMP(,lr);
	}
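
/*	The remaining timeslice is thus rescaled from local-timer ticks to the
	reference timer frequency ("max timer clock" in the comment above) with a
	32x32->64 multiply and a rounded right shift. With iM and iX taken from
	iSSX.iTimerFreqRI.iI (iX being a negative exponent), the net effect is,
	as a sketch rather than the exact register sequence:

	aThread->iTime = (TUint32)(((TUint64)count * iM + (TUint64(1) << (-iX - 1))) >> -iX);

	where count is the ArmLocalTimer::iTimerCount value read above and the added
	half-unit reproduces the carry-flag rounding done in the assembler.
*/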

#if defined(__UTT_MACHINE_CODED__)
#if defined(__NKERN_TIMESTAMP_USE_LOCAL_TIMER__)
#error Use of local timer for NKern::Timestamp() no longer supported
#else
#error UpdateThreadTimes assembler out of date!
#endif
#endif	// __UTT_MACHINE_CODED__

/*	Update aOld's execution time and set up the timer for aNew
	Update this CPU's timestamp value

	if (!aOld) aOld=iInitialThread
	if (!aNew) aNew=iInitialThread
	newcount = aNew->iTime>0 ? Max(aNew->iTime*i_TimerMultF/2^32, 1) : 2^31-1
	cli()
	oldcount = timer count
	if (oldcount<=0 || aOld!=aNew)
		{
		timer count = newcount
		elapsed = i_LastTimerSet - oldcount
		i_LastTimerSet = newcount
		elapsed = elapsed * i_TimerMultI / 2^24
		aOld->iTotalCpuTime64 += elapsed
		correction = i_TimestampError;
		if (correction > i_MaxCorrection)
			correction = i_MaxCorrection
		else if (correction < -i_MaxCorrection)
			correction = -i_MaxCorrection
		i_TimestampError -= correction
		i_LastTimestamp += elapsed + i_TimerGap - correction
		}
	sti()
*/
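
/*	i_TimerMultF and i_TimerMultI are fixed-point scale factors used in opposite
	directions, exactly as the pseudocode above applies them: i_TimerMultF turns
	nanokernel timeslice units into local-timer ticks as (t * i_TimerMultF) >> 32,
	and i_TimerMultI turns timer ticks back into timeslice units as
	(ticks * i_TimerMultI) >> 24; the assembler below rounds the latter by adding
	0x00800000 (half of 2^24) before the shift.
*/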
|
__NAKED__ void TSubScheduler::UpdateThreadTimes(NThreadBase* /*aOld*/, NThreadBase* /*aNew*/)
	{
	asm("cmp r2, #0 ");
	asm("ldreq r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));	// if (!aNew) aNew=iInitialThread
	asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultF));
	asm("cmp r1, #0 ");
	asm("ldreq r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));	// if (!aOld) aOld=iInitialThread
	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
	asm("stmfd sp!, {r4-r7} ");
	asm("ldr r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
	asm("ldr r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
	asm("cmp r1, r2 ");
	asm("beq 2f ");					// same thread -> don't bump the run/reschedule counts
	asm("adds r6, r6, #1 ");		// ++aNew->iRunCount64
	asm("str r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
	asm("ldr r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
	asm("ldr r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
	asm("adcs r7, r7, #0 ");
	asm("str r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
	asm("adds r4, r4, #1 ");		// ++iReschedCount64
	asm("adcs r6, r6, #0 ");
	asm("str r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
	asm("str r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
	asm("2: ");
	asm("cmp r3, #1 ");				// aNew->iTime > 0 ?
	asm("umullge r4, r3, r12, r3 ");	// r3:r4 = aNew->iTime * i_TimerMultF
	asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));
	asm("movlt r3, #0x7fffffff ");	// not timesliced -> use a very long period
	asm("addges r3, r3, r4, lsr #31 ");	// round up top 32 bits if bit 31 set
	asm("moveq r3, #1 ");			// if result zero, limit to 1
	asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
	__ASM_CLI();
	asm("ldr r4, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
	asm("cmp r1, r2 ");
	asm("bne 1f ");
	asm("cmp r4, #0 ");
	asm("bgt 0f ");					// same thread, timeslice not expired -> leave timer alone
	asm("1: ");
	asm("str r3, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));	// set new timeslice value in timer
	asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));
	asm("str r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
	asm("sub r12, r12, r4 ");		// r12 = elapsed (actual timer ticks)
	asm("umull r4, r5, r12, r5 ");	// r5:r4 = elapsed * i_TimerMultI
	asm("ldr r3, [r1, #%a0]!" : : "i" _FOFF(NThreadBase,iTotalCpuTime64));	// r1 -> aOld->iTotalCpuTime64
	asm("ldr r12, [r1, #4] ");
	asm("adds r4, r4, #0x00800000 ");	// round before dividing by 2^24
	asm("adcs r5, r5, #0 ");
	asm("mov r4, r4, lsr #24 ");
	asm("orr r4, r4, r5, lsl #8 ");	// r4 = elapsed
	asm("adds r3, r3, r4 ");
	asm("adcs r12, r12, #0 ");
	asm("stmia r1, {r3,r12} ");		// aOld->iTotalCpuTime64 += elapsed
	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
	asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_MaxCorrection));
	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
	asm("ldr r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
	asm("mov r12, r3 ");
	asm("cmp r3, r5 ");
	asm("movgt r3, r5 ");			// if (correction>i_MaxCorrection) correction=i_MaxCorrection
	asm("cmn r3, r5 ");
	asm("rsblt r3, r5, #0 ");		// if (correction+i_MaxCorrection<0) correction=-i_MaxCorrection
	asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerGap));
	asm("sub r12, r12, r3 ");		// i_TimestampError -= correction
	asm("str r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
	asm("add r4, r4, r5 ");			// r4 = elapsed + i_TimerGap
	asm("adds r1, r1, r4 ");
	asm("adcs r2, r2, #0 ");		// iLastTimestamp64 + (elapsed + i_TimerGap)
	asm("subs r1, r1, r3 ");
	asm("sbcs r2, r2, r3, asr #32 ");	// iLastTimestamp64 + (elapsed + i_TimerGap - correction)
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
	asm("str r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
	asm("0: ");
	__ASM_STI();
	asm("ldmfd sp!, {r4-r7} ");
	__JUMP(,lr);
	}
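
/*	Worked example of the timestamp correction above (numbers chosen purely for
	illustration): if i_TimestampError is +7 ticks and i_MaxCorrection is 4, the
	clamp gives correction = 4, i_TimestampError is reduced to 3, and
	iLastTimestamp64 advances by (elapsed + i_TimerGap - 4); the remaining error
	is worked off on later reschedules. In the 64-bit subtraction, "r3, asr #32"
	supplies the sign extension of the 32-bit correction for the high word.
*/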
|