diff -r c1f20ce4abcf -r 3e88ff8f41d5 kernel/eka/nkernsmp/arm/ncutils.cia
--- a/kernel/eka/nkernsmp/arm/ncutils.cia	Tue Aug 31 16:34:26 2010 +0300
+++ b/kernel/eka/nkernsmp/arm/ncutils.cia	Wed Sep 01 12:34:56 2010 +0100
@@ -190,28 +190,26 @@
 	{
 	SET_RWNO_TID(,r1);
 	__ASM_CLI_MODE(MODE_ABT);
-	asm("str sp, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iAbtStackTop));
+	asm("str sp, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, i_AbtStackTop));
 	asm("mvn r3, #0 ");
 	asm("str r3, [sp, #%a0]" : : "i" _FOFF(SFullArmRegSet, iExcCode));
-	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iIrqNestCount));
+	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
 	__ASM_CLI_MODE(MODE_UND);
-	asm("str sp, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iUndStackTop));
+	asm("str sp, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, i_UndStackTop));
 	__ASM_CLI_MODE(MODE_FIQ);
-	asm("str sp, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iFiqStackTop));
+	asm("str sp, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, i_FiqStackTop));
 	__ASM_CLI_MODE(MODE_IRQ);
-	asm("str sp, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iIrqStackTop));
+	asm("str sp, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqStackTop));
 	__ASM_CLI_MODE(MODE_SVC);
 	asm("ldr r2, __TheScheduler ");
-	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler, iSX.iScuAddr));
-	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iScuAddr));
-	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler, iSX.iGicDistAddr));
-	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));
-	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler, iSX.iGicCpuIfcAddr));
-	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicCpuIfcAddr));
-	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler, iSX.iLocalTimerAddr));
-	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iLocalTimerAddr));
-	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler, iSX.iGlobalTimerAddr));
-	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGlobalTimerAddr));
+	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler, i_ScuAddr));
+	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, i_ScuAddr));
+	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler, i_GicDistAddr));
+	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));
+	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler, i_GicCpuIfcAddr));
+	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicCpuIfcAddr));
+	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler, i_LocalTimerAddr));
+	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, i_LocalTimerAddr));
 	asm("mov r3, #0 ");
 	SET_RWRO_TID(,r3);
 	SET_RWRW_TID(,r3);
@@ -240,7 +238,7 @@
 	GET_RWNO_TID(,r0);
 	asm("cmp r0, #0 ");
 	asm("ldreq r0, __SS0 ");
-	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iRegs));
+	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_Regs));
 	asm("cmp r0, #0 ");
 	asm("ldreq r0, __DefaultRegs ");
 	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iExcCode));
@@ -277,8 +275,8 @@
 	asm("streq r2, [r1] ");
 	asm("beq skip_other_cores ");	// If subscheduler not yet set, don't bother with other cores
 	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
-	asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicCpuIfcAddr));
-//	asm("ldr r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iRegs));
+	asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicCpuIfcAddr));
+//	asm("ldr r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_Regs));
 	asm("bic sp, sp, #4 ");	// align stack to multiple of 8
 	__DATA_MEMORY_BARRIER_Z__(r6);
@@ -295,7 +293,7 @@
 	// we weren't first to crash, so wait here for a crash IPI
 	// disable all interrupts except for CRASH_IPI
 	GET_RWNO_TID(,r0);
-	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicCpuIfcAddr));
+	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicCpuIfcAddr));
 	asm("mov r1, #0 ");
 	asm("1: ");
 	asm("add r1, r1, #1 ");
@@ -314,9 +312,9 @@
 	asm("first_to_crash: ");
 	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iScheduler));
 	asm("ldr r7, __CrashStateOut ");
-	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler, iIpiAcceptCpus));
+	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler, iActiveCpus1));
 	asm("str r3, [r7] ");	// mask of CPUs pending
-	asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));
+	asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));
 	asm("ldr r1, __CrashIPIWord ");
 	__DATA_SYNC_BARRIER_Z__(r6);
 	asm("str r1, [r5, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// send CRASH_IPI to all other CPUs
@@ -1038,52 +1036,7 @@
 	asm("mov r0, #0");	//Kernel side not implemented yet
 	}
 
-#ifdef __CPU_ARM_HAS_WFE_SEV
-
-extern "C" __NAKED__ void __arm_wfe()
-	{
-	ARM_WFE;
-	__JUMP(, lr);
-	}
-
-extern "C" __NAKED__ void __arm_sev()
-	{
-	ARM_SEV;
-	__JUMP(, lr);
-	}
-
-#endif
-
-// Called by a CPU which has completed its detach sequence and should now be powered off
-// Doesn't return - just waits for power to be removed
-// CPU will come back up via the reset vector when it next wakes up.
-// NOTE: On entry the CPU caches are disabled and the CPU does not participate in coherency
-// SO BE VERY CAREFUL
-extern "C" __NAKED__ void DetachComplete()
-	{
-	GET_RWNO_TID(,r0);
-	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iUncached));
-	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(SPerCpuUncached, iDetachCompleteCpus));
-	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));
-	__DATA_SYNC_BARRIER_Z__(r12);	// need DSB before sending any IPI
-	asm("mov r2, r2, lsl #16 ");
-	asm("orr r2, r2, #%a0" : : "i" ((TInt)INDIRECT_POWERDOWN_IPI_VECTOR));
-	asm("str r2, [r3, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
-
-	asm("wait_forever: ");
-	__DATA_SYNC_BARRIER__(r12);
-	ARM_WFE;
-	__DATA_SYNC_BARRIER__(r12);
-	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(SPerCpuUncached, iPowerOnReq));
-	__DATA_SYNC_BARRIER__(r12);
-	asm("cmp r2, #0xF000000F ");	// for 'fake' power down
-	asm("bne wait_forever ");
-
-	asm("0: ");
-	__JUMP(,lr);
-	}
-