kernel/eka/nkernsmp/arm/vectors.cia
branch RCL_3
changeset 256 c1f20ce4abcf
parent 31 56f325a607ea
child 184 0e2270015475
child 257 3e88ff8f41d5
child 293 0659d0e1a03c
comparison of 249:a179b74831c9 (-) with 256:c1f20ce4abcf (+)

@@ -147,15 +147,14 @@
 	asm("bcc	slow_swi ");				// bit 23=0 for slow/unprot
 	asm("mov	r1, r9 ");
 	asm("beq	wait_for_any_request ");	// special case for Exec::WaitForAnyRequest
 	asm("ldr	r2, [r1, #%a0]" : : "i" _FOFF(NThread,iFastExecTable));
 	asm("ldr	r3, [r2], r12, lsr #7 ");	// r3=limit, r2->dispatch table entry
-	asm("ldr	r2, [r2] ");				// r2->kernel function
 	asm("cmp	r3, r12, lsr #9 ");			// r3-SWI number
-	__JUMP(hi,	r2);						// if SWI number valid, call kernel function
+	asm("ldrhi	pc, [r2] ");				// if SWI number valid, call kernel function
 	asm("mvn	r12, #0 ");					// put invalid SWI number into r12
 	asm("b		slow_swi ");				// go through slow SWI routine to call invalid SWI handler
 
 #ifndef __FAST_SEM_MACHINE_CODED__
 	asm("wait_for_any_request: ");
 	__ASM_STI();							// all interrupts on
 	asm("b		WaitForAnyRequest__5NKern ");
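
The fast exec path above bounds-checks the SWI number against the first word of the thread's fast exec table and either jumps to the table entry or falls through to slow_swi with an invalid number. Below is a minimal C++ sketch of that dispatch logic; the struct layout and names (SFastExecTableModel, DispatchFastExec) are illustrative stand-ins, not the real NKern definitions.

// Illustrative model only: the real fast exec table is defined by the kernel,
// not by this sketch.
#include <cstdint>
#include <cstdio>

typedef std::uint32_t (*TFastExecFn)(std::uint32_t aArg);

struct SFastExecTableModel
	{
	std::uint32_t iNumberOfEntries;		// the "limit" loaded into r3
	const TFastExecFn* iFunctions;		// the dispatch entries addressed via r2
	};

std::uint32_t InvalidFastExec(std::uint32_t /*aArg*/)
	{
	std::puts("slow_swi: invalid fast exec number");	// stands in for the slow path
	return 0;
	}

std::uint32_t DispatchFastExec(const SFastExecTableModel& aTable, std::uint32_t aNum, std::uint32_t aArg)
	{
	// cmp r3, r12, lsr #9 and the "hi" branch: call only when the number is below the limit
	if (aNum < aTable.iNumberOfEntries)
		return aTable.iFunctions[aNum](aArg);
	// mvn r12, #0 / b slow_swi: otherwise report an invalid (all-ones) number
	return InvalidFastExec(~0u);
	}
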
@@ -289,11 +288,11 @@
 	{
 	// Interrupts may be enabled here
 #ifdef BTRACE_CPU_USAGE
 	asm("ldr	r10, __BTraceCpuUsageFilter ");
 #endif
-	asm("ldr	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
+	asm("ldr	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iIrqNestCount));
 	asm("ldrb	r0, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iEventHandlersPending));
 	__DATA_MEMORY_BARRIER_Z__(r2);
 #ifdef BTRACE_CPU_USAGE
 	asm("ldrb	r10, [r10] ");
 #endif
@@ -314,11 +313,11 @@
 	asm("and	r2, r8, #0x1f ");
 	asm("ldr	r1, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
 	asm("cmp	r2, #0x10 ");				// interrupted mode_usr ?
 	asm("cmpne	r2, #0x13 ");				// if not, interrupted mode_svc ?
 	asm("cmpeq	r0, #0 ");					// if mode_usr or mode_svc, is kernel locked?
-	asm("str	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
+	asm("str	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iIrqNestCount));
 	asm("bne	irq_kernel_locked_exit ");	// if neither or if kernel locked, exit immediately
 	asm("cmp	r1, #0 ");					// If not, IDFCs/reschedule pending?
 	asm("beq	irq_kernel_locked_exit ");	// if not, exit
 
 	asm("mov	r1, #1 ");
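
The test sequence above decides whether the interrupt return should divert into the scheduler: only if the interrupted mode was usr or svc, the kernel lock is free, and IDFCs or a reschedule are pending. A hedged C++ sketch of the same decision follows, with an invented SSubSchedulerModel in place of the real TSubScheduler.

#include <cstdint>

// Illustrative stand-in for the TSubScheduler fields consulted above.
struct SSubSchedulerModel
	{
	std::uint32_t iKernLockCount;			// nonzero when the kernel is locked (r0 in the test)
	std::uint32_t iRescheduleNeededFlag;	// IDFCs queued or reschedule pending (r1)
	};

bool RescheduleOnIrqExit(std::uint32_t aSpsr, const SSubSchedulerModel& aSS)
	{
	const std::uint32_t mode = aSpsr & 0x1f;		// and r2, r8, #0x1f
	if (mode != 0x10 && mode != 0x13)				// cmp/cmpne: neither mode_usr nor mode_svc
		return false;								// -> irq_kernel_locked_exit
	if (aSS.iKernLockCount != 0)					// cmpeq r0, #0: kernel locked
		return false;								// -> irq_kernel_locked_exit
	return aSS.iRescheduleNeededFlag != 0;			// cmp r1, #0: anything pending?
	}
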
@@ -379,11 +378,11 @@
 	USER_MEMORY_GUARD_CHECK();				// check UMG is off if returning to user mode
 	RFEIAW(13);								// restore PC and CPSR - return from Exec function
 
 	asm("nested_irq_exit: ");
 	__ASM_CLI1();
-	asm("str	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
+	asm("str	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iIrqNestCount));
 #ifdef BTRACE_CPU_USAGE
 	asm("cmp	r10, #0 ");
 	asm("blne	btrace_irq_exit ");
 #endif
 	asm("ldmia	sp!, {r0-r12,r14} ");		// restore r0-r12, r14_sys
@@ -765,11 +764,11 @@
 	asm("orr	r3, r12, #0xC0 ");
 	asm("msr	cpsr, r3 ");				// back to exception mode, all interrupts off
 	asm("mov	r2, r0 ");
 	asm("cmp	r11, #0 ");
 	asm("ldreq	r11, __SS0 ");
-	asm("ldr	r0, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,i_Regs));	// pass in address of stored registers
+	asm("ldr	r0, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iRegs));	// pass in address of stored registers
 	asm("cmp	r0, #0 ");
 	asm("ldreq	r0, __DefaultRegs ");
 	asm("bl "	CSM_ZN3Arm9SaveStateER14SFullArmRegSet );
 	asm("ldmia	sp!, {r4-r9} ");			// get original R0-R5
 	asm("stmia	r0!, {r4-r9} ");			// save original R0-R5
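
In the crash path above the register-save area is located with two fallbacks before Arm::SaveState is called: a null subscheduler pointer falls back to subscheduler 0 (__SS0), and a null iRegs pointer falls back to a static default register set (__DefaultRegs). A sketch of that selection, with placeholder types standing in for TSubScheduler and SFullArmRegSet:

// Placeholder types; the real SFullArmRegSet/TSubScheduler layouts differ.
struct SRegSetModel { unsigned long iR[38]; };
struct SSubSchedModel { SRegSetModel* iRegs; };

static SSubSchedModel TheSubScheduler0;		// stands in for __SS0
static SRegSetModel   DefaultRegSet;		// stands in for __DefaultRegs

SRegSetModel* CrashRegSave(SSubSchedModel* aSS)
	{
	if (!aSS)							// cmp r11, #0 / ldreq r11, __SS0
		aSS = &TheSubScheduler0;
	SRegSetModel* r = aSS->iRegs;		// ldr r0, [r11, #...iRegs]
	if (!r)								// cmp r0, #0 / ldreq r0, __DefaultRegs
		r = &DefaultRegSet;
	return r;							// passed to Arm::SaveState()
	}
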
@@ -832,11 +831,11 @@
  ******************************************************************************/
 extern "C" __NAKED__ void send_generic_ipis(TUint32 /*aMask*/)
 	{
 	asm("movs	r0, r0, lsl #16 ");		// CPU mask into bits 16-23 - any bits set in aMask?
 	GET_RWNO_TID(ne,r3);
-	asm("ldrne	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
+	asm("ldrne	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, iSSX.iGicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
 	__DATA_SYNC_BARRIER_Z__(r1);			// need DSB before sending any IPI
 	asm("orrne	r0, r0, #%a0" : : "i" ((TInt)GENERIC_IPI_VECTOR));
 	asm("strne	r0, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs if any
 	__JUMP(,lr);
 	}
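
send_generic_ipis() above packs the CPU mask into bits 16-23 of the GIC software-interrupt register word, ORs in the IPI vector number, and writes nothing when the mask is empty. Below is a C++ sketch of that packing under the assumption of a memory-mapped GICD_SGIR-style register supplied by the caller; the vector value and register pointer are placeholders, not values taken from this source file.

#include <cstdint>

const std::uint32_t KGenericIpiVector = 1;		// placeholder for GENERIC_IPI_VECTOR

void SendGenericIpisModel(std::uint32_t aMask, volatile std::uint32_t* aSoftIrqReg)
	{
	std::uint32_t word = aMask << 16;			// movs r0, r0, lsl #16: target list into bits 16-23
	if (!word)									// the "ne" guards: do nothing if no CPUs requested
		return;
	// A DSB is needed here on real hardware so prior writes are visible before the IPI arrives.
	word |= KGenericIpiVector;					// orrne r0, r0, #GENERIC_IPI_VECTOR
	*aSoftIrqReg = word;						// strne r0, [r2, #GicDistributor::iSoftIrq]
	}
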
@@ -851,16 +850,16 @@
  *	If in mode_fiq, FIQ stack contains R0...R7 R8usr...R14usr iExcCode PC CPSR
  ******************************************************************************/
 extern "C" __NAKED__ void handle_crash_ipi()
 	{
 	GET_RWNO_TID(,r0);
-	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_Regs));
+	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iRegs));
 	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iExcCode));
 	asm("cmp	r0, #0 ");
 	asm("bge	state_already_saved ");		// skip if this CPU has already saved its state (i.e. already crashed)
 	GET_RWNO_TID(,r0);
-	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_Regs));
+	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iSSX.iRegs));
 	asm("bl "	CSM_ZN3Arm9SaveStateER14SFullArmRegSet );	// save machine state (NOTE: R0 trashed)
 	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iFlags));	// mode on entry
 	asm("and	r1, r1, #0x1f ");
 	asm("cmp	r1, #0x11 ");				// mode_fiq?
 	asm("ldreq	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR13Fiq));	// yes - take registers from FIQ stack
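
handle_crash_ipi() above saves a CPU's state at most once: a non-negative iExcCode means this CPU already recorded its registers (it is the CPU that crashed first), so the IPI handler skips the save rather than overwrite the crash-time snapshot. A small sketch of that guard, with a placeholder register-set type in place of SFullArmRegSet:

// Placeholder for SFullArmRegSet; only the guard logic is modelled here.
struct SCrashRegsModel
	{
	int iExcCode;		// negative until a state save has been recorded
	};

void HandleCrashIpiModel(SCrashRegsModel& aRegs)
	{
	if (aRegs.iExcCode >= 0)		// cmp r0, #0 / bge state_already_saved
		return;						// this CPU's crash-time state is already captured
	aRegs.iExcCode = 0;				// stands in for Arm::SaveState() filling in the set
	// ...the real handler then inspects the saved mode and, for mode_fiq,
	// takes the banked registers from the FIQ stack (iN.iR13Fiq).
	}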