// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkern\arm\ncutilf.cia
//
//

#include <e32cia.h>
#include <arm.h>
#include "highrestimer.h"

#ifdef __SCHEDULER_MACHINE_CODED__
/** Signals the request semaphore of a nanothread.

	This function is intended to be used by the EPOC layer and personality
	layers.  Device drivers should use Kern::RequestComplete instead.

	@param aThread Nanothread to signal.  Must be non-NULL.

	@see Kern::RequestComplete()

	@pre Interrupts must be enabled.
	@pre Do not call from an ISR.
 */
EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/)
	{
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR);

	asm("ldr r2, __TheScheduler ");
	asm("str lr, [sp, #-4]! ");				// save return address
	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("add r0, r0, #%a0" : : "i" _FOFF(NThread,iRequestSemaphore));	// r0 -> request semaphore
	asm("add r3, r3, #1 ");
	asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel
	asm("bl  " CSM_ZN14NFastSemaphore6SignalEv);	// alignment OK since target is also assembler
	asm("ldr lr, [sp], #4 ");				// restore return address
	asm("b  " CSM_ZN5NKern6UnlockEv);		// unlock the kernel and return
	}
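
// A minimal usage sketch (hypothetical personality-layer code, not part of
// this file): completing an asynchronous request by signalling the client
// nanothread's request semaphore, honouring the preconditions above
// (interrupts enabled, not called from an ISR).
//
//	void CompleteClientRequest(NThread* aClient)
//		{
//		NKern::ThreadRequestSignal(aClient);	// signals aClient->iRequestSemaphore
//		}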


/** Atomically signals the request semaphore of a nanothread and a fast mutex.

	This function is intended to be used by the EPOC layer and personality
	layers.  Device drivers should use Kern::RequestComplete instead.

	@param aThread Nanothread to signal.  Must be non-NULL.
	@param aMutex Fast mutex to signal.  If NULL, the system lock is signalled.

	@see Kern::RequestComplete()

	@pre Kernel must be unlocked.
	@pre Call in a thread context.
	@pre Interrupts must be enabled.
 */
EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/, NFastMutex* /*aMutex*/)
	{
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);

	asm("ldr r2, __TheScheduler ");
	asm("cmp r1, #0 ");						// aMutex == NULL?
	asm("ldreq r1, __SystemLock ");			// if so, use the system lock
	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("stmfd sp!, {r1,lr} ");				// save mutex pointer and return address
	asm("add r0, r0, #%a0" : : "i" _FOFF(NThread,iRequestSemaphore));	// r0 -> request semaphore
	asm("add r3, r3, #1 ");
	asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel
	asm("bl  " CSM_ZN14NFastSemaphore6SignalEv);
	asm("ldr r0, [sp], #4 ");				// r0 = mutex pointer
	asm("bl  " CSM_ZN10NFastMutex6SignalEv);		// alignment OK since target is also assembler
	asm("ldr lr, [sp], #4 ");				// restore return address
	asm("b  " CSM_ZN5NKern6UnlockEv);		// unlock the kernel and return

	asm("__SystemLock: ");
	asm(".word %a0" : : "i" ((TInt)&TheScheduler.iLock));
	asm("__TheScheduler: ");
	asm(".word TheScheduler ");
	}
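
// A minimal usage sketch (hypothetical, not part of this file): completing a
// request while holding the system lock. Passing NULL for aMutex signals the
// semaphore and releases the system lock as one atomic operation with respect
// to rescheduling.
//
//	void CompleteUnderSystemLock(NThread* aClient)
//		{
//		NKern::LockSystem();
//		// ... update state protected by the system lock ...
//		NKern::ThreadRequestSignal(aClient, NULL);	// signal + release system lock
//		}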
#endif


#ifndef __USER_CONTEXT_TYPE_MACHINE_CODED__
// Called by the C++ version of NThread::UserContextType().
// Returns true if aAddr is the scheduler's IRQ reschedule return point, i.e.
// the thread was last rescheduled after being preempted by an interrupt.
__NAKED__ TBool RescheduledAfterInterrupt(TUint32 /*aAddr*/)
	{
	asm("ldr r1, __irq_resched_return ");
	asm("cmp r0, r1 ");
	asm("movne r0, #0 ");		// not the IRQ return point: return EFalse
	__JUMP(,lr);
	asm("__irq_resched_return: ");
	asm(".word irq_resched_return ");
	}

#else

/** Get a value which indicates where a thread's user mode context is stored.

	@return A value that can be used as an index into the tables returned by
	NThread::UserContextTables().

	@pre any context
	@pre kernel locked
	@post kernel locked

	@see UserContextTables
	@publishedPartner
 */
EXPORT_C __NAKED__ NThread::TUserContextType NThread::UserContextType()
	{
	ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);
//
// Optimisation note: It may be possible to coalesce the first and second
// checks below by creating separate "EContextXxxDied" context types for each
// possible way a thread can die and ordering these new types before
// EContextException.
//

	// Dying thread? Use the context saved earlier by the kernel.

	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NThread,iCsFunction));
	asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iSpare3));   // r2 = iUserContextType
	asm("mov r1, r0 ");    // r1 = this
	asm("cmp r3, #%a0" : : "i" ((TInt)NThread::ECSExitInProgress));
	asm("moveq r0, r2");
	__JUMP(eq,lr);

	// Exception or no user context?

	asm("ldr r3, __TheScheduler");
	asm("cmp r2, #%a0 " : : "i" ((TInt)NThread::EContextException));
	asm("ldr r3, [r3, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
	asm("movls r0, r2 ");  // return EContextNone or EContextException
	__JUMP(ls,lr);
	asm("cmp r2, #%a0 " : : "i" ((TInt)NThread::EContextUserIntrCallback));
	asm("blo 1f");
	asm("cmp r2, #%a0 " : : "i" ((TInt)NThread::EContextWFARCallback));
	asm("movls r0, r2 ");  // return EContextUserIntrCallback or EContextWFARCallback
	__JUMP(ls,lr);

	// Getting the current thread's context? Must be in an exec call, as the
	// exception and dying-thread cases were tested above.

	asm("1: ");
	asm("cmp r3, r1");
	asm("moveq r0, #%a0" : : "i" ((TInt)NThread::EContextExec));
	__JUMP(eq,lr);

	asm("ldr r0, [r1, #%a0]" : : "i" _FOFF(NThread,iStackBase));
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iStackSize));
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(NThread,iSavedSP));
	asm("add r2, r2, r0");		// r2 = top of the supervisor stack
	asm("ldr r0, [r3, #%a0]" : : "i" (EXTRA_STACK_SPACE+11*4)); // get saved return address from reschedule
	asm("ldr r12, __irq_resched_return ");
	asm("sub r2, r2, r3");		// r2 = number of bytes saved on the stack
	asm("cmp r0, r12 ");
	asm("beq preempted ");

	// Transition to supervisor mode must have been due to a SWI.

	asm("not_preempted:");
	asm("cmp r2, #%a0 " : : "i" ((TInt)(EXTRA_STACK_SPACE+15*4)));
	asm("moveq r0, #%a0 " : : "i" ((TInt)NThread::EContextWFAR)); // thread must have blocked doing Exec::WaitForAnyRequest
	asm("movne r0, #%a0 " : : "i" ((TInt)NThread::EContextExec)); // thread must have been in a SLOW or UNPROTECTED Exec call
	__JUMP(,lr);

	// Thread was preempted due to an interrupt.
	// Interrupt and reschedule will have pushed ? words + USER_MEMORY_GUARD_SAVE_WORDS + EXTRA_STACK_SPACE onto the stack.

	asm("preempted:");
	asm("ldr r12, [r3, #%a0]" : : "i" (EXTRA_STACK_SPACE+USER_MEMORY_GUARD_SAVE_WORDS*4+12*4));  // first word on stack before reschedule
	asm("mov r0, #%a0 " : : "i" ((TInt)NThread::EContextUserInterrupt));
	asm("and r12, r12, #0x1f ");	// r12 = mode bits of the interrupted context
	asm("cmp r12, #0x10 ");   // interrupted mode = user?
	__JUMP(eq,lr);

	asm("cmp r2, #%a0 " : : "i" ((TInt)(EXTRA_STACK_SPACE+USER_MEMORY_GUARD_SAVE_WORDS*4+30*4)));
	asm("bcs not_preempted "); 	// thread was interrupted in supervisor mode; return address and r4-r11 were saved

	// Interrupt occurred in exec call entry before r4-r11 were saved.
	asm("cmp r2, #%a0 " : : "i" ((TInt)(EXTRA_STACK_SPACE+USER_MEMORY_GUARD_SAVE_WORDS*4+20*4)));
	asm("moveq r0, #%a0 " : : "i" ((TInt)NThread::EContextSvsrInterrupt1)); // interrupt before return address was saved or after registers restored
	asm("movne r0, #%a0 " : : "i" ((TInt)NThread::EContextSvsrInterrupt2)); // interrupt after return address saved
	__JUMP(,lr);

	asm("__irq_resched_return: ");
	asm(".word irq_resched_return ");
	}

#endif  // __USER_CONTEXT_TYPE_MACHINE_CODED__
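
// A minimal usage sketch (hypothetical debugger-style code, assuming the
// TArmContextElement tables declared alongside NThread): using the returned
// type to select the matching register-layout table, with the kernel locked
// as the preconditions require.
//
//	const TArmContextElement* const* tables = NThread::UserContextTables();
//	NThread::TUserContextType type = aThread->UserContextType();
//	const TArmContextElement* layout = tables[type];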

// Read the user-mode (banked) SP and LR into the two-word buffer at the given address.
__NAKED__ void Arm::GetUserSpAndLr(TAny*)
	{
	asm("stmia r0, {r13, r14}^ ");	// '^' selects the user-mode banked registers
	asm("mov r0, r0"); // NOP needed between stm^ and banked register access
	__JUMP(,lr);
	}

// Write the user-mode (banked) SP and LR from the two-word buffer at the given address.
__NAKED__ void Arm::SetUserSpAndLr(TAny*)
	{
	asm("ldmia r0, {r13, r14}^ ");	// '^' selects the user-mode banked registers
	asm("mov r0, r0"); // NOP needed between ldm^ and banked register access
	__JUMP(,lr);
	}

#ifdef __CPU_ARM_USE_DOMAINS
// Read the MMU Domain Access Control Register (DACR).
__NAKED__ TUint32 Arm::Dacr()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");
	__JUMP(,lr);
	}

// Write the MMU Domain Access Control Register.
__NAKED__ void Arm::SetDacr(TUint32)
	{
	asm("mcr p15, 0, r0, c3, c0, 0 ");
	CPWAIT(,r0);
	__JUMP(,lr);
	}

// Clear the DACR bits in the first argument, set those in the second, and
// return the resulting DACR value.
__NAKED__ TUint32 Arm::ModifyDacr(TUint32, TUint32)
	{
	asm("mrc p15, 0, r2, c3, c0, 0 ");
	asm("bic r2, r2, r0 ");		// clear the bits in the clear mask
	asm("orr r2, r2, r1 ");		// set the bits in the set mask
	asm("mcr p15, 0, r2, c3, c0, 0 ");
	CPWAIT(,r0);
	asm("mov r0, r2 ");			// return the new value
	__JUMP(,lr);
	}
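
// A minimal usage sketch (hypothetical domain number, standard DACR layout):
// each of the 16 MMU domains owns a 2-bit field in the DACR, where 01 = client
// (accesses checked against page permissions) and 11 = manager (no checking).
// This would make domain 3 a client domain while preserving the other fields:
//
//	const TInt KMyDomain = 3;							// hypothetical domain number
//	TUint32 clearMask = 3u << (2 * KMyDomain);			// the domain's whole 2-bit field
//	Arm::ModifyDacr(clearMask, 1u << (2 * KMyDomain));	// field := 01 (client)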
#endif

#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
// Write the coprocessor access register and wait for the write to take effect.
__NAKED__ void Arm::SetCar(TUint32)
	{
	SET_CAR(,r0);
	CPWAIT(,r0);
	__JUMP(,lr);
	}
#endif



/** Get the CPU's coprocessor access register value.

@return The value of the CAR; 0 if the CPU doesn't have a CAR.

@publishedPartner
@released
 */
EXPORT_C __NAKED__ TUint32 Arm::Car()
	{
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
	GET_CAR(,r0);
#else
	asm("mov r0, #0 ");
#endif
	__JUMP(,lr);
	}



/** Modify the CPU's coprocessor access register value.
	Does nothing if the CPU does not have a CAR.

@param	aClearMask	Mask of bits to clear	(1 = clear this bit)
@param	aSetMask	Mask of bits to set		(1 = set this bit)
@return The original value of the CAR; 0 if the CPU doesn't have a CAR.

@publishedPartner
@released
 */
EXPORT_C __NAKED__ TUint32 Arm::ModifyCar(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
	{
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
	GET_CAR(,r2);				// r2 = original CAR
	asm("bic r0, r2, r0 ");		// clear the bits in aClearMask
	asm("orr r0, r0, r1 ");		// set the bits in aSetMask
	SET_CAR(,r0);
	CPWAIT(,r0);
	asm("mov r0, r2 ");			// return the original value
#else
	asm("mov r0, #0 ");
#endif
	__JUMP(,lr);
	}
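
// A minimal usage sketch (hypothetical mask values, standard CPACR layout):
// granting full access to coprocessors 10 and 11 (the VFP) while leaving the
// other access fields alone. Each coprocessor has a 2-bit access field in the
// CAR; 11 = full access.
//
//	const TUint32 KCp10Cp11Full = 0xfu << 20;	// CP10 = bits 21:20, CP11 = bits 23:22
//	TUint32 oldCar = Arm::ModifyCar(KCp10Cp11Full, KCp10Cp11Full);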

#ifdef __CPU_HAS_VFP
// Write the VFP FPEXC register, applying the ARM1136 erratum 351912
// workaround where required.
__NAKED__ void Arm::SetFpExc(TUint32)
	{
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED)
// If we are about to enable VFP, disable dynamic branch prediction.
// If we are about to disable VFP, enable dynamic branch prediction if return
// stack prediction is enabled.
	asm("mrs r3, cpsr ");
	CPSIDAIF;							// disable interrupts around the sequence
	asm("mrc p15, 0, r1, c1, c0, 1 ");	// r1 = auxiliary control register
	asm("tst r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
	asm("bic r1, r1, #2 ");				// clear DB bit (disable dynamic prediction)
	asm("and r2, r1, #1 ");				// r2 bit 0 = RS bit (1 if return stack enabled)
	asm("orreq r1, r1, r2, lsl #1 ");	// if VFP is being disabled set DB = RS
	asm("mcr p15, 0, r1, c1, c0, 1 ");
	asm("mcr p15, 0, r2, c7, c5, 6 ");	// flush BTAC
	VFP_FMXR(,VFP_XREG_FPEXC,0);
	asm("msr cpsr, r3 ");				// restore interrupt state
	__JUMP(,lr);
#else
	VFP_FMXR(,VFP_XREG_FPEXC,0);
	__JUMP(,lr);
#endif
	}
#endif



/** Get the value of the VFP FPEXC register.

@return The value of FPEXC; 0 if there is no VFP.

@publishedPartner
@released
 */
EXPORT_C __NAKED__ TUint32 Arm::FpExc()
	{
#ifdef __CPU_HAS_VFP
	VFP_FMRX(,0,VFP_XREG_FPEXC);
#else
	asm("mov r0, #0 ");
#endif
	__JUMP(,lr);
	}



/** Modify the VFP FPEXC register.
	Does nothing if there is no VFP.

@param	aClearMask	Mask of bits to clear	(1 = clear this bit)
@param	aSetMask	Mask of bits to set		(1 = set this bit)
@return The original value of FPEXC; 0 if no VFP is present.

@publishedPartner
@released
 */
EXPORT_C __NAKED__ TUint32 Arm::ModifyFpExc(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
	{
#ifdef __CPU_HAS_VFP
	VFP_FMRX(,12,VFP_XREG_FPEXC);	// r12 = original FPEXC
	asm("bic r0, r12, r0 ");		// clear the bits in aClearMask
	asm("orr r0, r0, r1 ");			// set the bits in aSetMask

#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED)
// If we are about to enable VFP, disable dynamic branch prediction.
// If we are about to disable VFP, enable dynamic branch prediction if return
// stack prediction is enabled.
	asm("mrs r3, cpsr ");
	CPSIDAIF;
	asm("mrc p15, 0, r1, c1, c0, 1 ");
	asm("tst r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
	asm("bic r1, r1, #2 ");				// clear DB bit (disable dynamic prediction)
	asm("and r2, r1, #1 ");				// r2 bit 0 = RS bit (1 if return stack enabled)
	asm("orreq r1, r1, r2, lsl #1 ");	// if VFP is being disabled set DB = RS
	asm("mcr p15, 0, r1, c1, c0, 1 ");
	asm("mcr p15, 0, r2, c7, c5, 6 ");	// flush BTAC
	VFP_FMXR(,VFP_XREG_FPEXC,0);
	asm("msr cpsr, r3 ");
#else
	VFP_FMXR(,VFP_XREG_FPEXC,0);
#endif	// erratum 351912

	asm("mov r0, r12 ");			// return the original value
#else	// no VFP
	asm("mov r0, #0 ");
#endif
	__JUMP(,lr);
	}
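
// A minimal usage sketch (not part of this file): temporarily enabling the
// VFP by setting the FPEXC EN bit (the VFP_FPEXC_EN mask used above), then
// restoring the previous state afterwards.
//
//	TUint32 oldFpExc = Arm::ModifyFpExc(0, VFP_FPEXC_EN);	// set EN, keep the rest
//	// ... use the VFP ...
//	Arm::SetFpExc(oldFpExc);								// restore the saved state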

/** Get the value of the VFP FPSCR register.

@return The value of FPSCR; 0 if there is no VFP.

@publishedPartner
@released
 */
EXPORT_C __NAKED__ TUint32 Arm::FpScr()
	{
#ifdef __CPU_HAS_VFP
	VFP_FMRX(,0,VFP_XREG_FPSCR);
#else
	asm("mov r0, #0 ");
#endif
	__JUMP(,lr);
	}



/** Modify the VFP FPSCR register.
	Does nothing if there is no VFP.

@param	aClearMask	Mask of bits to clear	(1 = clear this bit)
@param	aSetMask	Mask of bits to set		(1 = set this bit)
@return The original value of FPSCR; 0 if no VFP is present.

@publishedPartner
@released
 */
EXPORT_C __NAKED__ TUint32 Arm::ModifyFpScr(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
	{
#ifdef __CPU_HAS_VFP
	VFP_FMRX(,2,VFP_XREG_FPSCR);	// r2 = original FPSCR
	asm("bic r0, r2, r0 ");			// clear the bits in aClearMask
	asm("orr r0, r0, r1 ");			// set the bits in aSetMask
	VFP_FMXR(,VFP_XREG_FPSCR,0);
	asm("mov r0, r2 ");				// return the original value
#else
	asm("mov r0, #0 ");
#endif
	__JUMP(,lr);
	}
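
// A minimal usage sketch (bit positions from the standard VFP FPSCR layout,
// hypothetical constants): enabling flush-to-zero (FZ, bit 24) and default
// NaN (DN, bit 25) modes while preserving the rest of the register.
//
//	const TUint32 KFpscrFZ = 1u << 24;
//	const TUint32 KFpscrDN = 1u << 25;
//	TUint32 oldFpScr = Arm::ModifyFpScr(0, KFpscrFZ | KFpscrDN);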


/** Detect whether NEON is present.

@return ETrue if present, EFalse if not.

@internalTechnology
@released
 */
#if defined(__CPU_HAS_VFP) && defined(__VFP_V3)
__NAKED__ TBool Arm::NeonPresent()
	{
	asm("mov	r0, #0 ");										// Default: not present
	VFP_FMRX(,	1,VFP_XREG_FPEXC);								// r1 = original FPEXC (saved VFP state)
	asm("orr	r2, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN));
	VFP_FMXR(,	VFP_XREG_FPEXC,2);								// Enable VFP (write r2, not the unmodified r1)

	VFP_FMRX(,	2,VFP_XREG_MVFR0);								// Read MVFR0
	asm("tst	r2, #%a0" : : "i" ((TInt)VFP_MVFR0_ASIMD32));	// Check whether all 32 Advanced SIMD registers are present
	asm("beq	0f ");											// Skip ahead if not
	GET_CAR(,	r2);
	asm("tst	r2, #%a0" : : "i" ((TInt)VFP_CPACR_ASEDIS));	// Check whether ASIMD is disabled
	asm("bne	0f ");											// Skip ahead if so
	asm("tst	r2, #%a0" : : "i" ((TInt)VFP_CPACR_D32DIS));	// Check whether the upper 16 registers are disabled
	asm("moveq	r0, #1" );										// If not, report NEON present

	asm("0: ");
	VFP_FMXR(,VFP_XREG_FPEXC,1);								// Restore the original FPEXC
	__JUMP(,	lr);
	}
#endif


#ifdef __CPU_HAS_MMU
// Report whether the MMU is enabled (bit 0 of the CP15 control register).
__NAKED__ TBool Arm::MmuActive()
	{
	asm("mrc p15, 0, r0, c1, c0, 0 ");
	asm("and r0, r0, #1 ");
	__JUMP(,lr);
	}

// Returns the content of Translation Table Base Register 0. To get the
// physical address of the level 1 table, on some platforms the low-order
// table walk cache attribute bits must be masked off.
__NAKED__ TUint32 Arm::MmuTTBR0()
	{
	asm("mrc p15, 0, r0, c2, c0, 0 ");
	__JUMP(,lr);
	}
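
// A minimal usage sketch (hypothetical mask, assuming a 16KB-aligned level 1
// table, i.e. TTBCR.N == 0 so TTBR0 bits 31:14 hold the base address):
//
//	TUint32 ttbr0 = Arm::MmuTTBR0();
//	TUint32 l1PhysAddr = ttbr0 & 0xffffc000u;	// mask off the walk attribute bits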
#endif



/** Get the current value of the high performance counter.

    If a high performance counter is not available, this uses the millisecond
    tick count instead.
*/
#ifdef HAS_HIGH_RES_TIMER
EXPORT_C __NAKED__ TUint32 NKern::FastCounter()
	{
	GET_HIGH_RES_TICK_COUNT(R0);
	__JUMP(,lr);
	}
#else
EXPORT_C TUint32 NKern::FastCounter()
	{
	return NTickCount();
	}
#endif



/** Get the frequency of the counter queried by NKern::FastCounter().
*/
EXPORT_C TInt NKern::FastCounterFrequency()
	{
#ifdef HAS_HIGH_RES_TIMER
	return KHighResTimerFrequency;
#else
	return 1000000 / NKern::TickPeriod();	// TickPeriod() returns microseconds
#endif
	}
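
// A minimal usage sketch (not part of this file): timing a code sequence with
// the fast counter. Raw deltas are only meaningful when scaled by
// NKern::FastCounterFrequency(), since the counter source varies by platform.
//
//	TUint32 start = NKern::FastCounter();
//	// ... code under measurement ...
//	TUint32 delta = NKern::FastCounter() - start;
//	TInt freq = NKern::FastCounterFrequency();
//	TUint32 us = (TUint32)(((TUint64)delta * 1000000u) / (TUint32)freq);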