author       Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
date         Fri, 16 Apr 2010 16:24:37 +0300
changeset    90 947f0dc9f7a8
parent       0 a41df078684a
child        144 c5e01f2a4bfd
child        257 3e88ff8f41d5
permissions  -rw-r--r--
// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkern\arm\ncsched.cia
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

// TDfc member data
#define __INCLUDE_TDFC_DEFINES__

#include <e32cia.h>
#include <arm.h>
#include "highrestimer.h"
#include "nkern.h"
#include "emievents.h"

#if defined(MONITOR_THREAD_CPU_TIME) && !defined(HAS_HIGH_RES_TIMER)
#error MONITOR_THREAD_CPU_TIME is defined, but high res timer is not supported
#endif

#ifdef _DEBUG
#define ASM_KILL_LINK(rp,rs)	asm("mov "#rs", #0xdf ");\
				asm("orr "#rs", "#rs", "#rs", lsl #8 ");\
				asm("orr "#rs", "#rs", "#rs", lsl #16 ");\
				asm("str "#rs", ["#rp"] ");\
				asm("str "#rs", ["#rp", #4] ");
#else
#define ASM_KILL_LINK(rp,rs)
#endif

#define ALIGN_STACK_START \
	asm("mov r12, sp"); \
	asm("tst sp, #4"); \
	asm("subeq sp, sp, #4"); \
	asm("str r12, [sp,#-4]!")

#define ALIGN_STACK_END \
	asm("ldr sp, [sp]")


#ifdef __CPU_HAS_VFP
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
#define FPEXC_REG 10
#define FPEXC_REG3 4
#else
#define FPEXC_REG 11
#define FPEXC_REG3 10
#endif
#endif

//////////////////////////////////////////////////////////////////////////////
// Macros to define which standard ARM registers are used to save
// required co-processor registers on a reschedule.
// They rely on the fact that the compiler will concatenate adjacent strings
// so "r" "9" "," "r" "10" "," will be converted in the assembler file to:
// r9,r10
/////////////////////////////////////////////////////////////////////////////

#ifdef __CPU_HAS_CP15_THREAD_ID_REG
#define TID_SP_REG(reg) "r"#reg","
#else
#define TID_SP_REG(reg)
#endif //__CPU_HAS_CP15_THREAD_ID_REG

#ifdef __CPU_HAS_VFP
#define FPEXC_SP_REG(reg) "r"#reg","
#else
#define FPEXC_SP_REG(reg)
#endif //__CPU_HAS_VFP

#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
#define CAR_SP_REG(reg) "r"#reg","
#else
#define CAR_SP_REG(reg)
#endif //__CPU_HAS_COPROCESSOR_ACCESS_REG

#ifdef __CPU_ARM_USE_DOMAINS
#define DACR_SP_REG(reg) "r"#reg","
#else
#define DACR_SP_REG(reg)
#endif //__CPU_ARM_USE_DOMAINS

#ifdef __CPU_SUPPORT_THUMB2EE
#define THUMB2EE_SP_REG(reg) "r"#reg","
#else
#define THUMB2EE_SP_REG(reg)
#endif // __CPU_SUPPORT_THUMB2EE

// NOTE THIS WILL PRODUCE A WARNING IF REGISTERS ARE NOT IN ASCENDING ORDER
#define EXTRA_STACK_LIST(thumb2ee, tid, fpexc, car, dacr)\
	THUMB2EE_SP_REG(thumb2ee) TID_SP_REG(tid) FPEXC_SP_REG(fpexc) CAR_SP_REG(car) DACR_SP_REG(dacr)

//////////////////////////////////////////////////////////////////////////////

//#define __DEBUG_BAD_ADDR

extern "C" void PanicFastSemaphoreWait();

#ifdef __DFC_MACHINE_CODED__

__ASSERT_COMPILE(_FOFF(TDfcQue,iPresent) == 0);
__ASSERT_COMPILE(_FOFF(TDfc,iNext) == 0);
__ASSERT_COMPILE(_FOFF(TDfc,iPrev) == 4);
__ASSERT_COMPILE(_FOFF(TDfc,iPriority) % 4 == 0);
__ASSERT_COMPILE(_FOFF(TDfc,iOnFinalQ) == _FOFF(TDfc,iPriority) + 2);
__ASSERT_COMPILE(_FOFF(TDfc,iQueued) == _FOFF(TDfc,iOnFinalQ) + 1);

__NAKED__ void TDfcQue::ThreadFunction(TAny* /*aDfcQ*/)
	{
	asm("ldr r11, __TheScheduler2 ");

	asm("mov r4, r0 ");					// r4=aDfcQ
	asm("ldr r10, [r11, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
	asm("mov r7, #0 ");
	asm("mov r9, #1 ");
	SET_INTS_1(r5, MODE_SVC, INTS_ALL_ON);
	SET_INTS_1(r6, MODE_SVC, INTS_ALL_OFF);

	asm("dfc_thrd_fn_check_queue: ");
	SET_INTS_2(r5, MODE_SVC, INTS_ALL_ON);	// enable interrupts

	asm("dfc_thrd_fn_check_queue2: ");
	asm("str r9, [r11, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel
	asm("ldr r3, [r4, #%a0]" : : "i" _FOFF(TDfcQue,iPresent));	// r3=aDfcQ->iPresent
	asm("add lr, r4, #%a0" : : "i" _FOFF(TDfcQue,iQueue));		// lr=address of priority 0 queue
#ifdef __CPU_ARM_HAS_CLZ
	CLZ(12,3);							// r12=31-MSB(r3), 32 if r3=0
	asm("rsbs r12, r12, #31 ");			// r12=ms bit number set, -1 if queue empty
	asm("bmi dfc_thrd_fn_wait ");		// if empty, wait for next request
#else
	asm("movs r2, r3 ");				// check if queue empty
	asm("beq dfc_thrd_fn_wait ");		// if empty, wait for next request
	asm("mov r12, #7 ");
	asm("cmp r2, #0x10 ");
	asm("movcc r2, r2, lsl #4 ");
	asm("subcc r12, r12, #4 ");
	asm("cmp r2, #0x40 ");
	asm("movcc r2, r2, lsl #2 ");
	asm("subcc r12, r12, #2 ");
	asm("cmp r2, #0x80 ");
	asm("subcc r12, r12, #1 ");			// r12=ms bit number set
#endif
	asm("ldr r8, [lr, r12, lsl #2]! ");	// lr=address of highest priority non-empty queue, r8=address of first DFC
	asm("ldmia r8, {r0-r1} ");			// r0=first->next, r1=first->prev
	asm("cmp r0, r8 ");					// check if this is the only one at this priority
	asm("strne r0, [r1, #0] ");			// if not, prev->next=next
	asm("strne r1, [r0, #4] ");			// and next->prev=prev
	asm("streq r7, [lr] ");				// if this was only one, set head pointer for this priority to NULL
	asm("strne r0, [lr] ");				// else set head pointer to first->next
	ASM_KILL_LINK(r8,r1);
	asm("strh r7, [r8, #%a0]" : : "i" _FOFF(TDfc, iOnFinalQ));	// iOnFinalQ=iQueued=FALSE - can't touch link pointers after this
	asm("biceq r3, r3, r9, lsl r12 ");	// if no more at this priority clear bit in iPresent
	asm("streq r3, [r4, #%a0]" : : "i" _FOFF(TDfcQue,iPresent));

	SET_INTS_2(r6, MODE_SVC, INTS_ALL_OFF);	// interrupts off
	asm("ldr r3, [r11, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));	// check if reschedule required
	asm("cmp r3, #0 ");
	asm("streq r7, [r11, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// if no reschedule required unlock the kernel
	asm("blne " CSM_ZN10TScheduler10RescheduleEv);	// if reschedule required, do it
	SET_INTS_2(r5, MODE_SVC, INTS_ALL_ON);	// restore interrupts

	asm("ldr r1, [r8, #%a0]" : : "i" _FOFF(TDfc, iFunction));	// r1=function address
	asm("adr lr, dfc_thrd_fn_check_queue2 ");	// set up return address
	asm("ldr r0, [r8, #%a0]" : : "i" _FOFF(TDfc, iPtr));		// r0=DFC argument
	__JUMP(,r1);						// call DFC

	asm("dfc_thrd_fn_wait: ");
	asm("mov r0, #%a0" : : "i" ((TInt)NThreadBase::EWaitDfc));
	asm("strb r0, [r10, #%a0]" : : "i" _FOFF(NThreadBase,iNState));
	asm("strb r9, [r11, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
	asm("mov r0, r11 ");
	asm("mov r1, r10 ");
	asm("bl unready ");
	asm("adr lr, dfc_thrd_fn_check_queue ");	// set up return address
	asm("b " CSM_ZN10TScheduler10RescheduleEv);

	asm("__TheScheduler2: ");
	asm(".word TheScheduler ");
	}


/** Cancels an IDFC or DFC.

	This function does nothing if the IDFC or DFC is not queued.

	@return	TRUE	if the DFC was actually dequeued by this call. In that case
					it is guaranteed that the DFC will not execute until it is
					queued again.
			FALSE	if the DFC was not queued on entry to the call, or was in
					the process of being executed or cancelled. In this case
					it is possible that the DFC executes after this call
					returns.

	@post	In either case it is safe to delete the DFC object on
			return from this call, provided only that the DFC function does not
			refer to the DFC object itself.

	@pre	IDFC or thread context. Do not call from ISRs.

	@pre	If the DFC function accesses the DFC object itself, the user must ensure that
			Cancel() cannot be called while the DFC function is running.
 */
__NAKED__ EXPORT_C TBool TDfc::Cancel()
	{
	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

	asm("ldr r1, __TheScheduler2 ");
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("add r3, r3, #1 ");
	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TDfc,iPriority));	// r2=priority/flags
	SET_INTS_1(r12, MODE_SVC, INTS_ALL_OFF);
	asm("tst r2, #0xff000000 ");		// test queued flag
	asm("moveq r0, #0 ");				// if not queued, return FALSE
	asm("beq 0f ");
	SET_INTS_2(r12, MODE_SVC, INTS_ALL_OFF);	// otherwise disable interrupts while we dequeue
	asm("ldmia r0, {r3,r12} ");			// r3=next, r12=prev
	SET_INTS_1(r1, MODE_SVC, INTS_ALL_ON);
	asm("str r3, [r12, #0] ");			// prev->next=next
	asm("str r12, [r3, #4] ");			// next->prev=prev
	SET_INTS_2(r1, MODE_SVC, INTS_ALL_ON);	// reenable interrupts
	asm("tst r2, #0x00ff0000 ");		// check iOnFinalQ
	asm("beq 1f ");						// if FALSE, finish up
	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TDfc,iDfcQ));	// r1=iDfcQ
	asm("and r2, r2, #0xff ");			// r2=iPriority
	asm("subs r12, r3, r0 ");			// check if queue is now empty, r12=0 if it is
	asm("beq 2f ");						// branch if now empty
	asm("add r1, r1, r2, lsl #2 ");		// r1=&iDfcQ->iQueue[iPriority]-_FOFF(TDfcQue,iQueue)
	asm("ldr r12, [r1, #%a0]" : : "i" _FOFF(TDfcQue,iQueue));	// r12=iDfcQ->iQueue[iPriority]
	asm("cmp r12, r0 ");				// is this one first?
	asm("streq r3, [r1, #%a0]" : : "i" _FOFF(TDfcQue,iQueue));	// if so, iQueue[pri]=next
	asm("b 1f ");
	asm("2: ");							// r0=this, r1=iDfcQ, r2=priority, r3=next, r12=0
	asm("ldr r3, [r1], #%a0" : : "i" _FOFF(TDfcQue,iQueue));	// r3=iDfcQ->iPresent, r1=&iDfcQ->iQueue[0]
	asm("str r12, [r1, r2, lsl #2] ");	// iDfcQ->iQueue[iPriority]=NULL
	asm("mov r12, #1 ");
	asm("bic r3, r3, r12, lsl r2 ");	// clear present bit
	asm("str r3, [r1, #-%a0]" : : "i" _FOFF(TDfcQue,iQueue));
	asm("1: ");
	ASM_KILL_LINK(r0,r1);
	asm("mov r3, #0 ");
	asm("strh r3, [r0, #%a0]" : : "i" _FOFF(TDfc,iOnFinalQ));	// iOnFinalQ=iQueued=FALSE - must be done last

	// R0=this != 0 here

	asm("0: ");
	asm("stmfd sp!, {r0,lr} ");
	asm("bl " CSM_ZN5NKern6UnlockEv);	// unlock the kernel
	__POPRET("r0,");
	}
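
// Usage sketch (illustrative only; pDfc is a hypothetical TDfc* owned by the
// caller). Per the contract above, deletion is safe whichever way Cancel()
// returns, provided the DFC function does not refer to the DFC object itself:
//
//	TBool dequeued = pDfc->Cancel();	// thread or IDFC context, never an ISR
//	delete pDfc;						// safe in either case
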
#endif

#ifdef __FAST_SEM_MACHINE_CODED__
/** Waits on a fast semaphore.

	Decrements the signal count for the semaphore and
	removes the calling thread from the ready-list if the semaphore becomes
	unsignalled. Only the thread that owns a fast semaphore can wait on it.

	Note that this function does not block; it merely updates the NThread state,
	and rescheduling will only occur when the kernel is unlocked. Generally threads
	would use NKern::FSWait() which manipulates the kernel lock for you.

	@pre	The calling thread must own the semaphore.
	@pre	Kernel must be locked.
	@pre	No fast mutex can be held.

	@post	Kernel is locked.

	@see NFastSemaphore::Signal()
	@see NKern::FSWait()
	@see NKern::Unlock()
 */
EXPORT_C __NAKED__ void NFastSemaphore::Wait()
	{
	ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC|MASK_NO_FAST_MUTEX);

	asm("mov r2, r0 ");
	asm("ldr r0, __TheScheduler ");
	asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread));	// r1=owning thread
	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// r3=current thread
	asm("cmp r1, r3 ");
	asm("bne PanicFastSemaphoreWait ");	// if wrong thread, fault
	// wait on a NFastSemaphore pointed to by r2
	// enter with r0=&TheScheduler, r1=the current thread, already validated
	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
	asm("mov r12, #%a0" : : "i" (NThread::EWaitFastSemaphore));
	asm("subs r3, r3, #1 ");
	asm("str r3, [r2, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));	// decrement iCount
	__JUMP(ge,lr);						// if result>=0, finished
	asm("str r2, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
	asm("strb r12, [r1, #%a0]" : : "i" _FOFF(NThread,iNState));
	asm("mov r3, #1 ");
	asm("strb r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));

	// remove thread from ready list
	asm("b unready ");
	}
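
// Usage sketch (illustrative only; iSem is a hypothetical NFastSemaphore owned
// by the calling thread). Wait() only updates thread state; the block itself
// happens when the kernel is unlocked, which is why callers normally go
// through NKern::FSWait():
//
//	NKern::Lock();		// Wait() requires the kernel locked
//	iSem.Wait();		// may mark this thread as waiting
//	NKern::Unlock();	// any reschedule (and hence the block) happens here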


/** Waits for a signal on the current thread's I/O semaphore.

	@pre	No fast mutex can be held.
	@pre	Kernel must be unlocked.
	@pre	Call in a thread context.
	@pre	Interrupts must be enabled.
 */
EXPORT_C __NAKED__ void NKern::WaitForAnyRequest()
	{
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC|MASK_NO_FAST_MUTEX);

	asm("ldr r0, __TheScheduler ");
	asm("str lr, [sp, #-4]! ");			// save lr
	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
	asm("bl wait_for_any_request2 ");
	SET_INTS(r0, MODE_SVC, INTS_ALL_ON);	// turn interrupts back on
	asm("ldr pc, [sp], #4 ");

	// Special case handler for Exec::WaitForAnyRequest() for efficiency reasons
	// Called from __ArmVectorSwi with R11=&TheScheduler, R1=current thread
	// Returns with interrupts disabled
	asm(".global wait_for_any_request ");
	asm("wait_for_any_request: ");

	ASM_DEBUG0(WaitForAnyRequest);
	asm("mov r0, r11 ");
	asm("wait_for_any_request2: ");
	SET_INTS_1(r2, MODE_SVC, INTS_ALL_OFF);
#ifdef _DEBUG
	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("cmp r3, #0 ");
	asm("movne r12, #0xd8000001 ");		// FAULT - calling Exec::WaitForAnyRequest() with the kernel locked is silly
	asm("strne r12, [r12] ");
#endif
	SET_INTS_2(r2, MODE_SVC, INTS_ALL_OFF);	// turn off interrupts
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iRequestSemaphore.iCount));
	asm("mov r3, #1 ");
	SET_INTS_1(r12, MODE_SVC, INTS_ALL_ON);
	asm("subs r2, r2, #1 ");
	asm("str r2, [r1, #%a0]" : : "i" _FOFF(NThread,iRequestSemaphore.iCount));	// decrement iCount
	__JUMP(ge,lr);						// if result non-negative, finished

	asm("str r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel
	SET_INTS_2(r12, MODE_SVC, INTS_ALL_ON);	// reenable interrupts
	asm("strb r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));

	// r2 points to NFastSemaphore
	asm("add r2, r1, #%a0" : : "i" _FOFF(NThread,iRequestSemaphore));
	asm("str lr, [sp, #-4]! ");
	asm("str r2, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
	asm("mov r3, #%a0" : : "i" (NThread::EWaitFastSemaphore));
	asm("strb r3, [r1, #%a0]" : : "i" _FOFF(NThread,iNState));	// mark thread waiting on semaphore
	asm("bl unready ");					// remove thread from ready list - DOESN'T CLOBBER R0
	asm("bl " CSM_ZN10TScheduler10RescheduleEv);	// Reschedule
	asm("ldr lr, [sp], #4 ");
	asm("mov r3, #%a0 " : : "i" (NThread::EContextWFARCallback));
	asm("b callUserModeCallbacks ");	// exit and call callbacks
	}
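
// Usage sketch (illustrative only): the classic request-servicing loop a
// driver or server thread might run around this call:
//
//	for (;;)
//		{
//		NKern::WaitForAnyRequest();	// sleep until the I/O semaphore is signalled
//		// ... service whatever request(s) completed ...
//		}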


/** Signals a fast semaphore multiple times.

	@pre	Kernel must be locked.
	@pre	Call either in a thread or an IDFC context.

	@post	Kernel is locked.

	@internalComponent
 */
EXPORT_C __NAKED__ void NFastSemaphore::SignalN(TInt /*aCount*/)
	{
	ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);

	asm("req_sem_signaln: ");
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
	asm("adds r2, r2, r1 ");
	asm("str r2, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
	__JUMP(cc,lr);						// if count did not cross 0 nothing more to do
	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread));
	asm("mov r1, #0 ");
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
	asm("b check_suspend_then_ready ");
	}
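
// Usage sketch (illustrative only; iSem is a hypothetical NFastSemaphore and
// aCount a signal count). As with Wait(), the caller manages the kernel lock
// itself at this level:
//
//	NKern::Lock();
//	iSem.SignalN(aCount);	// add aCount to the count, readying the owner if it crosses 0
//	NKern::Unlock();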

/** @internalComponent */
__NAKED__ void NFastSemaphore::WaitCancel()
	{
	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread));
	asm("mov r1, #0 ");
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
	asm("str r1, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
	asm("mov r0, r3 ");
	asm("b check_suspend_then_ready ");
	}


/** Resets a fast semaphore.

	@pre	Kernel must be locked.
	@pre	Call either in a thread or an IDFC context.

	@post	Kernel is locked.

	@internalComponent
 */
EXPORT_C __NAKED__ void NFastSemaphore::Reset()
	{
	ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);

	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
	asm("mov r1, #0 ");
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
	asm("cmp r2, #0 ");
	__JUMP(ge,lr);						// if count was not negative, nothing to do
	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread));
	asm("mov r1, #0 ");
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
	asm("b check_suspend_then_ready ");
	}

#endif

#ifdef __SCHEDULER_MACHINE_CODED__

__ASSERT_COMPILE(_FOFF(SDblQueLink,iNext) == 0);
__ASSERT_COMPILE(_FOFF(SDblQueLink,iPrev) == 4);
__ASSERT_COMPILE(_FOFF(TScheduler,iPresent) == 0);
__ASSERT_COMPILE(_FOFF(NFastSemaphore,iCount) == 0);
__ASSERT_COMPILE(_FOFF(NFastSemaphore,iOwningThread) == 4);
__ASSERT_COMPILE(_FOFF(TDfc,iPtr) == _FOFF(TDfc,iPriority) + 4);
__ASSERT_COMPILE(_FOFF(TDfc,iFunction) == _FOFF(TDfc,iPtr) + 4);

__NAKED__ void TScheduler::Remove(NThreadBase* /*aThread*/)
//
// Remove a thread from the ready list
//
	{
	asm("unready: ");
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iMadeUnReadyCounter));	// Update Made UnReady count here,
	asm("add r2, r2, #1");				// ie equiv of 'iMadeUnReadyCounter++;'.
	asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iMadeUnReadyCounter));

#ifdef _DEBUG
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
	asm("mov r12, #0xd8000003 ");
	asm("cmp r2, #0 ");
	asm("strne r12, [r12] ");			// crash if fast mutex held
#endif
	asm("ldr r12, [r1, #%a0]" : : "i" _FOFF(NThread,iTimeslice));
	asm("ldmia r1, {r2,r3} ");			// r2=next, r3=prev
	asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iTime));	// fresh timeslice for next time

	asm("pri_list_remove: ");
	ASM_KILL_LINK(r1,r12);
	asm("subs r12, r1, r2 ");			// check if more threads at this priority, r12=0 if not
	asm("bne unready_1 ");				// branch if there are more at same priority
	asm("ldrb r2, [r1, #%a0]" : : "i" _FOFF(NThread, iPriority));	// r2=thread priority
	asm("add r1, r0, #%a0" : : "i" _FOFF(TScheduler, iQueue));		// r1->iQueue[0]
	asm("str r12, [r1, r2, lsl #2] ");	// iQueue[priority]=NULL
	asm("ldrb r1, [r0, r2, lsr #3] ");	// r1=relevant byte in present mask
	asm("and r3, r2, #7 ");				// r3=priority & 7
	asm("mov r12, #1 ");
	asm("bic r1, r1, r12, lsl r3 ");	// clear bit in present mask
	asm("strb r1, [r0, r2, lsr #3] ");	// update relevant byte in present mask
	__JUMP(,lr);
	asm("unready_1: ");					// get here if there are other threads at same priority
	asm("ldrb r12, [r1, #%a0]" : : "i" _FOFF(NThread, iPriority));	// r12=thread priority
	asm("add r0, r0, #%a0" : : "i" _FOFF(TScheduler, iQueue));		// r0=&iQueue[0]
	asm("str r3, [r2, #4] ");			// next->prev=prev
	asm("ldr r12, [r0, r12, lsl #2]! ");	// r12=iQueue[priority], r0=&iQueue[priority]
	asm("str r2, [r3, #0] ");			// and prev->next=next
	asm("cmp r12, r1 ");				// if aThread was first...
	asm("streq r2, [r0, #0] ");			// iQueue[priority]=aThread->next
	__JUMP(,lr);						// finished
	}


/** Removes an item from a priority list.

	@param aLink A pointer to the item - this must not be NULL.
 */
EXPORT_C __NAKED__ void TPriListBase::Remove(TPriListLink* /*aLink*/)
	{
	asm("ldmia r1, {r2,r3} ");			// r2=aLink->iNext, r3=aLink->iPrev
	asm("b pri_list_remove ");
	}


/** Signals a fast semaphore.

	Increments the signal count of a fast semaphore by
	one and releases any waiting thread if the semaphore becomes signalled.

	Note that a reschedule will not occur before this function returns; that will
	only take place when the kernel is unlocked. Generally threads
	would use NKern::FSSignal() which manipulates the kernel lock for you.

	@pre	Kernel must be locked.
	@pre	Call either in a thread or an IDFC context.

	@post	Kernel is locked.

	@see NFastSemaphore::Wait()
	@see NKern::FSSignal()
	@see NKern::Unlock()
 */
EXPORT_C __NAKED__ void NFastSemaphore::Signal()
	{
	ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);

	asm("req_sem_signal: ");
	asm("ldmia r0, {r1,r2} ");			// r1=iCount, r2=iOwningThread
	asm("mov r3, #0 ");
	asm("adds r1, r1, #1 ");
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
	__JUMP(gt,lr);						// if count after incrementing is >0, nothing more to do
	asm("mov r0, r2 ");
	asm("str r3, [r0, #%a0]" : : "i" _FOFF(NThread,iWaitObj));

	// fall through to NThreadBase::CheckSuspendThenReady()
	}
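
// Usage sketch (illustrative only; iSem is a hypothetical NFastSemaphore).
// Most code uses NKern::FSSignal(), which wraps the kernel-lock handling the
// note above describes; the raw form is:
//
//	NKern::Lock();
//	iSem.Signal();		// releases the owning thread if the count crosses 0
//	NKern::Unlock();	// deferred reschedule takes effect here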


/** Makes a nanothread ready provided that it is not explicitly suspended.

	For use by RTOS personality layers.

	@pre	Kernel must be locked.
	@pre	Call either in a thread or an IDFC context.

	@post	Kernel is locked.
 */
EXPORT_C __NAKED__ void NThreadBase::CheckSuspendThenReady()
	{
	ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);

	asm("check_suspend_then_ready: ");
	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(NThread,iSuspendCount));
	asm("mov r2, #%a0" : : "i" (NThread::ESuspended));
	asm("cmp r1, #0 ");
	asm("bne mark_thread_suspended ");	// branch out if suspend count nonzero

	// fall through to NThreadBase::Ready()
	}


/** Makes a nanothread ready.

	For use by RTOS personality layers.

	@pre	Kernel must be locked.
	@pre	Call either in a thread or an IDFC context.
	@pre	The calling thread must not be explicitly suspended.

	@post	Kernel is locked.
 */
EXPORT_C __NAKED__ void NThreadBase::Ready()
	{
// on release builds just fall through to DoReady
#ifdef _DEBUG
	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_KERNEL_LOCKED);
	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(NThreadBase,iSuspendCount));
	asm("cmp r1, #0 ");
	asm("beq 1f ");
	ASM_CHECK_PRECONDITIONS(MASK_ALWAYS_FAIL);
	asm("1: ");
	asm("stmfd sp!, {r0,lr} ");
	asm("mov r0, #%a0" : : "i" ((TInt)KCRAZYSCHEDDELAY));
	asm("bl " CSM_Z9KDebugNumi );
	asm("cmp r0, #0 ");					// Z=1 => no delayed scheduler
	asm("ldmfd sp!, {r0,lr} ");
	asm("ldr r1, __TheScheduler ");
	asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iPriority));	// r2=priority of aThread
	asm("beq DoReadyInner ");			// delayed scheduler is disabled
	asm("ldr r12, __TheTimerQ ");
	asm("cmp r2, #0 ");
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(NTimerQ,iMsCount));
	asm("cmpne r12, #0 ");				// tick hasn't happened yet or this is priority 0
	asm("beq DoReadyInner ");			// so ready it as usual
	asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,i_ThrdAttr));
	asm("tst r2, #%a0 " : : "i" ((TInt)KThreadAttDelayed));
	__JUMP(ne,lr);						// thread is already on the delayed queue
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iDelayedQ));
	asm("ldr r12, [r3, #4] ");			// r12->last thread
	asm("str r0, [r3, #4] ");			// first->prev=this
	asm("str r0, [r12, #0] ");			// old last->next=this
	asm("stmia r0, {r3,r12} ");			// this->next=first, this->prev=old last
	asm("orr r2, r2, #%a0 " : : "i" ((TInt)KThreadAttDelayed));
	asm("strb r2, [r0, #%a0]" : : "i" _FOFF(NThread,i_ThrdAttr));
	__JUMP(,lr);

	asm("__TheTimerQ: ");
	asm(".word TheTimerQ ");
	asm("__SuperPageAddress: ");
	asm(".word SuperPageAddress ");
#endif
// on release builds just fall through to DoReady
	}
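
// Usage sketch (illustrative only; pT is a hypothetical NThreadBase* managed
// by an RTOS personality layer). Both entry points require the kernel locked,
// as their preconditions state:
//
//	NKern::Lock();
//	pT->CheckSuspendThenReady();	// make ready unless explicitly suspended
//	NKern::Unlock();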

__NAKED__ void NThreadBase::DoReady()
	{
	asm("ldr r1, __TheScheduler ");
	asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iPriority));	// r2=priority of aThread

	asm("DoReadyInner: ");
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iMadeReadyCounter));	// Update Made Ready count here,
	asm("add r3, r3, #1");				// ie equiv of 'iMadeReadyCounter++;'.
	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iMadeReadyCounter));

	asm("mov r3, #%a0" : : "i" (NThread::EReady));
	asm("strb r3, [r0, #%a0]" : : "i" _FOFF(NThread,iNState));
	asm("ldmia r1!, {r3,r12} ");		// r3=present mask low, r12=present mask high, r1=&iQueue[0]
	asm("cmp r2, #31 ");
	asm("bhi 1f ");
	asm("cmp r12, #0 ");
	asm("mov r12, r3 ");
	asm("mov r3, #1 ");
	asm("bne 2f ");						// branch if high word set, so this has lower priority
	asm("cmp r3, r12, lsr r2 ");		// see if new thread may cause reschedule (CS if so, EQ if equal priority)
	asm("beq 3f ");						// branch if equality case (no need to update bitmask)
	asm("strhib r3, [r1, #%a0]" : : "i" (_FOFF(TScheduler,iRescheduleNeededFlag)-8));	// set reschedule flag if necessary
	asm("2: ");
	asm("tst r12, r3, lsl r2 ");		// test bit in present mask
	asm("orreq r12, r12, r3, lsl r2 ");	// if clear, set it ...
	asm("ldrne r3, [r1, r2, lsl #2] ");	// if not alone, r3->first thread on queue
	asm("streq r12, [r1, #-8] ");		// ... and update present mask low word
	asm("bne 4f ");						// branch if not alone (don't need to touch bitmask)
	asm("6: ");							// get here if thread is alone at this priority
	asm("str r0, [r1, r2, lsl #2] ");	// thread is alone at this priority, so point queue to it
	asm("str r0, [r0, #0] ");			// next=prev=this
	asm("str r0, [r0, #4] ");
	__JUMP(,lr);						// NOTE: R0=this != 0
	asm("5: ");							// get here if this thread has joint highest priority >= 32
	asm("add r2, r2, #32 ");			// restore thread priority
	asm("3: ");							// get here if this thread has joint highest priority < 32
	asm("ldr r3, [r1, r2, lsl #2] ");	// r3->first thread on queue
	asm("ldr r12, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iTime));	// r12=first thread->time remaining
	asm("subs r12, r12, #1 ");			// timeslice expired? if so, r12=-1 and C=0 else C=1
	asm("strccb r12, [r1, #%a0]" : : "i" (_FOFF(TScheduler,iRescheduleNeededFlag)-8));	// set reschedule flag if necessary
	asm("4: ");							// get here when adding to non-empty queue; r1->queue, r3->first thread on queue
	asm("ldr r12, [r3, #4] ");			// r12->last thread
	asm("str r0, [r3, #4] ");			// first->prev=this
	asm("str r0, [r12, #0] ");			// old last->next=this
	asm("stmia r0, {r3,r12} ");			// this->next=first, this->prev=old last
	__JUMP(,lr);						// NOTE: R0=this != 0
	asm("1: ");							// get here if this thread priority > 31
	asm("and r2, r2, #31 ");
	asm("mov r3, #1 ");
	asm("cmp r3, r12, lsr r2 ");		// see if new thread may cause reschedule (CS if so, EQ if equal priority)
	asm("beq 5b ");						// branch if equality case (no need to update bitmask)
	asm("strhib r3, [r1, #%a0]" : : "i" (_FOFF(TScheduler,iRescheduleNeededFlag)-8));	// set reschedule flag if necessary
	asm("tst r12, r3, lsl r2 ");		// test bit in present mask
	asm("orreq r12, r12, r3, lsl r2 ");	// if clear, set it ...
	asm("add r2, r2, #32 ");
	asm("streq r12, [r1, #-4] ");		// ... and update present mask high word
	asm("beq 6b ");						// branch if alone
	asm("ldr r3, [r1, r2, lsl #2] ");	// if not alone, r3->first thread on queue
	asm("b 4b ");						// branch if not alone (don't need to touch bitmask)

	asm("mark_thread_suspended: ");		// continuation of CheckSuspendThenReady in unusual case
	asm("strb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iNState));	// set state to suspended
	__JUMP(,lr);						// NOTE: R0=this != 0
	}

__NAKED__ void TScheduler::QueueDfcs()
	{
	// move DFCs from pending queue to their final queues
	// enter with interrupts off and kernel locked
	// leave with interrupts off and kernel locked
	// NOTE: WE MUST NOT CLOBBER R0 OR R2!
	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY


	SET_INTS(r1, MODE_SVC, INTS_ALL_ON);	// enable interrupts
#ifdef __CPU_ARM_HAS_CPS
	asm("mov r1, #1 ");					// (not necessary on ARMV5 as SET_INTS above leaves r1 == 0x13)
#endif
	asm("strb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iInIDFC));
	asm("stmfd sp!, {r2,r5,r11,lr} ");	// save registers

#ifdef BTRACE_CPU_USAGE
	asm("ldrb r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
	asm("add r5, r0, #%a0" : : "i" _FOFF(TScheduler,iDfcs));
	asm("mov r11, sp ");				// r11 points to saved registers
	asm("cmp r1, #0");
	asm("blne idfc_start_trace");
#else
	asm("add r5, r0, #%a0" : : "i" _FOFF(TScheduler,iDfcs));
	asm("mov r11, sp ");				// r11 points to saved registers
#endif

	asm("queue_dfcs_1: ");
	SET_INTS(r0, MODE_SVC, INTS_ALL_OFF);	// disable interrupts
	asm("ldr r0, [r5, #0] ");			// r0 points to first pending DFC
	SET_INTS_1(r1, MODE_SVC, INTS_ALL_ON);
	asm("subs r2, r0, r5 ");			// check if queue empty
	asm("ldrne r3, [r0, #0] ");			// r3 points to next DFC
	asm("beq queue_dfcs_0 ");			// if so, exit
	asm("str r3, [r5, #0] ");			// next one is now first
	asm("str r5, [r3, #4] ");			// next->prev=queue head
	SET_INTS_2(r1, MODE_SVC, INTS_ALL_ON);	// enable interrupts

	asm("ldrb r12, [r0, #%a0]" : : "i" _FOFF(TDfc,iPriority));	// r12=iPriority
	asm("adr lr, queue_dfcs_1 ");		// return to queue_dfcs_1
	asm("cmp r12, #%a0" : : "i" ((TInt)KNumDfcPriorities));		// check for immediate DFC
	asm("bcs do_immediate_dfc ");

	// enqueue the DFC and signal the DFC thread
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TDfc,iDfcQ));		// r2=iDfcQ
	asm("mov r3, #1 ");
	asm("dfc_enque_1: ");
	asm("ldr r1, [r2], #%a0" : : "i" _FOFF(TDfcQue,iQueue));	// r1=present mask, r2 points to first queue
	asm("strb r3, [r0, #%a0]" : : "i" _FOFF(TDfc,iOnFinalQ));	// set flag to show DFC on final queue
	asm("tst r1, r3, lsl r12 ");		// test bit in present mask
	asm("ldrne r1, [r2, r12, lsl #2] ");	// if not originally empty, r1->first
	asm("orreq r1, r1, r3, lsl r12 ");	// if bit clear, set it
	asm("streq r1, [r2, #%a0]" : : "i" (_FOFF(TDfcQue,iPresent)-_FOFF(TDfcQue,iQueue)));	// if bit originally clear update present mask
	asm("ldrne r3, [r1, #4] ");			// if not originally empty, r3->last
	asm("streq r0, [r2, r12, lsl #2] ");	// if queue originally empty, iQueue[p]=this
	asm("streq r0, [r0, #0] ");			// this->next=this
	asm("ldr r2, [r2, #%a0]" : : "i" (_FOFF(TDfcQue,iThread)-_FOFF(TDfcQue,iQueue)));	// r2=iDfcQ->iThread
	asm("stmneia r0, {r1,r3} ");		// this->next=first, this->prev=last
	asm("streq r0, [r0, #4] ");			// this->prev=this
	asm("ldrb r12, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iNState));	// r2=thread NState
	asm("strne r0, [r1, #4] ");			// first->prev=this
	asm("strne r0, [r3, #0] ");			// last->next=this
	asm("cmp r12, #%a0" : : "i" ((TInt)NThreadBase::EWaitDfc));	// check for EWaitDfc
	asm("mov r0, r2 ");					// r0->thread
	asm("beq check_suspend_then_ready ");	// if it is, release thread
	__JUMP(,lr);						// else we are finished - NOTE R0=thread ptr != 0

	asm("queue_dfcs_0: ");
#ifdef BTRACE_CPU_USAGE
	asm("ldrb r1, [r5, #%a0]" : : "i" (_FOFF(TScheduler,iCpuUsageFilter)-_FOFF(TScheduler,iDfcs)));
	asm("strb r2, [r5, #%a0]" : : "i" (_FOFF(TScheduler,iDfcPendingFlag)-_FOFF(TScheduler,iDfcs)));
	asm("strb r2, [r5, #%a0]" : : "i" (_FOFF(TScheduler,iInIDFC)-_FOFF(TScheduler,iDfcs)));
	asm("cmp r1, #0");
	asm("blne idfc_end_trace");
#else
	asm("strb r2, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iDfcPendingFlag)-_FOFF(TScheduler,iDfcs)));
	asm("strb r2, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iInIDFC)-_FOFF(TScheduler,iDfcs)));
#endif
	asm("sub r0, r5, #%a0" : : "i" _FOFF(TScheduler,iDfcs));	// restore r0
	asm("mov sp, r11 ");				// retrieve stack pointer before alignment
	asm("ldmfd sp!, {r2,r5,r11,pc} ");

	asm("do_immediate_dfc: ");
	ASM_KILL_LINK(r0,r1);
	asm("mov r1, #0x000000ff ");		// pri=0xff (IDFC), spare1=0 (unused), spare2=0 (iOnFinalQ), spare3=0 (iQueued)
	asm("str r1, [r0, #%a0]!" : : "i" _FOFF(TDfc,iPriority));	// dfc->iQueued=FALSE, r0->iPriority
	asm("ldmib r0, {r0,r1} ");			// r0 = DFC parameter, r1 = DFC function pointer
	asm("bic sp, sp, #4 ");				// align stack
	__JUMP(,r1);						// call DFC, return to queue_dfcs_1

#ifdef BTRACE_CPU_USAGE
	asm("idfc_start_trace_header:");
	asm(".word %a0" : : "i" ((TInt)(4<<BTrace::ESizeIndex) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::EIDFCStart<<BTrace::ESubCategoryIndex*8)) );
	asm("idfc_end_trace_header:");
	asm(".word %a0" : : "i" ((TInt)(4<<BTrace::ESizeIndex) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::EIDFCEnd<<BTrace::ESubCategoryIndex*8)) );

	asm("idfc_start_trace:");
	asm("ldr r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("ldr r0, idfc_start_trace_header" );
	__JUMP(,r1);

	asm("idfc_end_trace:");
	asm("ldr r0, idfc_end_trace_header" );
	asm("ldr pc, [r5,#%a0]" : : "i" (_FOFF(TScheduler,iBTraceHandler)-_FOFF(TScheduler,iDfcs)));
#endif

	}
#endif

#ifdef __DFC_MACHINE_CODED__

/** Queues an IDFC or a DFC from an ISR.

	This function is the only way to queue an IDFC and is the only way to queue
	a DFC from an ISR. To queue a DFC from an IDFC or a thread either Enque()
	or DoEnque() should be used.

	This function does nothing if the IDFC/DFC is already queued.

	@pre	Call only from ISR, IDFC or thread with the kernel locked.
	@pre	Do not call from thread with the kernel unlocked.
	@return	TRUE if DFC was actually queued by this call
			FALSE if DFC was already queued on entry so this call did nothing

	@see TDfc::DoEnque()
	@see TDfc::Enque()
 */
__NAKED__ EXPORT_C TBool TDfc::Add()
	{
	ASM_CHECK_PRECONDITIONS(MASK_NO_RESCHED);
#ifdef _DEBUG
	asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(TDfc,iPriority));
	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TDfc,iDfcQ));
	asm("cmp r2, #%a0" : : "i" ((TInt)KNumDfcPriorities));
	asm("bhs 1f ");
	asm("cmp r1, #0 ");
	asm("bne 1f ");
	ASM_CHECK_PRECONDITIONS(MASK_ALWAYS_FAIL);
	asm("1: ");
#endif
	// Fall through to TDfc::RawAdd() ...
	}
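
// Usage sketch (illustrative only; MyDevice, MyIsr and iEventDfc are
// hypothetical). Add() is the one queuing call that is legal from an ISR:
//
//	void MyIsr(TAny* aPtr)
//		{
//		// ... quiesce the interrupt source ...
//		((MyDevice*)aPtr)->iEventDfc.Add();	// defer the real work to the DFC
//		}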

/** Queue an IDFC or a DFC.

	This function is identical to TDfc::Add() but no checks are performed for correct usage,
	and it contains no instrumentation code.

	@return	TRUE if DFC was actually queued by this call
			FALSE if DFC was already queued on entry so this call did nothing
	@see TDfc::DoEnque()
	@see TDfc::Enque()
	@see TDfc::Add()
 */
__NAKED__ EXPORT_C TBool TDfc::RawAdd()
	{

#if defined(__CPU_ARM_HAS_LDREX_STREX_V6K)
/* Optimize with LDREXB/STREXB */

	asm("add r2, r0, #%a0" : : "i" _FOFF(TDfc, iQueued));	// r2=&iQueued's byte offset
	asm("mov r12, #1 ");				// r12=TRUE

	asm("tryagain: ");
	LDREXB(3,2);						// r3 = already iQueued
	STREXB(1,12,2);						// Try setting iQueued = TRUE
	asm("teq r1, #0 ");					// Exclusive write succeeded?
	asm("bne tryagain ");				// No - retry until it does

#elif defined(__CPU_ARM_HAS_LDREX_STREX)
/* Implement with LDREX/STREX and shifts */

#define IQUEUED_WORD (_FOFF(TDfc, iQueued) & ~3)		// offset of word containing iQueued
#define IQUEUED_SHIFT ((_FOFF(TDfc, iQueued) & 3) * 8)	// bit position of byte within word

	asm("add r2, r0, #%a0" : : "i" IQUEUED_WORD);		// r2=&iQueued's word

	asm("tryagain: ");
	LDREX(3, 2);
	asm("bic r12, r3, #%a0" : : "i" ((TInt)0xff<<IQUEUED_SHIFT));	// clear the bits to write to
	asm("orr r12, r12, #%a0" : : "i" ((TInt)0x01<<IQUEUED_SHIFT));	// &iQueued = TRUE;
	STREX(1, 12, 2);
	asm("teq r1, #0 ");
	asm("bne tryagain ");
	asm("and r3, r3, #%a0" : : "i" ((TInt)0xff<<IQUEUED_SHIFT));	// mask out unwanted bits
#else
	asm("mov r12, #1 ");				// r12=TRUE
	asm("add r2, r0, #%a0" : : "i" _FOFF(TDfc, iQueued));	// r2=&iQueued
	asm("swpb r3, r12, [r2] ");			// ATOMIC {r3=iQueued; iQueued=TRUE}
#endif

	asm("ldr r1, __PendingDfcQueue ");	// r1 points to DFC pending queue

	asm("cmp r3, #0 ");					// check if already queued
	asm("addeq r3, r1, #4 ");			// if not r3=&TheScheduler.iDfcs.iPrev ...
	asm("streq r1, [r0, #0] ");			// ...iNext=&TheScheduler.iDfcs ...

#ifdef __CPU_ARM_HAS_LDREX_STREX
	asm("movne r0, #0 ");
	asm("bne dontswap ");				// easier this way
	asm("try2: ");
	LDREX(2, 3);						// read
	STREX(12, 0, 3);					// write
	asm("teq r12, #0 ");				// success? also restore eq
	asm("bne try2 ");					// no!
	asm("mov r12, #1");
#else
	asm("swpeq r2, r0, [r3] ");			// ...ATOMIC {r2=last; last=this} ...
#endif

	asm("streqb r12, [r1, #%a0]" : : "i" (_FOFF(TScheduler,iDfcPendingFlag)-_FOFF(TScheduler,iDfcs)));
	asm("streq r0, [r2, #0] ");			// ...old last->iNext=this ...
	asm("streq r2, [r0, #4] ");			// ...iPrev=old last

	// NOTE: R0=this != 0

	asm("dontswap: ");
	__JUMP(,lr);

	asm("__PendingDfcQueue: ");
	asm(".word %a0" : : "i" ((TInt)&TheScheduler.iDfcs));
	}


/** Queues a DFC (not an IDFC) from an IDFC or thread with preemption disabled.

	This function is the preferred way to queue a DFC from an IDFC. It should not
	be used to queue an IDFC - use TDfc::Add() for this.

	This function does nothing if the DFC is already queued.

	@pre	Call only from IDFC or thread with the kernel locked.
	@pre	Do not call from ISR or thread with the kernel unlocked.
	@return	TRUE if DFC was actually queued by this call
			FALSE if DFC was already queued on entry so this call did nothing

	@see TDfc::Add()
	@see TDfc::Enque()
 */
__NAKED__ EXPORT_C TBool TDfc::DoEnque()
	{
	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_NO_RESCHED);
#ifdef _DEBUG
	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TDfc,iDfcQ));
	asm("cmp r1, #0 ");
	asm("bne 1f ");
	ASM_CHECK_PRECONDITIONS(MASK_ALWAYS_FAIL);
	asm("1: ");
#endif

#if defined(__CPU_ARM_HAS_LDREX_STREX_V6K)
	asm("add r2, r0, #%a0" : : "i" _FOFF(TDfc, iQueued));	// r2=&iQueued's byte offset
	asm("mov r3, #1 ");

	asm("tryagain8: ");
	LDREXB(1, 2);						// r1 = iQueued
	STREXB(12, 3, 2);					// Try setting iQueued = True
	asm("teq r12, #1 ");				// worked?
	asm("beq tryagain8 ");				// nope
	// r3 = 1, r1 = old iQueued
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
	asm("add r0, r0, #8 ");				// align address (struct always aligned)
	asm("tryagain8: ");
	LDREX(2, 0);						// do the load/store half
	asm("bic r12, r2, #0xff000000 ");	// knock out unwanted bits
	asm("orr r12, r12, #0x01000000 ");	// 'looking' value
	STREX(1, 12, 0);					// write looking value
	asm("teq r1, #1 ");					// worked?
	asm("beq tryagain8 ");				// nope
	asm("mov r1, r2, lsr #24 ");		// extract previous value byte
	asm("sub r0, r0, #8 ");				// restore base pointer
	asm("mov r3, #1 ");					// dfc_enque_1 expects r3 = 1
#else
	asm("add r12, r0, #11 ");			// r12=&iQueued
	asm("mov r3, #1 ");
	asm("swpb r1, r3, [r12] ");			// ATOMIC {r1=iQueued; iQueued=TRUE}
#endif

	asm("ldrb r12, [r0, #8] ");			// r12=iPriority
	asm("ldr r2, [r0, #20] ");			// r2=iDfcQ
	asm("cmp r1, #0 ");					// check if queued
	asm("beq dfc_enque_1 ");			// if not, queue it and return with R0 nonzero
	asm("mov r0, #0 ");
	__JUMP(,lr);
	}
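
// Usage sketch (illustrative only; iDfc is a hypothetical TDfc whose DFC queue
// has already been set). From an IDFC, or from a thread that has locked the
// kernel, DoEnque() is the preferred queuing call:
//
//	NKern::Lock();
//	iDfc.DoEnque();		// returns FALSE if it was already queued
//	NKern::Unlock();
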
#endif

#ifdef __FAST_MUTEX_MACHINE_CODED__

__ASSERT_COMPILE(_FOFF(NFastMutex,iHoldingThread) == 0);

/** Releases a previously acquired fast mutex.

	Generally threads would use NKern::FMSignal() which manipulates the kernel lock
	for you.

	@pre	The calling thread must hold the mutex.
	@pre	Kernel must be locked.

	@post	Kernel is locked.

	@see NFastMutex::Wait()
	@see NKern::FMSignal()
 */
EXPORT_C __NAKED__ void NFastMutex::Signal()
	{
	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
	ASM_DEBUG1(FMSignal,r0);
	asm("ldr r2, __TheScheduler ");
#ifdef BTRACE_FAST_MUTEX
	asm("ldrb r1, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
	asm("cmp r1, #0");
	asm("bne fastmutex_signal_trace");
	asm("no_fastmutex_signal_trace:");
#endif
	asm("mov r12, #0 ");
	asm("str r12, [r0], #%a0" : : "i" _FOFF(NFastMutex,iWaiting));	// iHoldingThread=NULL, r0->iWaiting
	asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// r1=iCurrentThread
	asm("ldr r3, [r0] ");				// r3=iWaiting
	asm("str r12, [r0] ");				// iWaiting=FALSE
	asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));	// current thread->iHeldFastMutex=NULL
	asm("cmp r3, #0 ");					// check waiting flag
	asm("bne 2f ");
	asm("1: ");
	__JUMP(,lr);						// if clear, finished
	asm("2: ");
	asm("ldr r12, [r1, #%a0]" : : "i" _FOFF(NThread,iCsFunction));
	asm("strb r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));	// Assumes iWaiting!=0 mod 256
	asm("cmp r12, #0 ");				// check for outstanding CS function
	asm("beq 1b ");						// if none, finished
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iCsCount));	// else check CS count
	asm("mov r0, r1 ");
	asm("cmp r2, #0 ");
	__JUMP(ne,lr);						// if nonzero, finished
	asm("DoDoCsFunction: ");
	asm("stmfd sp!, {r11,lr} ");
	asm("mov r11, sp ");
	asm("bic sp, sp, #4 ");
	asm("bl " CSM_ZN11NThreadBase12DoCsFunctionEv);	// if iCsCount=0, DoCsFunction()
	asm("mov sp, r11 ");
	asm("ldmfd sp!, {r11,pc} ");

#ifdef BTRACE_FAST_MUTEX
	asm("fastmutex_signal_trace:");
	ALIGN_STACK_START;
	asm("stmdb sp!, {r0-r2,lr}");		// 4th item on stack is PC value for trace
	asm("bl fmsignal_lock_trace_unlock");
	asm("ldmia sp!, {r0-r2,lr}");
	ALIGN_STACK_END;
	asm("b no_fastmutex_signal_trace");
#endif
	}
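
// Usage sketch (illustrative only; iLock is a hypothetical NFastMutex). At
// this level the caller holds the kernel lock; NKern::FMSignal() below is the
// usual wrapper:
//
//	NKern::Lock();
//	iLock.Signal();		// release; any waiter is let in on the next reschedule
//	NKern::Unlock();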


/** Acquires the fast mutex.

	This will block until the mutex is available, and causes
	the thread to enter an implicit critical section until the mutex is released.

	Generally threads would use NKern::FMWait() which manipulates the kernel lock
	for you.

	@pre	Kernel must be locked, with lock count 1.

	@post	Kernel is locked, with lock count 1.
	@post	The calling thread holds the mutex.

	@see NFastMutex::Signal()
	@see NKern::FMWait()
 */
EXPORT_C __NAKED__ void NFastMutex::Wait()
	{
	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
	ASM_DEBUG1(FMWait,r0);
	asm("ldr r2, __TheScheduler ");
	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));	// r3=iHoldingThread
	asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// r1=iCurrentThread
	asm("cmp r3, #0 ");					// check if mutex held
	asm("bne fastmutex_wait_block ");
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));	// if not, iHoldingThread=current thread
	asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));	// and current thread->iHeldFastMutex=this
#ifdef BTRACE_FAST_MUTEX
	asm("ldrb r12, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
	asm("cmp r12, #0");
	asm("bne fmwait_trace2");
#endif
	__JUMP(,lr);						// and we're done
	asm("fastmutex_wait_block:");
	asm("str lr, [sp, #-4]! ");			// We must wait - save return address
	asm("mov r12, #1 ");
	asm("str r12, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting));	// iWaiting=TRUE
	asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));	// current thread->iWaitFastMutex=this
	asm("mov r0, r3 ");					// parameter for YieldTo
	ASM_DEBUG1(FMWaitYield,r0);
	asm("bl " CSM_ZN10TScheduler7YieldToEP11NThreadBase);	// yield to the mutex holding thread
	// will not return until the mutex is free
	// on return r0=Scheduler,r1=0,r2!=0,r3=current thread, kernel unlocked, interrupts disabled
	asm("mov r12, #1 ");
	asm("str r12, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel
	SET_INTS(r12, MODE_SVC, INTS_ALL_ON);	// reenable interrupts
	asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));	// r2=this
	asm("str r1, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));	// iWaitFastMutex=NULL
	asm("str r3, [r2, #0] ");			// iHoldingThread=current thread
	asm("str r2, [r3, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));	// current thread->iHeldFastMutex=this
#ifdef BTRACE_FAST_MUTEX
	asm("ldrb r12, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
	asm("cmp r12, #0");
	asm("bne fastmutex_wait_trace2");
#endif
	asm("ldr pc, [sp], #4 ");

#ifdef BTRACE_FAST_MUTEX
	asm("fastmutex_wait_trace2:");
	// r0=scheduler r2=mutex r3=thread
	asm("ldr lr, [sp], #4 ");
	ALIGN_STACK_START;
	asm("stmdb sp!, {r0-r2,lr}");		// 4th item on stack is PC value for trace
	asm("bl fmwait_lockacquiredwait_trace");
	asm("ldmia sp!, {r0-r2,lr}");
	ALIGN_STACK_END;
	__JUMP(,lr);
#endif
	}


/** Releases the System Lock.

	@pre	System lock must be held.

	@see NKern::LockSystem()
	@see NKern::FMSignal()
 */
EXPORT_C __NAKED__ void NKern::UnlockSystem()
	{
	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
	ASM_CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED);
	asm("ldr r0, __SystemLock ");
	}
1116 |
||
1117 |
||
1118 |
/** Releases a previously acquired fast mutex.

    @param aMutex The fast mutex to be released.

    @pre The calling thread must hold the mutex.

    @see NFastMutex::Signal()
    @see NKern::FMWait()
*/
EXPORT_C __NAKED__ void NKern::FMSignal(NFastMutex*)
    {
    // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
    ASM_DEBUG1(NKFMSignal,r0);

    asm("ldr r2, __TheScheduler ");
#ifdef BTRACE_FAST_MUTEX
    asm("ldrb r1, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
    asm("cmp r1, #0");
    asm("bne fmsignal_trace1");
    asm("no_fmsignal_trace1:");
#endif

#ifdef __CPU_ARM_HAS_CPS
    asm("mov r12, #0 ");
    CPSIDIF;                            // disable interrupts
    asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting));          // r3=iWaiting
    asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // r1=iCurrentThread
    asm("str r12, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));   // iHoldingThread=NULL
    asm("cmp r3, #0 ");                 // check waiting flag
    asm("str r12, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting));         // iWaiting=FALSE
    asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));      // current thread->iHeldFastMutex=NULL
    asm("bne 1f ");
    CPSIEIF;                            // reenable interrupts
    __JUMP(,lr);                        // if clear, finished
    asm("1: ");
    asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));     // lock the kernel if set (assumes iWaiting always 0 or 1)
    CPSIEIF;                            // reenable interrupts
#else
    SET_INTS_1(r3, MODE_SVC, INTS_ALL_OFF);
    asm("mov r12, #0 ");
    asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // r1=iCurrentThread
    SET_INTS_2(r3, MODE_SVC, INTS_ALL_OFF);     // disable interrupts
    asm("str r12, [r0], #%a0" : : "i" _FOFF(NFastMutex,iWaiting));         // iHoldingThread=NULL, r0->iWaiting
    asm("ldr r3, [r0] ");               // r3=iWaiting
    asm("str r12, [r0] ");              // iWaiting=FALSE
    asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));      // current thread->iHeldFastMutex=NULL
    asm("mov r12, #0x13 ");
    asm("cmp r3, #0 ");                 // check waiting flag
    __MSR_CPSR_C(eq, r12);              // if clear, finished
    __JUMP(eq,lr);
    asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));     // lock the kernel (assumes iWaiting always 0 or 1)
    asm("msr cpsr_c, r12 ");            // reenable interrupts
#endif
    asm("strb r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
    asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(NThread,iCsFunction));          // r3=current thread->iCsFunction
    asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iCsCount));             // r2=current thread->iCsCount
    asm("str lr, [sp, #-4]! ");
    asm("cmp r3, #0 ");                 // outstanding CS function?
    asm("beq 2f ");                     // branch if not
    asm("cmp r2, #0 ");                 // iCsCount!=0 ?
    asm("moveq r0, r1 ");               // if iCsCount=0, DoCsFunction()
    asm("bleq DoDoCsFunction ");
    asm("2: ");
    asm("bl " CSM_ZN10TScheduler10RescheduleEv);    // reschedule to allow waiting thread in
    SET_INTS(r12, MODE_SVC, INTS_ALL_ON);           // reenable interrupts after reschedule
    asm("ldr pc, [sp], #4 ");

#ifdef BTRACE_FAST_MUTEX
    asm("fmsignal_trace1:");
    ALIGN_STACK_START;
    asm("stmdb sp!, {r0-r2,lr}");       // 4th item on stack is PC value for trace
    asm("bl fmsignal_lock_trace_unlock");
    asm("ldmia sp!, {r0-r2,lr}");
    ALIGN_STACK_END;
    asm("b no_fmsignal_trace1");
#endif
    }
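
// Usage sketch (illustrative only, excluded from the build): a short critical
// section guarded by a fast mutex. 'ExampleLock' and the helper are
// hypothetical; the FMWait/FMSignal preconditions documented above still
// apply to the caller.
#if 0
static NFastMutex ExampleLock;

static void ExampleCriticalSection()
    {
    NKern::FMWait(&ExampleLock);      // acquire; may yield to the current holder
    // ... operate on state shared with other threads ...
    NKern::FMSignal(&ExampleLock);    // release; reschedules if a waiter was queued
    }
#endif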
|
/** Acquires the System Lock.

    This will block until the mutex is available, and causes
    the thread to enter an implicit critical section until the mutex is released.

    @post System lock is held.

    @see NKern::UnlockSystem()
    @see NKern::FMWait()

    @pre No fast mutex can be held.
    @pre Kernel must be unlocked.
    @pre Call in a thread context.
    @pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::LockSystem()
    {
    ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NO_FAST_MUTEX|MASK_NOT_ISR|MASK_NOT_IDFC);
    // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
    asm("ldr r0, __SystemLock ");
    }
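
// Usage sketch (illustrative only, excluded from the build): the system lock
// is a distinguished fast mutex (TheScheduler.iLock), so it follows the same
// acquire/release discipline as NKern::FMWait()/NKern::FMSignal().
#if 0
static void ExampleSystemLockSection()
    {
    NKern::LockSystem();      // blocks until the system lock is free
    // ... short operation requiring the system lock ...
    NKern::UnlockSystem();    // release; may trigger a reschedule on contention
    }
#endif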
|
/** Acquires a fast mutex.

    This will block until the mutex is available, and causes
    the thread to enter an implicit critical section until the mutex is released.

    @param aMutex The fast mutex to be acquired.

    @post The calling thread holds the mutex.

    @see NFastMutex::Wait()
    @see NKern::FMSignal()

    @pre No fast mutex can be held.
    @pre Kernel must be unlocked.
    @pre Call in a thread context.
    @pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::FMWait(NFastMutex*)
    {
    ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NO_FAST_MUTEX|MASK_NOT_ISR|MASK_NOT_IDFC);
    // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
    ASM_DEBUG1(NKFMWait,r0);
    asm("ldr r2, __TheScheduler ");

#ifdef __CPU_ARM_HAS_CPS
    CPSIDIF;                            // disable interrupts
    asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));    // r3=iHoldingThread
    asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // r1=iCurrentThread
    asm("cmp r3, #0 ");                 // check if mutex held
    asm("bne 1f");
    asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));    // iHoldingThread=current thread
    asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));       // and current thread->iHeldFastMutex=this
    CPSIEIF;                            // reenable interrupts
#ifdef BTRACE_FAST_MUTEX
    asm("ldrb r12, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
    asm("cmp r12, #0");
    asm("bne fmwait_trace2");
#endif
    __JUMP(,lr);                        // we're finished
    asm("1: ");
    asm("mov r3, #1 ");
    asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));     // mutex held, so lock the kernel
    CPSIEIF;                            // reenable interrupts
#else
    asm("mov r3, #0xd3 ");
    asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // r1=iCurrentThread
    asm("msr cpsr, r3 ");               // disable interrupts
    asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));    // r3=iHoldingThread
    asm("mov r12, #0x13 ");
    asm("cmp r3, #0");                  // check if mutex held
    asm("streq r1, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));  // if not, iHoldingThread=current thread
    asm("streq r0, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));     // and current thread->iHeldFastMutex=this
    __MSR_CPSR_C(eq, r12);              // and we're finished
#ifdef BTRACE_FAST_MUTEX
    asm("bne no_fmwait_trace2");
    asm("ldrb r12, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
    asm("cmp r12, #0");
    asm("bne fmwait_trace2");
    __JUMP(,lr);
    asm("no_fmwait_trace2:");
#endif
    __JUMP(eq,lr);
    asm("mov r3, #1 ");
    asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));     // mutex held, so lock the kernel
    asm("msr cpsr_c, r12 ");            // and reenable interrupts
#endif
    asm("str lr, [sp, #-4]! ");
    asm("str r3, [r0, #4] ");           // iWaiting=TRUE
    asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));       // current thread->iWaitFastMutex=this
    asm("ldr r0, [r0, #0] ");           // parameter for YieldTo
    ASM_DEBUG1(NKFMWaitYield,r0);
    asm("bl " CSM_ZN10TScheduler7YieldToEP11NThreadBase);   // yield to the mutex holding thread
    // will not return until the mutex is free
    // on return r0=Scheduler,r1=0,r2!=0,r3=current thread, kernel unlocked, interrupts disabled
    asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));       // r2=this
    asm("ldr lr, [sp], #4 ");
    asm("str r1, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));       // iWaitFastMutex=NULL
    asm("str r2, [r3, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));       // current thread->iHeldFastMutex=this
    asm("str r3, [r2, #0] ");           // iHoldingThread=current thread
    SET_INTS(r12, MODE_SVC, INTS_ALL_ON);
#ifdef BTRACE_FAST_MUTEX
    asm("ldrb r12, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
    asm("cmp r12, #0");
    asm("bne fmwait_trace3");
#endif
    __JUMP(,lr);

#ifdef BTRACE_FAST_MUTEX
    asm("fmwait_trace2:");
    // r0=mutex r1=thread r2=scheduler
    ALIGN_STACK_START;
    asm("stmdb sp!, {r0-r2,lr}");       // 4th item on stack is PC value for trace
    asm("bl fmwait_lockacquiredwait_trace2");
    asm("ldmia sp!, {r0-r2,lr}");
    ALIGN_STACK_END;
    __JUMP(,lr);

    asm("fmwait_trace3:");
    // r0=scheduler r2=mutex r3=thread
    ALIGN_STACK_START;
    asm("stmdb sp!, {r0-r2,lr}");       // 4th item on stack is PC value for trace
    asm("bl fmwait_lockacquiredwait_trace");
    asm("ldmia sp!, {r0-r2,lr}");
    ALIGN_STACK_END;
    __JUMP(,lr);
#endif
    }
|
#endif

__NAKED__ void TScheduler::YieldTo(NThreadBase*)
    {
    //
    // Enter in mode_svc with kernel locked, interrupts can be on or off
    // Exit in mode_svc with kernel unlocked, interrupts off
    // On exit r0=&TheScheduler, r1=0, r2!=0, r3=TheCurrentThread, r4-r11 unaltered
    // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
    //
    asm("mrs r1, spsr ");               // r1=spsr_svc
    asm("mov r2, r0 ");                 // r2=new thread
    asm("ldr r0, __TheScheduler ");     // r0 points to scheduler data
    asm("stmfd sp!, {r1,r4-r11,lr} ");  // store registers and return address
#ifdef __CPU_ARM_USE_DOMAINS
    asm("mrc p15, 0, r12, c3, c0, 0 "); // r12=DACR
#endif
    asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // r1=iCurrentThread
#ifdef __CPU_HAS_VFP
    VFP_FMRX(,FPEXC_REG,VFP_XREG_FPEXC);    // r10/r11=FPEXC
#endif
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
    GET_CAR(,r11);                      // r11=CAR
#endif
#ifdef __CPU_HAS_CP15_THREAD_ID_REG
    GET_RWRW_TID(,r9);                  // r9=Thread ID
#endif
#ifdef __CPU_SUPPORT_THUMB2EE
    GET_THUMB2EE_HNDLR_BASE(,r8);       // r8=Thumb-2EE Handler Base
#endif

    asm("sub sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE));  // make room for original thread, extras, sp_usr and lr_usr

    // Save the sp_usr and lr_usr and only the required coprocessor registers
    //                                  Thumb-2EE  TID  FPEXC      CAR  DACR
    asm("stmia sp, {" EXTRA_STACK_LIST( 8,         9,   FPEXC_REG, 11,  12) "r13-r14}^ ");
#if defined(__CPU_ARMV4) || defined(__CPU_ARMV4T) || defined(__CPU_ARMV5T)
    asm("nop ");                        // Can't have banked register access immediately after LDM/STM user registers
#endif
    asm("str sp, [r1, #%a0]" : : "i" _FOFF(NThread,iSavedSP));  // store original thread's stack pointer
    asm("b switch_threads ");
    }
|

#ifdef MONITOR_THREAD_CPU_TIME

#ifdef HIGH_RES_TIMER_COUNTS_UP
#define CALC_HIGH_RES_DIFF(Rd, Rn, Rm)  asm("sub "#Rd", "#Rn", "#Rm)
#else
#define CALC_HIGH_RES_DIFF(Rd, Rn, Rm)  asm("rsb "#Rd", "#Rn", "#Rm)
#endif

// Update thread cpu time counters
// Called just before thread switch with r2 == new thread
// Corrupts r3-r8, Leaves r5=current Time, r6=current thread
#define UPDATE_THREAD_CPU_TIME \
    asm("ldr r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); \
    GET_HIGH_RES_TICK_COUNT(r5); \
    asm("ldr r3, [r6, #%a0]" : : "i" _FOFF(NThreadBase,iLastStartTime)); \
    asm("str r5, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iLastStartTime)); \
    CALC_HIGH_RES_DIFF(r4, r5, r3); \
    asm("add r3, r6, #%a0" : : "i" _FOFF(NThreadBase,iTotalCpuTime)); \
    asm("ldmia r3, {r7-r8}"); \
    asm("adds r7, r7, r4"); \
    asm("adc r8, r8, #0"); \
    asm("stmia r3, {r7-r8}")

#else
#define UPDATE_THREAD_CPU_TIME
#endif
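
// Rough C equivalent of UPDATE_THREAD_CPU_TIME (a sketch for exposition only;
// HighResTick() and the exact field types are assumptions). The adds/adc pair
// in the macro accumulates a 32-bit tick delta into a 64-bit running total:
#if 0
static void UpdateThreadCpuTime(NThreadBase* aCurrent, NThreadBase* aNew)
    {
    TUint32 now = HighResTick();                    // hypothetical tick read
    TUint32 delta = now - aCurrent->iLastStartTime; // rsb, not sub, if the timer counts down
    aNew->iLastStartTime = now;
    aCurrent->iTotalCpuTime += delta;               // 64-bit accumulate (adds/adc)
    }
#endif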
|
// EMI - Schedule Logging
// Needs: r0=TScheduler, r2 = new thread
// If CPU_TIME, needs: r5=time, r6=current thread
// preserve r0 r2 r9(new address space), r10(&iLock), sp. Trashes r3-r8, lr

#ifdef __EMI_SUPPORT__
#define EMI_EVENTLOGGER \
    asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLogging)); \
    asm("cmp r3,#0"); \
    asm("blne AddTaskSwitchEvent");

// Needs: r0=TScheduler, r2 = new thread
#define EMI_CHECKDFCTAG(no) \
    asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iEmiMask)); \
    asm("ldr r4, [r2,#%a0]" : : "i" _FOFF(NThread, iTag)); \
    asm("ands r3, r3, r4"); \
    asm("bne emi_add_dfc" #no); \
    asm("check_dfc_tag_done" #no ": ");

#define EMI_ADDDFC(no) \
    asm("emi_add_dfc" #no ": "); \
    asm("ldr r4, [r0,#%a0]" : : "i" _FOFF(TScheduler, iEmiDfcTrigger)); \
    asm("mov r5, r2"); \
    asm("orr r4, r3, r4"); \
    asm("str r4, [r0,#%a0]" : : "i" _FOFF(TScheduler, iEmiDfcTrigger)); \
    asm("mov r6, r0"); \
    asm("ldr r0, [r0,#%a0]" : : "i" _FOFF(TScheduler, iEmiDfc)); \
    asm("bl " CSM_ZN4TDfc3AddEv); \
    asm("mov r2, r5"); \
    asm("mov r0, r6"); \
    asm("b check_dfc_tag_done" #no);

#else
#define EMI_EVENTLOGGER
#define EMI_CHECKDFCTAG(no)
#define EMI_ADDDFC(no)
#endif
|

__ASSERT_COMPILE(_FOFF(NThread,iPriority) == _FOFF(NThread,iPrev) + 4);
__ASSERT_COMPILE(_FOFF(NThread,i_ThrdAttr) == _FOFF(NThread,iPriority) + 2);
__ASSERT_COMPILE(_FOFF(NThread,iHeldFastMutex) == _FOFF(NThread,i_ThrdAttr) + 2);
__ASSERT_COMPILE(_FOFF(NThread,iWaitFastMutex) == _FOFF(NThread,iHeldFastMutex) + 4);
__ASSERT_COMPILE(_FOFF(NThread,iAddressSpace) == _FOFF(NThread,iWaitFastMutex) + 4);
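// These layout assertions guarantee the field ordering relied on below, where
// a single ldmia fetches the priority/attributes word, iHeldFastMutex,
// iWaitFastMutex and iAddressSpace from consecutive words of the NThread.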
|

__NAKED__ void TScheduler::Reschedule()
    {
    //
    // Enter in mode_svc with kernel locked, interrupts can be on or off
    // Exit in mode_svc with kernel unlocked, interrupts off
    // On exit r0=&TheScheduler, r1=0, r3=TheCurrentThread, r4-r11 unaltered
    // r2=0 if no reschedule occurred, non-zero if a reschedule did occur.
    // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
    //
    asm("ldr r0, __TheScheduler ");     // r0 points to scheduler data
    asm("str lr, [sp, #-4]! ");         // save return address
    SET_INTS(r3, MODE_SVC, INTS_ALL_OFF);   // interrupts off
    asm("ldrb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iDfcPendingFlag));
    asm("mov r2, #0 ");                 // start with r2=0
    asm("cmp r1, #0 ");                 // check if DFCs pending

    asm("start_resched: ");
    asm("blne " CSM_ZN10TScheduler9QueueDfcsEv);    // queue any pending DFCs - PRESERVES R2
    asm("ldrb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
    SET_INTS_1(r3, MODE_SVC, INTS_ALL_ON);
    asm("cmp r1, #0 ");                 // check if a reschedule is required
    asm("beq no_resched_needed ");      // branch out if not
    SET_INTS_2(r3, MODE_SVC, INTS_ALL_ON);  // enable interrupts
    asm("mrs r2, spsr ");               // r2=spsr_svc
    asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
    asm("stmfd sp!, {r2,r4-r11} ");     // store registers and return address
#ifdef __CPU_HAS_VFP
    VFP_FMRX(,FPEXC_REG,VFP_XREG_FPEXC);    // r10/r11=FPEXC
#endif
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
    GET_CAR(,r11);                      // r11=CAR
#endif
#ifdef __CPU_HAS_CP15_THREAD_ID_REG
    GET_RWRW_TID(,r9);                  // r9=Thread ID
#endif
#ifdef __CPU_ARM_USE_DOMAINS
    asm("mrc p15, 0, r12, c3, c0, 0 "); // r12=DACR
#endif
#ifdef __CPU_SUPPORT_THUMB2EE
    GET_THUMB2EE_HNDLR_BASE(,r8);       // r8=Thumb-2EE Handler Base
#endif
    asm("ldr lr, [r0, #4] ");           // lr=present mask high
    asm("sub sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE));  // make room for extras, sp_usr and lr_usr
    asm("str sp, [r1, #%a0]" : : "i" _FOFF(NThread,iSavedSP));  // store original thread's stack pointer

    // Save the sp_usr and lr_usr and only the required coprocessor registers
    //                                  Thumb-2EE  TID  FPEXC      CAR  DACR
    asm("stmia sp, {" EXTRA_STACK_LIST( 8,         9,   FPEXC_REG, 11,  12) "r13-r14}^ ");
    // NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers

    asm("ldr r1, [r0], #%a0" : : "i" _FOFF(TScheduler,iQueue));     // r1=present mask low, r0=&iQueue[0]
#ifdef __CPU_ARM_HAS_CLZ
    CLZ(12,14);                         // r12=31-MSB(r14)
    asm("subs r12, r12, #32 ");         // r12=-1-MSB(r14), 0 if r14=0
    CLZcc(CC_EQ,12,1);                  // if r14=0, r12=31-MSB(r1)
    asm("rsb r12, r12, #31 ");          // r12=highest ready thread priority
#else
    asm("mov r12, #31 ");               // find the highest priority ready thread
    asm("cmp r14, #0 ");                // high word nonzero?
    asm("moveq r14, r1 ");              // if zero, r14=low word
    asm("movne r12, #63 ");             // else start at pri 63
    asm("cmp r14, #0x00010000 ");
    asm("movlo r14, r14, lsl #16 ");
    asm("sublo r12, r12, #16 ");
    asm("cmp r14, #0x01000000 ");
    asm("movlo r14, r14, lsl #8 ");
    asm("sublo r12, r12, #8 ");
    asm("cmp r14, #0x10000000 ");
    asm("movlo r14, r14, lsl #4 ");
    asm("sublo r12, r12, #4 ");
    asm("cmp r14, #0x40000000 ");
    asm("movlo r14, r14, lsl #2 ");
    asm("sublo r12, r12, #2 ");
    asm("cmp r14, #0x80000000 ");
    asm("sublo r12, r12, #1 ");         // r12 now equals highest ready priority
#endif
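    // Both code paths above compute the bit number of the most significant set
    // bit of the 64-bit ready mask {r14:r1}, i.e. the highest ready thread
    // priority. A C sketch of the non-CLZ binary search (exposition only):
    //     TInt pri = 31; TUint32 w = high ? (pri = 63, high) : low;
    //     if (w < 0x00010000u) { w <<= 16; pri -= 16; }
    //     if (w < 0x01000000u) { w <<= 8;  pri -= 8;  }
    //     if (w < 0x10000000u) { w <<= 4;  pri -= 4;  }
    //     if (w < 0x40000000u) { w <<= 2;  pri -= 2;  }
    //     if (w < 0x80000000u) { pri -= 1; }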
|
    asm("ldr r2, [r0, r12, lsl #2] ");  // r2=pointer to highest priority thread's link field
    asm("sub r0, r0, #%a0" : : "i" _FOFF(TScheduler,iQueue));
    asm("mov r4, #0 ");
    asm("ldmia r2, {r3,r5-r9,lr} ");    // r3=next r5=prev r6=attributes, r7=heldFM, r8=waitFM, r9=address space
                                        // lr=time
    asm("add r10, r0, #%a0" : : "i" _FOFF(TScheduler,iLock));
    asm("strb r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));    // clear flag
    ASM_DEBUG1(InitSelection,r2);
    asm("cmp lr, #0 ");                 // check if timeslice expired
    asm("bne no_other ");               // skip if not
    asm("cmp r3, r2 ");                 // check for thread at same priority
    asm("bne round_robin ");            // branch if there is one
    asm("no_other: ");
    asm("cmp r7, #0 ");                 // does this thread hold a fast mutex?
    asm("bne holds_fast_mutex ");       // branch if it does
    asm("cmp r8, #0 ");                 // is thread blocked on a fast mutex?
    asm("bne resched_blocked ");        // branch out if it is

    asm("resched_not_blocked: ");
    asm("tst r6, #%a0" : : "i" ((TInt)KThreadAttImplicitSystemLock<<16));  // implicit system lock required?
#if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
    asm("beq resched_end ");            // no, switch to this thread
    asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread));  // yes, look at system lock holding thread
    asm("cmp r1, #0 ");                 // lock held?
    asm("beq resched_end ");            // no, switch to this thread
    asm("b resched_imp_sys_held ");
#else
    asm("ldrne r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // yes, look at system lock holding thread
    asm("beq resched_end ");            // no, switch to this thread
    asm("cmp r1, #0 ");                 // lock held?
    asm("ldreq r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace));   // no, get current address space ptr
    asm("bne resched_imp_sys_held ");
    asm("tst r6, #%a0" : : "i" ((TInt)KThreadAttAddressSpace<<16));        // does thread require address space switch?
    asm("cmpne r9, r5 ");               // change of address space required?
    asm("beq resched_end ");            // branch if not

    ASM_DEBUG1(Resched,r2)              // r2->new thread
    UPDATE_THREAD_CPU_TIME;
    EMI_EVENTLOGGER;
    EMI_CHECKDFCTAG(1)

#ifdef BTRACE_CPU_USAGE
    asm("ldrb r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
    asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP));             // restore new thread's stack pointer
    asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // iCurrentThread=r2
    asm("cmp r1, #0");
    asm("blne context_switch_trace");
#else
    asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP));             // restore new thread's stack pointer
    asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // iCurrentThread=r2
#endif

#ifdef __CPU_HAS_ETM_PROCID_REG
    asm("mcr p15, 0, r2, c13, c0, 1 "); // notify ETM of new thread
#endif
    SET_INTS_1(r12, MODE_SVC, INTS_ALL_OFF);
#if EXTRA_STACK_SPACE==0 && defined(__CPU_ARM9_USER_LDM_BUG)
    asm("mov r1, sp ");
    asm("ldmia r1, {r13,r14}^ ");       // restore sp_usr and lr_usr
    // NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers
#else
    // Load the sp_usr and lr_usr and only the required coprocessor registers
    //                                  Thumb-2EE  TID  FPEXC  CAR  DACR
    asm("ldmia sp, {" EXTRA_STACK_LIST( 3,         4,   5,     6,   11) "r13-r14}^ ");
    // NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers
#endif
    asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // iLock.iHoldingThread=new thread
    asm("str r10, [r2, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));      // current thread->iHeldFastMutex=&iLock
#ifdef BTRACE_FAST_MUTEX
    asm("ldrb lr, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
    asm("cmp lr, #0");
    asm("blne reschedule_syslock_wait_trace");
#endif

#ifdef __CPU_SUPPORT_THUMB2EE
    SET_THUMB2EE_HNDLR_BASE(,r3);
#endif
#ifdef __CPU_HAS_CP15_THREAD_ID_REG
    SET_RWRW_TID(,r4);
#endif
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
    SET_CAR(,r6)
#endif
#ifdef __CPU_ARM_USE_DOMAINS
    asm("mcr p15, 0, r11, c3, c0, 0 ");
#endif
#ifdef __CPU_HAS_VFP
    VFP_FMXR(,VFP_XREG_FPEXC,5);        // restore FPEXC from R5
#endif
    asm("add sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE));  // step past sp_usr and lr_usr

    // Do process switching
    // Handler called with:
    // r0->scheduler, r2->current thread
    // r9->new address space, r10->system lock
    // Must preserve r0,r2, can modify other registers
    CPWAIT(,r1);
    SET_INTS_2(r12, MODE_SVC, INTS_ALL_OFF);    // disable interrupts
    asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
    asm("mov r3, r2 ");
    asm("cmp r1, #0 ");
    asm("streq r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));   // unlock the kernel
    asm("blne " CSM_ZN10TScheduler10RescheduleEv);
    SET_INTS(r12, MODE_SVC, INTS_ALL_ON);   // kernel is now unlocked, interrupts enabled, system lock held
    asm("mov r2, r3 ");
    asm("mov lr, pc ");
    asm("ldr pc, [r0, #%a0]" : : "i" _FOFF(TScheduler,iProcessHandler));   // do process switch

    asm("mov r1, #1 ");
    asm("mov r4, #0 ");
    asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));     // lock the kernel
    asm("mov r3, r2 ");                 // r3->new thread
    asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));    // check system lock wait flag
    asm("str r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // release system lock
    asm("str r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
    asm("str r4, [r3, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
#ifdef BTRACE_FAST_MUTEX
    asm("ldrb lr, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
    asm("cmp lr, #0");
    asm("blne reschedule_syslock_signal_trace");
#endif
    asm("cmp r2, #0 ");
    asm("beq switch_threads_2 ");       // no contention on system lock
    asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(NThread,iCsFunction));
    asm("ldr r12, [r3, #%a0]" : : "i" _FOFF(NThread,iCsCount));
    asm("strb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); // contention - need to reschedule again
    asm("cmp r2, #0 ");                 // outstanding CS function?
    asm("beq switch_threads_2 ");       // branch if not
    asm("cmp r12, #0 ");                // iCsCount!=0 ?
    asm("bne switch_threads_2 ");       // branch if it is
    asm("ldr r1, [sp, #0] ");           // r1=spsr_svc for this thread
    asm("mov r4, r0 ");
    asm("mov r5, r3 ");
    asm("msr spsr, r1 ");               // restore spsr_svc
    asm("mov r0, r3 ");                 // if iCsCount=0, DoCsFunction()
    asm("bl DoDoCsFunction ");
    asm("mov r0, r4 ");
    asm("mov r3, r5 ");
    asm("b switch_threads_2 ");
#endif  // __MEMMODEL_MULTIPLE__ || __MEMMODEL_FLEXIBLE__
|

    asm("round_robin: ");               // get here if thread's timeslice has expired and there is another
                                        // thread ready at the same priority
    asm("ldr r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iTimeSliceExpireCounter));   // update the time slice expiry count here,
    asm("add r6,r6, #1");               // ie the equivalent of 'iTimeSliceExpireCounter++;'
    asm("str r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iTimeSliceExpireCounter));

    asm("cmp r7, #0 ");                 // does this thread hold a fast mutex?
    asm("bne rr_holds_fast_mutex ");
    asm("ldr lr, [r2, #%a0]" : : "i" _FOFF(NThread,iTimeslice));
    asm("add r0, r0, #%a0" : : "i" _FOFF(TScheduler,iQueue));
    asm("str r3, [r0, r12, lsl #2] ");  // first thread at this priority is now the next one
    asm("str lr, [r2, #%a0]" : : "i" _FOFF(NThread,iTime));     // fresh timeslice
    ASM_DEBUG1(RR,r3);
    asm("add r3, r3, #%a0" : : "i" _FOFF(NThread,iPriority));
    asm("ldmia r3, {r6-r9} ");          // r6=attributes, r7=heldFM, r8=waitFM, r9=address space
    asm("sub r2, r3, #%a0" : : "i" _FOFF(NThread,iPriority));   // move to next thread at this priority
    asm("sub r0, r0, #%a0" : : "i" _FOFF(TScheduler,iQueue));
    asm("b no_other ");
|

    asm("resched_blocked: ");           // get here if thread is blocked on a fast mutex
    ASM_DEBUG1(BlockedFM,r8)
    asm("ldr r3, [r8, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));    // if so, get holding thread
    asm("cmp r3, #0 ");                 // mutex now free?
    asm("beq resched_not_blocked ");
    asm("mov r2, r3 ");                 // no, switch to holding thread
    asm("b resched_end ");

    asm("holds_fast_mutex: ");
#if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
    asm("cmp r7, r10 ");                // does this thread hold system lock?
    asm("tstne r6, #%a0" : : "i" (((TInt)KThreadAttImplicitSystemLock)<<16)); // if not, is implicit system lock required?
    asm("beq resched_end ");            // if neither, switch to this thread
    asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // check if system lock held
    asm("cmp r5, #0 ");
    asm("bne rr_holds_fast_mutex ");    // if implicit system lock contention, set waiting flag on held mutex but still schedule thread
    asm("b resched_end ");              // else switch to thread and finish
#else
    asm("cmp r7, r10 ");                // does this thread hold system lock?
    asm("beq resched_end ");            // if so, switch to it
    asm("tst r6, #%a0" : : "i" (((TInt)KThreadAttImplicitSystemLock)<<16));   // implicit system lock required?
    asm("ldrne r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // if so, check if system lock held
    asm("beq resched_end ");            // if lock not required, switch to thread and finish
    asm("cmp r5, #0 ");
    asm("bne rr_holds_fast_mutex ");    // if implicit system lock contention, set waiting flag on held mutex but still schedule thread
    asm("tst r6, #%a0" : : "i" (((TInt)KThreadAttAddressSpace)<<16));      // address space required?
    asm("ldrne r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace));   // if so, get current address space ptr
    asm("beq resched_end ");            // if not, switch to thread and finish
    asm("cmp r5, r9 ");                 // do we have correct address space?
    asm("beq resched_end ");            // yes, switch to thread and finish
    asm("b rr_holds_fast_mutex ");      // no, set waiting flag on fast mutex
#endif  // __MEMMODEL_MULTIPLE__ || __MEMMODEL_FLEXIBLE__
|

    asm("resched_imp_sys_held: ");      // get here if thread requires implicit system lock and lock is held
    ASM_DEBUG1(ImpSysHeld,r1)
    asm("mov r2, r1 ");                 // switch to holding thread
    asm("add r7, r0, #%a0" : : "i" _FOFF(TScheduler,iLock));    // set waiting flag on system lock

    asm("rr_holds_fast_mutex: ");       // get here if round-robin deferred due to fast mutex held
    asm("mov r6, #1 ");
    asm("str r6, [r7, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting));   // if so, set waiting flag

    asm("resched_end: ");
    ASM_DEBUG1(Resched,r2)

    asm("switch_threads: ");
    UPDATE_THREAD_CPU_TIME;
    EMI_EVENTLOGGER;
    EMI_CHECKDFCTAG(2)

#ifdef BTRACE_CPU_USAGE
    asm("ldrb r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
    asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP));             // restore new thread's stack pointer
    asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // iCurrentThread=r2
    asm("cmp r1, #0");
    asm("blne context_switch_trace");
#else
    asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP));             // restore new thread's stack pointer
    asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // iCurrentThread=r2
#endif

#if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
    asm("ldr r6, [r2, #%a0]" : : "i" _FOFF(NThread,iPriority));            // attributes into r6
    asm("ldr r9, [r2, #%a0]" : : "i" _FOFF(NThread,iAddressSpace));        // address space into r9
#else
#ifdef __CPU_HAS_ETM_PROCID_REG
    asm("mcr p15, 0, r2, c13, c0, 1 "); // notify ETM of new thread
#endif
#endif
#if EXTRA_STACK_SPACE==0 && defined(__CPU_ARM9_USER_LDM_BUG)
    asm("mov r3, sp ");
    asm("ldmia r3, {r13,r14}^ ");       // restore sp_usr and lr_usr
    // NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers
#else
    // Load the sp_usr and lr_usr and only the required coprocessor registers
    //                                  Thumb-2EE  TID  FPEXC       CAR  DACR
    asm("ldmia sp, {" EXTRA_STACK_LIST( 1,         3,   FPEXC_REG3, 10,  11) "r13-r14}^ ");
    // NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers
#endif
#ifdef __CPU_SUPPORT_THUMB2EE
    SET_THUMB2EE_HNDLR_BASE(,r1);
#endif
#ifdef __CPU_HAS_CP15_THREAD_ID_REG
    SET_RWRW_TID(,r3)                   // restore Thread ID from r3
#endif
    asm("mov r3, r2 ");                 // r3=TheCurrentThread
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
    SET_CAR(,r10)
#endif
#ifdef __CPU_ARM_USE_DOMAINS
    asm("mcr p15, 0, r11, c3, c0, 0 ");
#endif
#ifdef __CPU_HAS_VFP
    VFP_FMXR(,VFP_XREG_FPEXC,FPEXC_REG3);   // restore FPEXC from R4 or R10
#endif
    asm("add sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE));  // step past sp_usr and lr_usr
#if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
    // r2=r3=current thread here
    asm("tst r6, #%a0" : : "i" (((TInt)KThreadAttAddressSpace)<<16));      // address space required?
    asm("ldrne r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iProcessHandler)); // if so, get pointer to process handler
    asm("mov r2, r2, lsr #6 ");         // r2=current thread>>6
    asm("beq switch_threads_3 ");       // skip if address space change not required

    // Do address space switching
    // Handler called with:
    // r0->scheduler, r3->current thread
    // r9->new address space, r5->old address space
    // Return with r2 = (r2<<8) | ASID
    // Must preserve r0,r3, can modify other registers
    asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace));     // get current address space ptr
#ifdef __MEMMODEL_FLEXIBLE__
    asm("adr lr, switch_threads_5 ");
#else
    asm("adr lr, switch_threads_4 ");
#endif
    __JUMP(,r1);

    asm("switch_threads_3: ");
    asm("mrc p15, 0, r4, c13, c0, 1 "); // r4 = CONTEXTID (threadID:ASID)
    asm("and r4, r4, #0xff ");          // isolate ASID
    asm("orr r2, r4, r2, lsl #8 ");     // r2 = new thread ID : ASID
    __DATA_SYNC_BARRIER_Z__(r12);       // needed before change to ContextID

    asm("switch_threads_4: ");
#if (defined(__CPU_ARM1136__) || defined(__CPU_ARM1176__)) && !defined(__CPU_ARM1136_ERRATUM_408022_FIXED)
    asm("nop");
#endif
    asm("mcr p15, 0, r2, c13, c0, 1 "); // set ContextID (ASID + debugging thread ID)
    __INST_SYNC_BARRIER_Z__(r12);
#ifdef __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE
    asm("mcr p15, 0, r12, c7, c5, 6 "); // flush BTAC
#endif

    // asm("switch_threads_3: ");       // TEMPORARY UNTIL CONTEXTID BECOMES READABLE
    asm("switch_threads_5: ");
#if defined(__CPU_ARM1136__) && defined(__CPU_HAS_VFP) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED)
    VFP_FMRX(,14,VFP_XREG_FPEXC);
    asm("mrc p15, 0, r4, c1, c0, 1 ");
    asm("tst r14, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
    asm("bic r4, r4, #2 ");             // clear DB bit (disable dynamic prediction)
    asm("and r12, r4, #1 ");            // r2 bit 0 = RS bit (1 if return stack enabled)
    asm("orreq r4, r4, r12, lsl #1 ");  // if VFP is being disabled set DB = RS
    asm("mcr p15, 0, r4, c1, c0, 1 ");
#endif
#endif
    CPWAIT(,r12);
|

    asm("switch_threads_2: ");
    asm("resched_trampoline_hook_address: ");
    asm("ldmia sp!, {r2,r4-r11,lr} ");  // r2=spsr_svc, restore r4-r11 and return address
    asm("resched_trampoline_return: ");

    SET_INTS(r12, MODE_SVC, INTS_ALL_OFF);  // disable interrupts
    asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
    asm("msr spsr, r2 ");               // restore spsr_svc
    asm("cmp r1, #0 ");                 // check for another reschedule
    asm("streq r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));   // if not needed unlock the kernel
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571622_FIXED)
    asm("nop ");                        // ARM Cortex-A9 MPCore erratum 571622 workaround
                                        // Insert nops so branch doesn't occur in 2nd or 3rd position after a msr spsr
#endif
    __JUMP(eq,lr);                      // and return in context of new thread, with r2 non zero
    asm("str lr, [sp, #-4]! ");
    asm("b start_resched ");            // if necessary, go back to beginning

    asm("no_resched_needed: ");
    asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));     // else unlock the kernel
    asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // r3=iCurrentThread
    asm("ldr pc, [sp], #4 ");           // and exit immediately with r2=0 iff no reschedule occurred

    asm("__TheScheduler: ");
    asm(".word TheScheduler ");
    asm("__SystemLock: ");
    asm(".word %a0" : : "i" ((TInt)&TheScheduler.iLock));
#ifdef BTRACE_CPU_USAGE
    asm("context_switch_trace_header:");
    asm(".word %a0" : : "i" ((TInt)(8<<BTrace::ESizeIndex) + (BTrace::EContextIdPresent<<BTrace::EFlagsIndex*8) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::ENewThreadContext<<BTrace::ESubCategoryIndex*8)) );

    asm("context_switch_trace:");
    asm("ldr r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
    asm("stmdb sp!, {r0,r2,lr}");
    asm("ldr r0, context_switch_trace_header" );
    asm("mov lr, pc");
    __JUMP(,r1);
    asm("ldmia sp!, {r0,r2,pc}");
#endif

#ifdef __DEBUGGER_SUPPORT__
    asm("resched_trampoline: ");
    asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleHook));
    asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
    asm("mov r11, sp ");                // save stack pointer
    asm("bic sp, sp, #4 ");             // align stack to 8 byte boundary
    asm("tst r1, r1");
    asm("movne lr, pc");
    __JUMP(ne,r1);
    asm("ldr r0, __TheScheduler ");     // r0 points to scheduler data
    asm("mov sp, r11 ");                // restore stack pointer
    asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // r3=iCurrentThread
    asm("resched_trampoline_unhook_data: ");
    asm("ldmia sp!, {r2,r4-r11,lr} ");  // r2=spsr_svc, restore r4-r11 and return address
    asm("b resched_trampoline_return");
#endif

#ifdef __EMI_SUPPORT__
    // EMI Task Event Logger
    asm("AddTaskSwitchEvent: ");
#ifndef MONITOR_THREAD_CPU_TIME
    // if we don't have it, get CurrentThread
    asm("ldr r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
#endif

    // Check whether the new thread is loggable
    asm("ldrb r3, [r2,#%a0]" : : "i" _FOFF(NThread, i_ThrdAttr));
    asm("ldr r4, [r6,#%a0]" : : "i" _FOFF(NThread, iPriority));    // Load Spares. b2=state,b3=attributes

    asm("tst r3, #%a0" : : "i" ((TInt) KThreadAttLoggable));
    asm("ldreq r7, [r0, #%a0]" : : "i" _FOFF(TScheduler,iSigma));
    asm("movne r7,r2");

    // Check whether the old thread is loggable
    asm("tst r4, #%a0" : : "i" (KThreadAttLoggable << 16));
    asm("ldreq r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iSigma));

    // Abort log entry if duplicate
    asm("cmp r6,r7");
    __JUMP(eq,lr);

    // create record: r3=iType/iFlags/iExtra, r4=iUserState
    //                r5=iTime, r6=iPrevious, r7=iNext
    // waiting = (2nd byte of r4)!=NThread::EReady (=0)
#ifndef MONITOR_THREAD_CPU_TIME
    GET_HIGH_RES_TICK_COUNT(r5);
#endif

    asm("tst r4, #0xff00");
    asm("ldr r8, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferHead));
    asm("ldr r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iEmiState));
    asm("moveq r3, #0x200");            // #2 = waiting flag.
    asm("movne r3, #0x0");

    // Store record, move onto next
    asm("stmia r8!,{r3-r7}");

    // Check for and apply buffer wrap
    asm("ldr r7,[r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferEnd));     // r7 = BufferEnd
    asm("ldr r6,[r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferTail));    // r6 = BufferTail
    asm("cmp r7,r8");
    asm("ldrlo r8,[r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferStart));

    // Check for event lost
    asm("cmp r6,r8");
    asm("str r8, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferHead));   // r8 = BufferHead
    __JUMP(ne,lr);

    // overflow, move on read pointer - event lost!
    asm("add r6,r6,#%a0" : : "i" ((TInt) sizeof(TTaskEventRecord)));   // iBufferTail++
    asm("cmp r7,r6");                   // iBufferTail > iBufferEnd ?
    asm("ldrlo r6,[r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferStart));

    asm("ldrb r5, [r6, #%a0]" : : "i" _FOFF(TTaskEventRecord,iFlags));
    asm("orr r5, r5, #%a0" : : "i" ((TInt) KTskEvtFlag_EventLost));
    asm("strb r5, [r6, #%a0]" : : "i" _FOFF(TTaskEventRecord,iFlags));

    asm("str r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferTail));

    __JUMP(,lr);
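
    // Ring-buffer behaviour above, in outline (exposition only): the head
    // pointer wraps from iBufferEnd back to iBufferStart; if the new head
    // catches up with iBufferTail the buffer is full, so the tail is advanced
    // (wrapping likewise) and the record now at the tail is flagged with
    // KTskEvtFlag_EventLost so readers can detect the dropped event.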
|

#if !defined(__MEMMODEL_MULTIPLE__) && !defined(__MEMMODEL_FLEXIBLE__)
    EMI_ADDDFC(1)
#endif
    EMI_ADDDFC(2)
#endif

#ifdef BTRACE_FAST_MUTEX
    asm("reschedule_syslock_wait_trace:");
    // r0=scheduler r2=thread
    asm("stmdb sp!, {r3,r12}");
    ALIGN_STACK_START;
    asm("stmdb sp!, {r0-r2,lr}");       // 4th item on stack is PC value for trace
    asm("bl syslock_wait_trace");
    asm("ldmia sp!, {r0-r2,lr}");
    ALIGN_STACK_END;
    asm("ldmia sp!, {r3,r12}");
    __JUMP(,lr);

    asm("reschedule_syslock_signal_trace:");
    // r0=scheduler r3=thread
    asm("stmdb sp!, {r3,r12}");
    ALIGN_STACK_START;
    asm("stmdb sp!, {r0-r2,lr}");       // 4th item on stack is PC value for trace
    asm("bl syslock_signal_trace");
    asm("ldmia sp!, {r0-r2,lr}");
    ALIGN_STACK_END;
    asm("ldmia sp!, {r3,r12}");
    __JUMP(,lr);
#endif
    };
|

/**
 * Returns the range of linear memory which inserting the scheduler hooks needs to modify.
 *
 * @param aStart Set to the lowest memory address which needs to be modified.
 * @param aEnd   Set to the highest memory address +1 which needs to be modified.

    @pre Kernel must be locked.
    @pre Call in a thread context.
    @pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::SchedulerHooks(TLinAddr& aStart, TLinAddr& aEnd)
    {
    ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
    asm("adr r2,resched_trampoline_hook_address");
    asm("str r2,[r0]");
    asm("adr r2,resched_trampoline_hook_address+4");
    asm("str r2,[r1]");
#else
    asm("mov r2,#0");
    asm("str r2,[r0]");
    asm("str r2,[r1]");
#endif
    __JUMP(,lr);
    };
|

/**
 * Modifies the scheduler code so that it can call the function set by
 * NKern::SetRescheduleCallback().
 *
 * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.

    @pre Kernel must be locked.
    @pre Call in a thread context.
    @pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::InsertSchedulerHooks()
    {
    ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
    asm("adr r0,resched_trampoline_hook_address");
    asm("adr r1,resched_trampoline");
    asm("sub r1, r1, r0");
    asm("sub r1, r1, #8");
    asm("mov r1, r1, asr #2");
    asm("add r1, r1, #0xea000000");     // r1 = a branch instruction from resched_trampoline_hook_address to resched_trampoline

#if defined(__MMU_USE_SYMMETRIC_ACCESS_PERMISSIONS)
    // These platforms have shadow memory in non-writable page. We cannot use the standard
    // Epoc::CopyToShadowMemory interface as we hold Kernel lock here.
    // Instead, we'll temporarily disable access permission checking in MMU by switching
    // domain#0 into Manager Mode (see Domain Access Control Register).
    asm("mrs r12, CPSR ");              // save cpsr setting and ...
    CPSIDAIF;                           // ...disable interrupts
    asm("mrc p15, 0, r2, c3, c0, 0 ");  // read DACR
    asm("orr r3, r2, #3");              // domain #0 is the first two bits. manager mode is 11b
    asm("mcr p15, 0, r3, c3, c0, 0 ");  // write DACR
    asm("str r1,[r0]");
    asm("mcr p15, 0, r2, c3, c0, 0 ");  // write back the original value of DACR
    asm("msr CPSR_cxsf, r12 ");         // restore cpsr setting (re-enable interrupts)
#else
    asm("str r1,[r0]");
#endif

#endif
    __JUMP(,lr);
    };
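
// Worked example of the hook encoding above (exposition only): an ARM B
// instruction is 0xEA000000 | (((target - branch_address - 8) >> 2) & 0x00FFFFFF),
// where the -8 accounts for the two words of pipeline prefetch. If
// resched_trampoline were 0x100 bytes beyond the hook address, the word
// stored would be 0xEA000000 + ((0x100 - 8) >> 2) = 0xEA00003E.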
|

/**
 * Reverts the modification of the Scheduler code performed by NKern::InsertSchedulerHooks()
 *
 * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.

    @pre Kernel must be locked.
    @pre Call in a thread context.
    @pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::RemoveSchedulerHooks()
    {
    ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
    asm("adr r0,resched_trampoline_hook_address");
    asm("ldr r1,resched_trampoline_unhook_data");

#if defined(__MMU_USE_SYMMETRIC_ACCESS_PERMISSIONS)
    // See comments above in InsertSchedulerHooks
    asm("mrs r12, CPSR ");              // save cpsr setting and ...
    CPSIDAIF;                           // ...disable interrupts
    asm("mrc p15, 0, r2, c3, c0, 0 ");  // read DACR
    asm("orr r3, r2, #3");              // domain #0 is the first two bits. manager mode is 11b
    asm("mcr p15, 0, r3, c3, c0, 0 ");  // write DACR
    asm("str r1,[r0]");
    asm("mcr p15, 0, r2, c3, c0, 0 ");  // write back the original value of DACR
    asm("msr CPSR_cxsf, r12 ");         // restore cpsr setting (re-enable interrupts)
#else
    asm("str r1,[r0]");
#endif

#endif
    __JUMP(,lr);
    };
|

/**
 * Set the function which is to be called on every thread reschedule.
 *
 * @param aCallback Pointer to callback function, or NULL to disable callback.

    @pre Kernel must be locked.
    @pre Call in a thread context.
    @pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::SetRescheduleCallback(TRescheduleCallback /*aCallback*/)
    {
    ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
    asm("ldr r1, __TheScheduler ");
    asm("str r0, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleHook));
#endif
    __JUMP(,lr);
    };
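
// Typical installation sequence for a debug agent (illustrative sketch; the
// callback name is hypothetical and the TRescheduleCallback signature is
// assumed, so treat this as an outline rather than a reference):
#if 0
static void ExampleRescheduleCallback(NThread* aNewThread);     // hypothetical

static void ExampleInstallHooks()
    {
    NKern::Lock();                      // hook APIs require the kernel locked
    NKern::SetRescheduleCallback(&ExampleRescheduleCallback);
    NKern::InsertSchedulerHooks();      // patches in the trampoline branch
    NKern::Unlock();
    }
#endif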
|


/** Disables interrupts to specified level.

    Note that if we are not disabling all interrupts we must lock the kernel
    here, otherwise a high priority interrupt which is still enabled could
    cause a reschedule and the new thread could then reenable interrupts.

    @param aLevel Interrupts are disabled up to and including aLevel. On ARM,
            level 1 stands for IRQ only and level 2 stands for IRQ and FIQ.
    @return CPU-specific value passed to RestoreInterrupts.

    @pre 1 <= aLevel <= maximum level (CPU-specific)

    @see NKern::RestoreInterrupts()
*/
EXPORT_C __NAKED__ TInt NKern::DisableInterrupts(TInt /*aLevel*/)
    {
    asm("cmp r0, #1 ");
    asm("bhi " CSM_ZN5NKern20DisableAllInterruptsEv);   // if level>1, disable all
    asm("ldreq r12, __TheScheduler ");
    asm("mrs r2, cpsr ");               // r2=original CPSR
    asm("bcc 1f ");                     // skip if level=0
    asm("ldr r3, [r12, #%a0]!" : : "i" _FOFF(TScheduler,iKernCSLocked));
    asm("and r0, r2, #0xc0 ");
    INTS_OFF_1(r2, r2, INTS_IRQ_OFF);   // disable level 1 interrupts
    asm("cmp r3, #0 ");                 // test if kernel locked
    asm("addeq r3, r3, #1 ");           // if not, lock the kernel
    asm("streq r3, [r12] ");
    asm("orreq r0, r0, #0x80000000 ");  // and set top bit to indicate kernel locked
    INTS_OFF_2(r2, r2, INTS_IRQ_OFF);
    __JUMP(,lr);
    asm("1: ");
    asm("and r0, r2, #0xc0 ");
    __JUMP(,lr);
    }
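
// Usage sketch (illustrative): mask IRQs around a very short operation. The
// opaque value returned must be handed back to RestoreInterrupts(), which also
// unlocks the kernel if DisableInterrupts() had to lock it.
#if 0
static void ExampleIrqSafeOperation()
    {
    TInt irq = NKern::DisableInterrupts(1);     // level 1: IRQ off, FIQ still enabled
    // ... very short critical region ...
    NKern::RestoreInterrupts(irq);
    }
#endif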
|

/** Disables all interrupts (e.g. both IRQ and FIQ on ARM).

    @return CPU-specific value passed to NKern::RestoreInterrupts().

    @see NKern::RestoreInterrupts()
*/
EXPORT_C __NAKED__ TInt NKern::DisableAllInterrupts()
    {
    asm("mrs r1, cpsr ");
    asm("and r0, r1, #0xc0 ");          // return I and F bits of CPSR
    INTS_OFF(r1, r1, INTS_ALL_OFF);
    __JUMP(,lr);
    }
|

/** Enables all interrupts (e.g. IRQ and FIQ on ARM).

    This function never unlocks the kernel. So it must be used
    only to complement NKern::DisableAllInterrupts. Never use it
    to complement NKern::DisableInterrupts.

    @see NKern::DisableInterrupts()
    @see NKern::DisableAllInterrupts()

    @internalComponent
*/
EXPORT_C __NAKED__ void NKern::EnableAllInterrupts()
    {
#ifndef __CPU_ARM_HAS_CPS
    asm("mrs r0, cpsr ");
    asm("bic r0, r0, #0xc0 ");
    asm("msr cpsr_c, r0 ");
#else
    CPSIEIF;
#endif
    __JUMP(,lr);
    }
|

/** Restores interrupts to previous level and unlocks the kernel if it was
    locked when disabling them.

    @param aRestoreData CPU-specific data returned from NKern::DisableInterrupts
    or NKern::DisableAllInterrupts specifying the previous interrupt level.

    @see NKern::DisableInterrupts()
    @see NKern::DisableAllInterrupts()
*/
EXPORT_C __NAKED__ void NKern::RestoreInterrupts(TInt /*aRestoreData*/)
    {
    asm("tst r0, r0 ");                 // test state of top bit of aLevel
    asm("mrs r1, cpsr ");
    asm("and r0, r0, #0xc0 ");
    asm("bic r1, r1, #0xc0 ");
    asm("orr r1, r1, r0 ");             // replace I and F bits with those supplied
    asm("msr cpsr_c, r1 ");             // flags are unchanged (in particular N)
    __JUMP(pl,lr);                      // if top bit of aLevel clear, finished

    // if top bit of aLevel set, fall through to unlock the kernel
    }
|

/** Unlocks the kernel.

    Decrements iKernCSLocked; if it becomes zero and IDFCs or a reschedule are
    pending, calls the scheduler to process them.
    Must be called in mode_svc.

    @pre Call either in a thread or an IDFC context.
    @pre Do not call from an ISR.
*/
EXPORT_C __NAKED__ void NKern::Unlock()
    {
    ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

    asm("ldr r1, __TheScheduler ");
    asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
    asm("subs r2, r3, #1 ");
    asm("str r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
    asm("ldreq r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));   // if kernel now unlocked, check flags
    asm("bne 1f ");                     // if kernel still locked, return
    asm("cmp r2, #0 ");                 // check for DFCs or reschedule
    asm("bne 2f");                      // branch if needed
    asm("1: ");
    __JUMP(,lr);
    asm("2: ");
    asm("str r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));     // else lock the kernel again
    asm("str lr, [sp, #-4]! ");         // save return address
    asm("bl " CSM_ZN10TScheduler10RescheduleEv);    // run DFCs and reschedule, return with kernel unlocked, interrupts disabled
    SET_INTS(r0, MODE_SVC, INTS_ALL_ON);    // reenable interrupts
    asm("ldr pc, [sp], #4 ");
    }
|

/** Locks the kernel.

    Increments iKernCSLocked, thereby deferring IDFCs and preemption.
    Must be called in mode_svc.

    @pre Call either in a thread or an IDFC context.
    @pre Do not call from an ISR.
*/
EXPORT_C __NAKED__ void NKern::Lock()
    {
    ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

    asm("ldr r12, __TheScheduler ");
    asm("ldr r3, [r12, #%a0]!" : : "i" _FOFF(TScheduler,iKernCSLocked));
    asm("add r3, r3, #1 ");             // lock the kernel
    asm("str r3, [r12] ");
    __JUMP(,lr);
    }
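
// Usage sketch (illustrative): defer IDFCs and preemption around a short
// sequence. Lock()/Unlock() nest; any reschedule deferred while locked runs
// when the outermost Unlock() drops iKernCSLocked back to zero.
#if 0
static void ExamplePreemptionFreeSection()
    {
    NKern::Lock();
    // ... code that must not be preempted or interrupted by IDFCs ...
    NKern::Unlock();                    // runs pending IDFCs/reschedule if any
    }
#endif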
|

/** Locks the kernel and returns a pointer to the current thread.

    Increments iKernCSLocked, thereby deferring IDFCs and preemption.

    @pre Call either in a thread or an IDFC context.
    @pre Do not call from an ISR.
*/
EXPORT_C __NAKED__ NThread* NKern::LockC()
    {
    ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

    asm("ldr r12, __TheScheduler ");
    asm("ldr r0, [r12, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
    asm("ldr r3, [r12, #%a0]!" : : "i" _FOFF(TScheduler,iKernCSLocked));
    asm("add r3, r3, #1 ");             // lock the kernel
    asm("str r3, [r12] ");
    __JUMP(,lr);
    }
|

__ASSERT_COMPILE(_FOFF(TScheduler,iKernCSLocked) == _FOFF(TScheduler,iRescheduleNeededFlag) + 4);

/** Allows IDFCs and rescheduling if they are pending.

    If IDFCs or a reschedule are pending and iKernCSLocked is exactly equal
    to 1, this calls the scheduler to process the IDFCs and possibly reschedule.
    Must be called in mode_svc.

    @return Nonzero if a reschedule actually occurred, zero if not.

    @pre Call either in a thread or an IDFC context.
    @pre Do not call from an ISR.
*/
EXPORT_C __NAKED__ TInt NKern::PreemptionPoint()
    {
    ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

    asm("ldr r3, __RescheduleNeededFlag ");
    asm("ldmia r3, {r0,r1} ");          // r0=RescheduleNeededFlag, r1=KernCSLocked
    asm("cmp r0, #0 ");
    __JUMP(eq,lr);                      // if no reschedule required, return 0
    asm("subs r1, r1, #1 ");
    __JUMP(ne,lr);                      // if kernel still locked, exit
    asm("str lr, [sp, #-4]! ");         // store return address

    // reschedule - this also switches context if necessary
    // enter this function in mode_svc, interrupts on, kernel locked
    // exit this function in mode_svc, all interrupts off, kernel unlocked
    asm("bl " CSM_ZN10TScheduler10RescheduleEv);

    asm("mov r1, #1 ");
    asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));     // lock the kernel again
    SET_INTS(r3, MODE_SVC, INTS_ALL_ON);    // interrupts back on
    asm("mov r0, r2 ");                 // Return 0 if no reschedule, non-zero if reschedule occurred
    asm("ldr pc, [sp], #4 ");

    asm("__RescheduleNeededFlag: ");
    asm(".word %a0" : : "i" ((TInt)&TheScheduler.iRescheduleNeededFlag));
    }
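
// Usage sketch (illustrative; KExampleSteps is hypothetical): a long pass over
// kernel data performed with the kernel locked, offering a preemption
// opportunity after each bounded step.
#if 0
static void ExampleLongKernelPass()
    {
    const TInt KExampleSteps = 64;      // hypothetical workload size
    NKern::Lock();
    for (TInt i = 0; i < KExampleSteps; ++i)
        {
        // ... one bounded step of work ...
        NKern::PreemptionPoint();       // reschedules here if anything is pending
        }
    NKern::Unlock();
    }
#endif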
|

/** Returns the current processor context type (thread, IDFC or interrupt).

    @return A value from NKern::TContext enumeration (but never EEscaped).

    @pre Call in any context.

    @see NKern::TContext
*/
EXPORT_C __NAKED__ TInt NKern::CurrentContext()
    {
    asm("mrs r1, cpsr ");
    asm("mov r0, #2 ");                 // 2 = interrupt
    asm("and r1, r1, #0x1f ");          // r1 = mode
    asm("cmp r1, #0x13 ");
    asm("ldreq r2, __TheScheduler ");
    __JUMP(ne,lr);                      // if not svc, must be interrupt
    asm("ldrb r0, [r2, #%a0]" : : "i" _FOFF(TScheduler,iInIDFC));
    asm("cmp r0, #0 ");
    asm("movne r0, #1 ");               // if iInIDFC, return 1 else return 0
    __JUMP(,lr);
    }
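
// Usage sketch (illustrative): code shared between thread and interrupt
// callers can branch on the context. Per the assembler above, 0 = thread,
// 1 = IDFC, 2 = interrupt; EEscaped is never returned.
#if 0
static TBool ExampleCanBlock()
    {
    return NKern::CurrentContext() == 0;    // only a thread context may block
    }
#endif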
|

#ifdef __FAST_MUTEX_MACHINE_CODED__

/** Temporarily releases the System Lock if there is contention.

    If there is another thread attempting to acquire the System lock, the
    calling thread releases the mutex and then acquires it again.

    This is more efficient than the equivalent code:

    @code
    NKern::UnlockSystem();
    NKern::LockSystem();
    @endcode

    Note that this can only allow higher priority threads to use the System
    lock, as a lower priority thread cannot cause contention on a fast mutex.

    @return TRUE if the system lock was relinquished, FALSE if not.

    @pre System lock must be held.

    @post System lock is held.

    @see NKern::LockSystem()
    @see NKern::UnlockSystem()
*/
EXPORT_C __NAKED__ TBool NKern::FlashSystem()
    {
    asm("ldr r0, __SystemLock ");
    }
|
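
// Usage sketch (editorial addition, not part of the original source):
// flashing the System Lock during a long operation so that higher-priority
// contenders can run. StepDone() and DoStepUnderSystemLock() are hypothetical.
//
//	NKern::LockSystem();
//	while (!StepDone())
//		{
//		DoStepUnderSystemLock();
//		NKern::FlashSystem();		// releases briefly only if someone is waiting
//		}
//	NKern::UnlockSystem();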


/** Temporarily releases a fast mutex if there is contention.

	If there is another thread attempting to acquire the mutex, the calling
	thread releases the mutex and then acquires it again.

	This is more efficient than the equivalent code:

	@code
	NKern::FMSignal();
	NKern::FMWait();
	@endcode

	@return	TRUE if the mutex was relinquished, FALSE if not.

	@pre	The mutex must be held.

	@post	The mutex is held.
*/
EXPORT_C __NAKED__ TBool NKern::FMFlash(NFastMutex*)
	{
	ASM_DEBUG1(NKFMFlash,r0);

	asm("ldr r1, [r0,#%a0]" : : "i" _FOFF(NFastMutex,iWaiting));
	asm("cmp r1, #0");
	asm("bne fmflash_contended");		// another thread is waiting - flash the mutex
#ifdef BTRACE_FAST_MUTEX
	asm("ldr r1, __TheScheduler ");
	asm("ldrb r2, [r1,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
	asm("cmp r2, #0");
	asm("bne fmflash_trace");
#endif
	asm("mov r0, #0");					// no contention - return FALSE
	__JUMP(,lr);

	asm("fmflash_contended:");
	asm("stmfd sp!,{r4,lr}");
	asm("mov r4, r0");
	asm("bl " CSM_ZN5NKern4LockEv);				// NKern::Lock()
	asm("mov r0, r4");
	asm("bl " CSM_ZN10NFastMutex6SignalEv);		// NFastMutex::Signal()
	asm("bl " CSM_ZN5NKern15PreemptionPointEv);	// NKern::PreemptionPoint()
	asm("mov r0, r4");
	asm("bl " CSM_ZN10NFastMutex4WaitEv);		// NFastMutex::Wait()
	asm("bl " CSM_ZN5NKern6UnlockEv);			// NKern::Unlock()
	asm("mov r0, #-1");					// mutex was relinquished - return TRUE
	__POPRET("r4,");

#ifdef BTRACE_FAST_MUTEX
	asm("fmflash_trace:");
	ALIGN_STACK_START;
	asm("stmdb sp!,{r0-r2,lr}");		// 4th item on stack is PC value for trace
	asm("mov r3, r0");					// fast mutex parameter in r3
	asm("ldr r0, fmflash_trace_header");	// header parameter in r0
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
	asm("mov lr, pc");
	asm("ldr pc, [r1, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("ldmia sp!,{r0-r2,lr}");
	ALIGN_STACK_END;
	asm("mov r0, #0");
	__JUMP(,lr);

	asm("fmflash_trace_header:");
	asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex << BTrace::ECategoryIndex*8) + (BTrace::EFastMutexFlash << BTrace::ESubCategoryIndex*8)) );
#endif
	}
#endif // __FAST_MUTEX_MACHINE_CODED__
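
// Usage sketch (editorial addition, not part of the original source): a
// thread holding a fast mutex across many iterations, flashing it so that
// any waiter can take a turn. TheDataMutex, KItemCount and ProcessItem()
// are hypothetical.
//
//	NKern::FMWait(&TheDataMutex);
//	for (TInt i = 0; i < KItemCount; ++i)
//		{
//		ProcessItem(i);
//		NKern::FMFlash(&TheDataMutex);	// release/reacquire only on contention
//		}
//	NKern::FMSignal(&TheDataMutex);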


// Need to put this code here because the H2 ekern build complains if the
// offset of the __TheScheduler label from the first function in the file
// falls outside the permissible range.
#ifdef BTRACE_FAST_MUTEX
__NAKED__ TInt BtraceFastMutexHolder()
	{
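	// Each trace header word below packs four byte-wide fields: the record
	// size (16 bytes) at ESizeIndex, the flags (context id and PC present)
	// at EFlagsIndex, the category (EFastMutex) at ECategoryIndex, and the
	// sub-category at ESubCategoryIndex.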
	asm("fmsignal_lock_trace_header:");
	asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex << BTrace::ECategoryIndex*8) + (BTrace::EFastMutexSignal << BTrace::ESubCategoryIndex*8)) );

	asm("fmwait_lockacquired_trace_header:");
	asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex << BTrace::ECategoryIndex*8) + (BTrace::EFastMutexWait << BTrace::ESubCategoryIndex*8)) );

	asm("fmsignal_lock_trace_unlock:");
	// r0=mutex r2=scheduler
	asm("ldr r12, [r2, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("mov r3, r0");					// mutex
	asm("ldr r0, fmsignal_lock_trace_header");	// header
	asm("ldr r2, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// context id
	__JUMP(,r12);

	asm("fmwait_lockacquiredwait_trace:");
	// r0=scheduler r2=mutex r3=thread
	asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("mov r1, r2");
	asm("mov r2, r3");					// context id
	asm("mov r3, r1");					// mutex
	asm("ldr r0, fmwait_lockacquired_trace_header");	// header
	__JUMP(,r12);

	asm("fmwait_lockacquiredwait_trace2:");
	// r0=mutex r1=thread r2=scheduler
	asm("ldr r12, [r2, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("mov r3, r0");					// mutex
	asm("ldr r0, fmwait_lockacquired_trace_header");	// header
	asm("mov r2, r1");					// context id
	__JUMP(,r12);

	asm("syslock_wait_trace:");
	// r0=scheduler r2=thread
	asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
//	asm("mov r2, r2");					// context id
	asm("add r3, r0, #%a0" : : "i" _FOFF(TScheduler,iLock));	// mutex
	asm("ldr r0, fmwait_lockacquired_trace_header");	// header
	__JUMP(,r12);

	asm("syslock_signal_trace:");
	// r0=scheduler r3=thread
	asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("mov r2, r3");					// context id
	asm("add r3, r0, #%a0" : : "i" _FOFF(TScheduler,iLock));	// mutex
	asm("ldr r0, fmsignal_lock_trace_header");	// header
	__JUMP(,r12);

	}
#endif // BTRACE_FAST_MUTEX