|
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkern\arm\nctimer.cia
// Fast Millisecond Timer Implementation
//
//

#include <e32cia.h>
#include <arm.h>

#ifdef _DEBUG
#define ASM_KILL_LINK(rp,rs)	asm("mov "#rs", #0xdf ");\
		asm("orr "#rs", "#rs", "#rs", lsl #8 ");\
		asm("orr "#rs", "#rs", "#rs", lsl #16 ");\
		asm("str "#rs", ["#rp"] ");\
		asm("str "#rs", ["#rp", #4] ");

#define ASM_KILL_LINK_OFFSET(rp,rs,offset)	asm("mov "#rs", #0xdf ");\
		asm("orr "#rs", "#rs", "#rs", lsl #8 ");\
		asm("orr "#rs", "#rs", "#rs", lsl #16 ");\
		asm("str "#rs", ["#rp", #"#offset"] ");\
		asm("str "#rs", ["#rp", #"#offset"+4] ");
#else
#define ASM_KILL_LINK(rp,rs)
#define ASM_KILL_LINK_OFFSET(rp,rs,offset)
#endif
|
#ifdef __MSTIM_MACHINE_CODED__

#ifdef _DEBUG
#define __DEBUG_CALLBACK(n)	asm("stmfd sp!, {r0-r3,r12,lr} "); \
		asm("ldr r0, __TheTimerQ "); \
		asm("ldr r12, [r0, #%a0]!" : : "i" _FOFF(NTimerQ,iDebugFn)); \
		asm("cmp r12, #0 "); \
		asm("movne r1, #" #n ); \
		asm("ldrne r0, [r0, #4] "); \
		asm("movne lr, pc "); \
		__JUMP(ne,r12); \
		asm("ldmfd sp!, {r0-r3,r12,lr} ")
#else
#define __DEBUG_CALLBACK(n)
#endif
|
/** Start a nanokernel timer in zero-drift periodic mode with ISR or DFC callback.
Queues the timer to expire in the specified number of nanokernel ticks,
measured from the time at which it last expired. This allows exact periodic
timers to be implemented with no drift caused by delays in requeueing the
timer.
The expiry handler will be called in the same context as the previous timer
expiry. Typically, NTimer::OneShot() is used to start the first interval; that
call determines whether the callback runs in ISR context, in the nanokernel
timer thread (DfcThread1) or in another DFC thread. The expiry handler then
uses NTimer::Again() to requeue the timer.

@param aTime Timeout in nanokernel ticks
@return KErrNone if no error
@return KErrInUse if timer is already active
@return KErrArgument if the requested expiry time is in the past
@pre Any context
*/
__NAKED__ EXPORT_C TInt NTimer::Again(TInt /*aTime*/)
	{
	asm("mrs r12, cpsr ");
	INTS_OFF(r3, r12, INTS_ALL_OFF); // all interrupts off
	asm("ldrb r3, [r0, #%a0]" : : "i" _FOFF(NTimer,iState)); // r3=iState
	asm("ldr r2, __TheTimerQ ");
	asm("cmp r3, #%a0" : : "i" ((TInt)EIdle));
	asm("ldreq r3, [r0, #%a0]" : : "i" _FOFF(NTimer,iTriggerTime)); // r3=iTriggerTime
	asm("bne add_mscb_in_use "); // if already queued return KErrInUse
	asm("add r3, r3, r1 "); // add requested time interval
	asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(NTimerQ,iMsCount)); // r1=iMsCount
	asm("subs r1, r3, r1 "); // r1=trigger time-next tick time
	asm("strpl r3, [r0, #%a0]" : : "i" _FOFF(NTimer,iTriggerTime)); // iTriggerTime+=aTime
	asm("bpl AddMsCallBack "); // if time interval positive, ok
	asm("mov r0, #%a0" : : "i" ((TInt)KErrArgument)); // else return KErrArgument
	asm("b add_mscb_0 ");
	}
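
// Illustrative usage sketch (not part of this file): a zero-drift periodic timer.
// The first interval is started with OneShot() and each expiry requeues itself
// with Again(), so latency in the handler does not accumulate. The driver class
// DMyDevice, its members and the 10ms period are assumptions for the example;
// NTimer(NTimerFn,TAny*), NKern::TimerTicks() and OneShot()/Again() are real APIs.
//
//	void DMyDevice::TickCallBack(TAny* aPtr)			// static, matches NTimerFn
//		{
//		DMyDevice* d = (DMyDevice*)aPtr;
//		d->DoPeriodicWork();							// runs in the context chosen by OneShot()
//		d->iTimer.Again(d->iPeriodTicks);				// measured from the last expiry - no drift
//		}
//
//	TInt DMyDevice::StartPeriodic()
//		{
//		iPeriodTicks = NKern::TimerTicks(10);			// 10ms expressed in nanokernel ticks
//		return iTimer.OneShot(iPeriodTicks, ETrue);		// first expiry, completed in DfcThread1
//		}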
|
/** Start a nanokernel timer in one-shot mode with ISR callback.
Queues the timer to expire in the specified number of nanokernel ticks. The
actual wait time will be at least that much and may be up to one tick more.
The expiry handler will be called in ISR context.

@param aTime Timeout in nanokernel ticks
@return KErrNone if no error
@return KErrInUse if timer is already active
@pre Any context
*/
__NAKED__ EXPORT_C TInt NTimer::OneShot(TInt /*aTime*/)
	{
	asm("mov r2, #0 ");
	// fall through
	}


/** Start a nanokernel timer in one-shot mode with ISR or DFC callback.
Queues the timer to expire in the specified number of nanokernel ticks. The
actual wait time will be at least that much and may be up to one tick more.
The expiry handler will be called in either ISR context or in the context
of the nanokernel timer thread (DfcThread1).

@param aTime Timeout in nanokernel ticks
@param aDfc TRUE if DFC callback required, FALSE if ISR callback required.
@return KErrNone if no error
@return KErrInUse if timer is already active
@pre Any context
*/
__NAKED__ EXPORT_C TInt NTimer::OneShot(TInt /*aTime*/, TBool /*aDfc*/)
	{
	asm("mrs r12, cpsr ");
	INTS_OFF(r3, r12, INTS_ALL_OFF); // all interrupts off
	asm("ldrb r3, [r0, #%a0]" : : "i" _FOFF(NTimer,iState)); // r3=iState
	asm("cmp r3, #%a0" : : "i" ((TInt)EIdle));
	asm("bne add_mscb_in_use "); // if already queued return KErrInUse
	asm("strb r2, [r0, #%a0]" : : "i" _FOFF(NTimer,iCompleteInDfc)); // iCompleteInDfc=aDfc
	asm("ldr r2, __TheTimerQ ");
	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(NTimerQ,iMsCount)); // r3=iMsCount
	asm("add r3, r3, r1 ");
	asm("str r3, [r0, #%a0]" : : "i" _FOFF(NTimer,iTriggerTime)); // iTriggerTime=ms count + aTime

	// r0->CallBack, r2=TheTimerQ, r1=time interval, r3=trigger time
	asm("AddMsCallBack: ");
	asm("cmp r1, #32 "); // compare interval with 32ms
	asm("bge add_mscb_holding "); // if >=32ms put it on holding queue
	asm("ldrb r1, [r0, #%a0]" : : "i" _FOFF(NTimer,iCompleteInDfc)); // r1=iCompleteInDfc
	asm("and r3, r3, #0x1f "); // r3=trigger time & 0x1f
	asm("cmp r1, #0 ");
	asm("add r1, r2, r3, lsl #4 "); // r1->IntQ corresponding to trigger time
	asm("addne r1, r1, #8 "); // if (iCompleteInDfc), r1 points to DfcQ
	asm("ldr r3, [r1, #4] "); // r3=pQ->iA.iPrev
	asm("str r0, [r1, #4] "); // pQ->iA.iPrev=pC
	asm("str r0, [r3, #0] "); // pQ->iA.iPrev->iNext=pC
	asm("stmia r0, {r1,r3} "); // pC->iNext=&pQ->iA, pC->iPrev=pQ->iA.iPrev
	asm("mov r1, #%a0" : : "i" ((TInt)EFinal));
	asm("strb r1, [r0, #%a0]" : : "i" _FOFF(NTimer,iState)); // iState=EFinal
	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(NTimer,iTriggerTime)); // r0=iTriggerTime
	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(NTimerQ,iPresent)); // r3=TheTimerQ->iPresent
	asm("and r0, r0, #0x1f ");
	asm("mov r1, #1 ");
	asm("orr r3, r3, r1, lsl r0 "); // iPresent |= (1<<index)
	asm("str r3, [r2, #%a0]" : : "i" _FOFF(NTimerQ,iPresent));
	asm("mov r0, #0 "); // return KErrNone
	asm("msr cpsr, r12 ");
	__JUMP(,lr);

	asm("add_mscb_holding: ");
	asm("ldr r3, [r2, #%a0]!" : : "i" _FOFF(NTimerQ,iHoldingQ.iA.iPrev)); // r3=pQ->iPrev, r2=&iHoldingQ.iA.iPrev
	asm("mov r1, #%a0" : : "i" ((TInt)EHolding));
	asm("strb r1, [r0, #%a0]" : : "i" _FOFF(NTimer,iState)); // iState=EHolding
	asm("str r0, [r2], #-4 "); // pQ->iPrev=pC, r2=&iHoldingQ
	asm("str r0, [r3, #0] "); // pQ->iPrev->iNext=pC
	asm("stmia r0, {r2,r3} "); // pC->iNext=pQ, pC->iPrev=pQ->iPrev
	asm("mov r0, #0 "); // return KErrNone

	asm("add_mscb_0: ");
	asm("msr cpsr, r12 ");
	__JUMP(,lr);

	asm("add_mscb_in_use: ");
	asm("mov r0, #%a0" : : "i" ((TInt)KErrInUse)); // return KErrInUse
	asm("msr cpsr, r12 ");
	__JUMP(,lr);

	asm("__TheTimerQ: ");
	asm(".word TheTimerQ ");
	}
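
// Illustrative usage sketch (not part of this file): a simple one-shot timeout,
// choosing between ISR-context completion and completion in DfcThread1.
// DMyDevice and its members are assumptions for the example.
//
//	DMyDevice::DMyDevice()
//		: iTimeout(&TimeoutCallBack, this)				// NTimer(NTimerFn, TAny*)
//		{
//		}
//
//	TInt DMyDevice::StartTimeout()
//		{
//		// ISR callback - lowest latency, but the handler must obey ISR restrictions:
//		return iTimeout.OneShot(NKern::TimerTicks(100));
//		// ...or, for completion in DfcThread1 instead:
//		// return iTimeout.OneShot(NKern::TimerTicks(100), ETrue);
//		}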
|
/** Starts a nanokernel timer in one-shot mode with the callback run in the DFC thread that the supplied DFC belongs to.

Queues the timer to expire in the specified number of nanokernel ticks. The
actual wait time will be at least that much and may be up to one tick more.
On expiry, aDfc is queued from ISR context.

Note that NKern::TimerTicks() can be used to convert milliseconds to ticks.

@param aTime Timeout in nanokernel ticks
@param aDfc DFC to be queued when the timer expires.

@return KErrNone if no error; KErrInUse if timer is already active.

@pre Any context

@see NKern::TimerTicks()
*/
|
__NAKED__ EXPORT_C TInt NTimer::OneShot(TInt /*aTime*/, TDfc& /*aDfc*/)
	{
	asm("mrs r12, cpsr ");
	INTS_OFF(r3, r12, INTS_ALL_OFF); // all interrupts off
	asm("ldrb r3, [r0, #%a0]" : : "i" _FOFF(NTimer,iState)); // r3=iState
	asm("cmp r3, #%a0" : : "i" ((TInt)EIdle));
	asm("bne add_mscb_in_use "); // if already queued return KErrInUse
	asm("mov r3, #0 ");
	asm("strb r3, [r0, #%a0]" : : "i" _FOFF(NTimer,iCompleteInDfc)); // iCompleteInDfc=0
	asm("str r3, [r0, #%a0]" : : "i" _FOFF(NTimer,iFunction)); // iFunction=NULL
	asm("str r2, [r0, #%a0]" : : "i" _FOFF(NTimer,iPtr)); // iPtr=&aDfc
	asm("ldr r2, __TheTimerQ ");
	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(NTimerQ,iMsCount)); // r3=iMsCount
	asm("add r3, r3, r1 ");
	asm("str r3, [r0, #%a0]" : : "i" _FOFF(NTimer,iTriggerTime)); // iTriggerTime=ms count + aTime
	asm("b AddMsCallBack ");
	}
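
// Illustrative usage sketch (not part of this file): a one-shot timeout whose
// expiry queues a caller-supplied DFC, so the handler runs in whatever thread
// owns that DFC's queue. DMyDevice, TimeoutDfcFn and the priority value are
// assumptions for the example; TDfc(TDfcFn,TAny*,TDfcQue*,TInt) and
// Kern::DfcQue0() are standard kernel APIs.
//
//	DMyDevice::DMyDevice()
//		: iDfc(&TimeoutDfcFn, this, Kern::DfcQue0(), 1),
//		  iTimeout(NULL, NULL)						// iFunction/iPtr are overwritten by OneShot(aTime,aDfc)
//		{
//		}
//
//	TInt DMyDevice::StartTimeout()
//		{
//		return iTimeout.OneShot(NKern::TimerTicks(50), iDfc);	// ~50ms, then iDfc is queued from the tick ISR
//		}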
|
/** Cancel a nanokernel timer.
Removes this timer from the nanokernel timer queue. Does nothing if the
timer is inactive or has already expired.
Note that if the timer was queued with a DFC callback requested, it is
possible for the expiry handler to run even after Cancel() has been called.
This occurs when DfcThread1 is preempted just before calling the expiry
handler for this timer and the preempting thread/ISR/IDFC calls Cancel() on
the timer.

@pre Any context
@return TRUE if timer was actually cancelled
@return FALSE if timer was not cancelled - this could be because it was not
		active or because its expiry handler was already running on
		another CPU or in the timer DFC.
*/
EXPORT_C __NAKED__ TBool NTimer::Cancel()
	{
	asm("mrs r12, cpsr ");
	INTS_OFF(r3, r12, INTS_ALL_OFF); // all interrupts off
	asm("ldrb r3, [r0, #%a0]" : : "i" _FOFF(NTimer,iState));
	asm("mov r1, #0 ");
	asm("cmp r3, #%a0" : : "i" ((TInt)ETransferring));
	asm("movcc r0, #0 "); // if EIdle, nothing to do, return FALSE
	asm("bcc cancel_idle ");
	asm("strb r1, [r0, #%a0]" : : "i" _FOFF(NTimer,iState)); // iState=EIdle
	asm("beq cancel_xfer "); // if ETransferring, branch
	asm("ldmia r0, {r1,r2} "); // If queued, dequeue - r1=next, r2=prev
	asm("cmp r3, #%a0" : : "i" ((TInt)ECritical));
	asm("str r1, [r2, #0] "); // if queued, prev->next=next
	asm("str r2, [r1, #4] "); // and next->prev=prev
	ASM_KILL_LINK(r0,r1);
	asm("ldrcs r1, __TheTimerQ ");
	asm("ldrhi r0, [r0, #%a0]" : : "i" _FOFF(NTimer,iTriggerTime)); // r0=iTriggerTime
	asm("ldrhi r3, [r1, #%a0]" : : "i" _FOFF(NTimerQ,iMsCount)); // r3=iMsCount
	asm("bcc cancel_done "); // if EHolding or EOrdered, finished
	asm("beq cancel_critical "); // if ECritical, branch
	// r1->ms timer, state was EFinal
	asm("subs r3, r0, r3 "); // r3=trigger time - next tick
	asm("bmi cancel_done "); // if trigger time already expired, don't touch iPresent (was on iCompletedQ)
	asm("and r0, r0, #0x1f "); // r0=iTriggerTime&0x1f = queue index
	asm("mov r3, r1 ");
	asm("ldr r2, [r3, r0, lsl #4]! "); // r3->iIntQ for this timer, r2=iIntQ head pointer
	asm("cmp r2, r3 ");
	asm("bne cancel_done "); // iIntQ not empty so finished
	asm("ldr r2, [r3, #8]! "); // r2=iDfcQ head pointer
	asm("cmp r2, r3 ");
	asm("bne cancel_done "); // iDfcQ not empty so finished
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NTimerQ,iPresent)); // r2=TheTimerQ->iPresent
	asm("mov r3, #1 ");
	asm("bic r2, r2, r3, lsl r0 "); // iPresent &= ~(1<<index)
	asm("str r2, [r1, #%a0]" : : "i" _FOFF(NTimerQ,iPresent));

	asm("cancel_done: ");
	asm("mov r0, #1 "); // return TRUE

	asm("cancel_idle: ");
	asm("msr cpsr, r12 ");
	__JUMP(,lr);

	asm("cancel_xfer: ");
	asm("ldr r1, __TheTimerQ "); // r1->ms timer, state was ETransferring
	asm("strb r3, [r1, #%a0]" : : "i" _FOFF(NTimerQ,iTransferringCancelled));
	asm("msr cpsr, r12 ");
	asm("mov r0, #1 "); // return TRUE
	__JUMP(,lr);

	asm("cancel_critical: "); // r1->ms timer, state was ECritical
	asm("strb r3, [r1, #%a0]" : : "i" _FOFF(NTimerQ,iCriticalCancelled));
	asm("msr cpsr, r12 ");
	asm("mov r0, #1 "); // return TRUE
	__JUMP(,lr);
	}
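
// Illustrative usage sketch (not part of this file): cancelling a pending
// timeout. As documented above, with a DFC callback the expiry handler can
// still run after Cancel(), so the return value (or the driver's own state)
// decides who completes the request. DMyDevice, CompleteRequest() and the use
// of KErrCancel are assumptions for the example.
//
//	void DMyDevice::AbortRequest()
//		{
//		if (iTimeout.Cancel())
//			{
//			// Timer dequeued before expiry - complete the request here.
//			CompleteRequest(KErrCancel);
//			}
//		// else the expiry handler has run (or is about to run) and completes it.
//		}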
|
/** Return the number of ticks before the next nanokernel timer expiry.
May on occasion return a pessimistic estimate (i.e. too low).
Used by base port to disable the system tick interrupt when the system
is idle.

@return The number of ticks before the next nanokernel timer expiry.

@pre Interrupts must be disabled.

@post Interrupts are disabled.
*/
EXPORT_C __NAKED__ TInt NTimerQ::IdleTime()
	{
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_DISABLED);
#ifdef _DEBUG
	asm("ldr r1, __TheScheduler ");
	asm("ldr r2, [r1, #%a0]! " : : "i" _FOFF(TScheduler,iDelayedQ));
	// r2 = iA.iNext, r1 = iA
	asm("cmp r2, r1 ");
	asm("movne r0, #1 "); // if there are delayed threads, prevent idle
	__JUMP(ne,lr);
#endif
	asm("ldr r12, __TheTimerQ ");
	asm("mvn r0, #0x80000000 "); // set r0=KMaxTInt initially
	asm("ldr r2, [r12, #%a0]!" : : "i" _FOFF(NTimerQ,iOrderedQ)); // r12->iOrderedQ, r2=iOrderedQ first
	asm("ldr r3, [r12, #-12] "); // r3=next tick number
	asm("cmp r2, r12 "); // check if iOrderedQ empty
	asm("ldrne r0, [r2, #%a0]" : : "i" _FOFF(NTimer,iTriggerTime)); // if not, r0=ordered Q first->trigger time
	asm("ldr r1, [r12, #-8]! "); // r1=iHoldingQ first, r12->iHoldingQ
	asm("bicne r0, r0, #0x0f ");
	asm("subne r0, r0, #16 "); // r0=tick at which transfer to final queue would occur
	asm("subne r0, r0, r3 "); // return value = trigger time - iMsCount
	asm("cmp r1, r12 "); // holding Q empty?
	asm("ldr r1, [r12, #-8] "); // r1=iPresent
	asm("and r12, r3, #0x1f "); // r12=next tick mod 32
	asm("beq 1f "); // branch if holding Q empty
	asm("ands r2, r3, #0x0f "); // else r2=next tick no. mod 16
	asm("rsbne r2, r2, #16 "); // if nonzero, subtract from 16 to give #ticks before next multiple of 16
	asm("cmp r2, r0 ");
	asm("movlt r0, r2 "); // update result if necessary
	asm("1: ");
	asm("movs r1, r1, ror r12 "); // r1=iPresent rotated so that LSB corresponds to next tick
	__JUMP(eq,lr); // if iPresent=0, finished
	asm("mov r3, #0 "); // r3 will accumulate bit number of least significant 1
	asm("movs r2, r1, lsl #16 ");
	asm("movne r1, r2 ");
	asm("addeq r3, r3, #16 ");
	asm("movs r2, r1, lsl #8 ");
	asm("movne r1, r2 ");
	asm("addeq r3, r3, #8 ");
	asm("movs r2, r1, lsl #4 ");
	asm("movne r1, r2 ");
	asm("addeq r3, r3, #4 ");
	asm("movs r2, r1, lsl #2 ");
	asm("movne r1, r2 ");
	asm("addeq r3, r3, #2 ");
	asm("movs r2, r1, lsl #1 ");
	asm("addeq r3, r3, #1 ");
	asm("cmp r3, r0 ");
	asm("movlt r0, r3 "); // update result if necessary
	__JUMP(,lr);
	}
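
// Illustrative usage sketch (not part of this file): how a base port's idle
// handler might use IdleTime() to suppress unnecessary ticks while idle.
// DMyVariant, SuppressTicks() and WaitForAnyInterrupt() are hypothetical
// variant-level helpers; only NTimerQ::IdleTime() and the NKern interrupt
// calls are real APIs.
//
//	void DMyVariant::Idle()
//		{
//		TInt irq = NKern::DisableAllInterrupts();	// IdleTime() requires interrupts disabled
//		TInt ticks = NTimerQ::IdleTime();			// ticks until next timer expiry (may be pessimistic)
//		if (ticks > 1)
//			SuppressTicks(ticks - 1);				// hypothetical: program the tick hardware to skip idle ticks
//		WaitForAnyInterrupt();						// hypothetical: sleep; any interrupt wakes the core
//		NKern::RestoreInterrupts(irq);
//		}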
|
/** Tick over the nanokernel timer queue.
This function should be called by the base port in the system tick timer ISR.
It should not be called at any other time.
The value of 'this' to pass is the value returned by NTimerQ::TimerAddress().

@see NTimerQ::TimerAddress()
*/
__NAKED__ EXPORT_C void NTimerQ::Tick()
	{
#ifdef _DEBUG
	asm("ldr r1, __TheScheduler ");
	asm("ldr r2, [r1, #%a0]! " : : "i" _FOFF(TScheduler,iDelayedQ));
	// r2 = iA.iNext, r1 = iA
	asm("cmp r2, r1 ");
	asm("beq 1f "); // no delayed threads, don't queue dfc
	asm("stmfd sp!, {r0,lr} ");
	asm("ldr r1, __TheScheduler ");
	asm("add r0, r1, #%a0 " : : "i" _FOFF(TScheduler,iDelayDfc));
	asm("bl " CSM_ZN4TDfc3AddEv);
	asm("ldmfd sp!, {r0,lr} ");
	asm("1: ");
#endif

	// Enter with r0 pointing to NTimerQ
	asm("ldr r1, __TheScheduler ");
	asm("mrs r12, cpsr ");

	// do the timeslice tick - on ARM __SCHEDULER_MACHINE_CODED is mandatory
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(NThread,iTime));
	asm("subs r3, r3, #1 ");
	asm("strge r3, [r2, #%a0]" : : "i" _FOFF(NThread,iTime));
	asm("streqb r12, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); // r12 lower byte is never 0
	INTS_OFF(r3, r12, INTS_ALL_OFF); // disable all interrupts

	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(NTimerQ,iMsCount)); // r1=iMsCount
	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NTimerQ,iPresent)); // r3=iPresent
	asm("and r2, r1, #0x1f "); // r2=iMsCount & 0x1f
	asm("add r1, r1, #1 ");
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(NTimerQ,iMsCount)); // iMsCount++
	asm("mov r1, #1 ");
	asm("tst r3, r1, lsl r2 "); // test iPresent bit for this tick
	asm("bic r1, r3, r1, lsl r2 "); // clear iPresent bit
	asm("strne r1, [r0, #%a0]" : : "i" _FOFF(NTimerQ,iPresent)); // update iPresent if necessary
	asm("bne mstim_tick_1 "); // if bit was set, we have work to do
	asm("tst r2, #0x0f "); // else test for tick 0 or 16
	__MSR_CPSR_C(ne, r12); // if neither return
	__JUMP(ne,lr);

	asm("mstim_tick_1: "); // get here if timers complete this tick
	asm("stmfd sp!, {r4-r6,lr} ");
	asm("add r1, r0, r2, lsl #4 "); // r1->IntQ for this tick
	asm("ldr r3, [r1, #8]! "); // r1->DfcQ and r3=DfcQ first
	asm("mov r5, #0 "); // r5=doDfc=FALSE
	asm("cmp r3, r1 ");
	asm("beq mstim_tick_2 "); // skip if DfcQ empty

	// Move DFC completions from iDfcQ to iCompletedQ
	asm("ldr lr, [r0, #%a0]" : : "i" _FOFF(NTimerQ,iCompletedQ.iA.iPrev)); // lr=last completed
	asm("ldr r4, [r1, #4] "); // r4=DfcQ last
	asm("add r5, r0, #%a0" : : "i" _FOFF(NTimerQ,iDfc)); // doDfc=TRUE
	asm("str r3, [lr, #0] "); // old last completed->next = DfcQ first
	asm("str r4, [r0, #%a0]" : : "i" _FOFF(NTimerQ,iCompletedQ.iA.iPrev)); // iCompletedQ last=DfcQ last
	asm("str lr, [r3, #4] "); // DfcQ first->prev = old last completed
	asm("add lr, r0, #%a0" : : "i" _FOFF(NTimerQ,iCompletedQ)); // lr=&iCompletedQ.iA
	asm("str lr, [r4, #0] "); // DfcQ last->next=&iCompletedQ.iA
	asm("str r1, [r1, #0] "); // DfcQ first=&DfcQ
	asm("str r1, [r1, #4] "); // DfcQ last=&DfcQ

	asm("mstim_tick_2: ");
	asm("tst r2, #0x0f "); // check for tick 0 or 16
	asm("bne mstim_tick_3 "); // skip if not

	// Tick 0 or 16 - must check holding queue and ordered queue
	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NTimerQ,iHoldingQ)); // r3=iHoldingQ first
	asm("add r6, r0, #%a0" : : "i" _FOFF(NTimerQ,iHoldingQ)); // r6=&iHoldingQ
	asm("cmp r3, r6 ");
	asm("addne r5, r0, #%a0" : : "i" _FOFF(NTimerQ,iDfc)); // if iHoldingQ nonempty, doDfc=TRUE and skip ordered queue check
	asm("bne mstim_tick_3 "); // skip if iHoldingQ nonempty
	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NTimerQ,iOrderedQ)); // r3=iOrderedQ first
	asm("add r6, r0, #%a0" : : "i" _FOFF(NTimerQ,iOrderedQ)); // r6=&iOrderedQ
	asm("cmp r3, r6 ");
	asm("beq mstim_tick_3 "); // skip if iOrderedQ empty
	asm("ldr r4, [r0, #%a0]" : : "i" _FOFF(NTimerQ,iMsCount)); // else r4=iMsCount
	asm("ldr r3, [r3, #%a0]" : : "i" _FOFF(NTimer,iTriggerTime)); // and r3=trigger time of first on ordered queue
	asm("sub r3, r3, r4 "); // r3=trigger time-iMsCount
	asm("cmp r3, #31 ");
	asm("addls r5, r0, #%a0" : : "i" _FOFF(NTimerQ,iDfc)); // if first expiry in <=31ms, doDfc=TRUE

	// Check iIntQ
	asm("mstim_tick_3: ");
	asm("ldr r3, [r1, #-8]! "); // r1->iIntQ, r3=iIntQ first
	asm("mov r6, r12 "); // r6=original cpsr
	asm("cmp r3, r1 "); // test if iIntQ empty
	asm("beq mstim_tick_4 "); // branch if it is

	// Transfer iIntQ to a temporary queue
	asm("ldr r4, [r1, #4] "); // r4=iIntQ last
	asm("str r1, [r1, #0] "); // clear iIntQ
	asm("str r1, [r1, #4] ");
	asm("stmfd sp!, {r3,r4} "); // copy queue onto stack
	asm("str sp, [r4, #0] "); // iIntQ last->next=sp
	asm("str sp, [r3, #4] "); // iIntQ first->prev=sp
	INTS_OFF_1(r4, r6, INTS_ALL_OFF); // r4=cpsr with all interrupts off

	// Walk the temporary queue and complete timers
	asm("mstim_tick_5: ");
	INTS_OFF_2(r4, r6, INTS_ALL_OFF); // all interrupts off
	asm("ldr r0, [sp, #0] "); // r0=q.iNext
	asm("mov r3, #%a0" : : "i" ((TInt)NTimer::EIdle));
	asm("cmp r0, sp "); // end of queue?
	asm("beq mstim_tick_6 "); // if so, branch out
	asm("ldmia r0!, {r1,r2} "); // r1=next r2=prev, r0->iPtr
	asm("strb r3, [r0, #%a0]" : : "i" (_FOFF(NTimer,iState)-8)); // iState=EIdle
	ASM_KILL_LINK_OFFSET(r0,r12,-8);
	asm("ldmia r0, {r0,r12} "); // r0=iPtr, r12=iFunction
	asm("str r1, [r2, #0] "); // prev->next=next
	asm("str r2, [r1, #4] "); // next->prev=prev
	asm("adr lr, mstim_tick_5 "); // return to mstim_tick_5
	asm("msr cpsr, r6 "); // restore interrupts
	asm("cmp r12, #0 "); // iFunction==NULL ?
	asm("beq mstim_tick_7 "); // if so queue Dfc (iPtr is a pointer to TDfc)
	__JUMP(,r12); // call timer callback with r0=iPtr
	asm("b mstim_tick_6 "); // skip queuing of Dfc

	asm("mstim_tick_7: ");
	asm("b " CSM_ZN4TDfc3AddEv); // add the DFC with r0=iPtr - a pointer to TDfc

	asm("mstim_tick_6: ");
	asm("add sp, sp, #8 "); // take temporary queue off stack

	asm("mstim_tick_4: ");
	asm("msr cpsr, r6 "); // restore original interrupt state
	asm("movs r0, r5 "); // DFC needed? if so, r0->iDfc
	asm("ldmfd sp!, {r4-r6,lr} "); // restore registers
	asm("bne " CSM_ZN4TDfc3AddEv); // add the DFC if required
	__JUMP(,lr); // if no DFC needed, return

	asm("__TheScheduler: ");
	asm(".word TheScheduler ");
	}
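
// Illustrative usage sketch (not part of this file): the base port's system tick
// ISR driving the timer queue. MsTimerIsr() and ClearTickInterrupt() are
// hypothetical; NTimerQ::TimerAddress() is the documented way to obtain 'this'.
//
//	void MsTimerIsr(TAny* /*aPtr*/)
//		{
//		ClearTickInterrupt();						// hypothetical: acknowledge the hardware tick source
//		NTimerQ& timerQ = *(NTimerQ*)NTimerQ::TimerAddress();
//		timerQ.Tick();								// advance the nanokernel timer queue
//		}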
|
__NAKED__ void NTimerQ::DfcFn(TAny* /*aPtr*/)
	{
	// Enter with r0 pointing to NTimerQ
	asm("stmfd sp!, {r7-r11,lr} ");
	SET_INTS_1(r11, MODE_SVC, INTS_ALL_ON); // always called from SVC mode
	SET_INTS_1(r10, MODE_SVC, INTS_ALL_OFF); // with interrupts enabled

	// First transfer entries on the Ordered queue to the Final queues
	asm("mstim_dfc_0: ");
	SET_INTS_2(r10, MODE_SVC, INTS_ALL_OFF); // disable interrupts
	asm("ldr r1, [r0, #%a0]!" : : "i" _FOFF(NTimerQ,iOrderedQ)); // r0->iOrderedQ, r1=orderedQ first
	asm("cmp r1, r0 ");
	asm("beq mstim_dfc_1 "); // ordered Q empty so move to next stage
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NTimer,iTriggerTime)); // r2=r1->trigger time
	asm("ldr r3, [r0, #-12] "); // r3=iMsCount
	asm("subs r3, r2, r3 "); // r3=trigger time-iMsCount
	asm("cmp r3, #31 "); // test if remaining time <32ms or has already passed
	asm("bgt mstim_dfc_1 "); // if >31ms, move to next stage (signed comparison to catch already passed case)
	asm("sub r0, r0, #%a0" : : "i" _FOFF(NTimerQ,iOrderedQ)); // r0->NTimerQ
	asm("bl dequeaddfinal "); // <=31ms, so deque and add to final queue
	SET_INTS_2(r11, MODE_SVC, INTS_ALL_ON); // let interrupts in
	__DEBUG_CALLBACK(0);
	asm("b mstim_dfc_0 ");

	asm("mstim_dfc_1: ");
	SET_INTS_2(r11, MODE_SVC, INTS_ALL_ON); // let interrupts in
	asm("sub r0, r0, #%a0" : : "i" _FOFF(NTimerQ,iOrderedQ)); // r0->NTimerQ
	__DEBUG_CALLBACK(1);

	// Next transfer entries on the Holding queue to the Ordered queue or final queue
	asm("mstim_dfc_2: ");
	SET_INTS_2(r10, MODE_SVC, INTS_ALL_OFF); // disable interrupts
	asm("ldr r1, [r0, #%a0]!" : : "i" _FOFF(NTimerQ,iHoldingQ)); // r0->iHoldingQ, r1=holdingQ first
	asm("cmp r1, r0 ");
	asm("beq mstim_dfc_3 "); // holding Q empty so move to next stage
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NTimer,iTriggerTime)); // r2=r1->trigger time
	asm("ldr r3, [r0, #-4] "); // r3=iMsCount
	asm("sub r0, r0, #%a0" : : "i" _FOFF(NTimerQ,iHoldingQ)); // r0->NTimerQ
	asm("subs r3, r2, r3 "); // r3=trigger time-iMsCount
	asm("cmp r3, #31 "); // test if remaining time <32ms or has already passed
	asm("bgt mstim_dfc_4 "); // if >31ms, need to put it on the ordered Q (signed comparison to catch late case)
	asm("bl dequeaddfinal "); // <=31ms or already passed, so deque and add to final queue

	asm("mstim_dfc_7: ");
	SET_INTS_2(r11, MODE_SVC, INTS_ALL_ON); // let interrupts in
	__DEBUG_CALLBACK(2);
	asm("b mstim_dfc_2 "); // process next holding Q entry

	// need to put entry r1 trigger time r2 on the ordered Q
	asm("mstim_dfc_4: ");
	asm("ldmia r1, {r3,r12} "); // r3=r1->next, r12=r1->prev
	asm("mov r9, #0 ");
	asm("strb r9, [r0, #%a0]" : : "i" _FOFF(NTimerQ,iTransferringCancelled)); // iTransferringCancelled=0
	asm("str r3, [r12, #0] "); // prev->next=next
	asm("str r12, [r3, #4] "); // next->prev=prev
	asm("mov r3, #%a0" : : "i" ((TInt)NTimer::ETransferring));
	asm("strb r3, [r1, #%a0]" : : "i" _FOFF(NTimer,iState)); // r1->iState=ETransferring

	asm("mstim_dfc_5: ");
	SET_INTS_2(r11, MODE_SVC, INTS_ALL_ON); // let interrupts in
	asm("add lr, r0, #%a0" : : "i" _FOFF(NTimerQ,iOrderedQ)); // lr=&iOrderedQ.iA
	__DEBUG_CALLBACK(3);
	SET_INTS_2(r10, MODE_SVC, INTS_ALL_OFF); // disable interrupts

	asm("mstim_dfc_9: ");
	asm("ldrb r12, [r0, #%a0]" : : "i" _FOFF(NTimerQ,iTransferringCancelled));
	asm("ldr r3, [lr, #0] "); // r3=iOrderedQ first
	asm("cmp r12, #0 ");
	asm("bne mstim_dfc_7 "); // Entry r1 has been cancelled so move to next one
	asm("strb r9, [r0, #%a0]" : : "i" _FOFF(NTimerQ,iCriticalCancelled)); // iCriticalCancelled=0

	// Walk iOrderedQ to find correct position for this entry
	asm("mstim_dfc_6: ");
	asm("cmp r3, lr "); // reached end of ordered Q?
	asm("ldrne r12, [r3, #%a0]" : : "i" _FOFF(NTimer,iTriggerTime)); // if not, r12=r3->trigger time
	asm("beq mstim_dfc_8 "); // branch if we have
	asm("mov r8, #%a0" : : "i" ((TInt)NTimer::ECritical));
	asm("subs r12, r12, r2 "); // r12=r3->trigger - r1->trigger
	asm("bpl mstim_dfc_8 "); // branch if r3 expires after r1
	asm("strb r8, [r3, #%a0]" : : "i" _FOFF(NTimer,iState)); // r3->iState=ECritical
	SET_INTS_2(r11, MODE_SVC, INTS_ALL_ON); // let interrupts in
	asm("mov r8, #%a0" : : "i" ((TInt)NTimer::EOrdered));
	__DEBUG_CALLBACK(4);
	SET_INTS_2(r10, MODE_SVC, INTS_ALL_OFF); // disable interrupts
	asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(NTimerQ,iTransferringCancelled));
	asm("tst r12, #0xff00 "); // test iCriticalCancelled
	asm("streqb r8, [r3, #%a0]" : : "i" _FOFF(NTimer,iState)); // if not set, r3->iState=EOrdered
	asm("cmp r12, #0 "); // test iTransferringCancelled and iCriticalCancelled
	asm("ldreq r3, [r3, #0] "); // if neither set r3=r3->iNext
	asm("beq mstim_dfc_6 "); // and inspect next ordered Q entry
	asm("b mstim_dfc_9 "); // if either set, start again from beginning of ordered Q

	asm("mstim_dfc_8: "); // if we get to here we need to insert r1 before r3
	asm("ldr r12, [r3, #4] "); // r12=r3->iPrev
	asm("mov r8, #%a0" : : "i" ((TInt)NTimer::EOrdered));
	asm("strb r8, [r1, #%a0]" : : "i" _FOFF(NTimer,iState)); // r1->iState=EOrdered
	asm("str r1, [r3, #4] "); // r3->prev=r1
	asm("str r1, [r12, #0] "); // r3->prev->next=r1
	asm("stmia r1, {r3,r12} "); // r1->next=r3, r1->prev=r3->prev
	SET_INTS_2(r11, MODE_SVC, INTS_ALL_ON); // let interrupts in
	__DEBUG_CALLBACK(5);
	asm("b mstim_dfc_2 "); // process next holding Q entry

	// Get here when all holding Q entries processed
	asm("mstim_dfc_3: ");
	SET_INTS_2(r11, MODE_SVC, INTS_ALL_ON); // let interrupts in
	__DEBUG_CALLBACK(6);
	asm("add r8, r0, #16 "); // r8->iCompletedQ

	// Finally do call backs for timers which requested DFC callback
	asm("mstim_dfc_10: ");
	SET_INTS_2(r10, MODE_SVC, INTS_ALL_OFF); // disable interrupts
	asm("ldr r9, [r8, #0] "); // r9=completed Q first
	asm("mov r3, #%a0" : : "i" ((TInt)NTimer::EIdle));
	asm("cmp r9, r8 "); // Is completed Q empty?
	asm("beq mstim_dfc_11 "); // branch out if it is
	asm("ldmia r9!, {r1,r2} "); // r1=r9->next, r2=r9->prev, r9->iPtr of completed entry
	asm("strb r3, [r9, #%a0]" : : "i" (_FOFF(NTimer,iState)-8)); // iState=EIdle for completed entry
	asm("ldmia r9, {r0,r3} "); // r0=iPtr, r3=function address
	ASM_KILL_LINK_OFFSET(r9,r12,-8);
	asm("str r1, [r2, #0] "); // prev->next=next
	asm("str r2, [r1, #4] "); // next->prev=prev
	SET_INTS_2(r11, MODE_SVC, INTS_ALL_ON); // let interrupts in
	__DEBUG_CALLBACK(7);
	asm("adr lr, mstim_dfc_10 "); // return to mstim_dfc_10
	__JUMP(,r3); // call back with r0=iPtr

	// All done
	asm("mstim_dfc_11: ");
	SET_INTS_2(r11, MODE_SVC, INTS_ALL_ON); // let interrupts in
	asm("ldmfd sp!, {r7-r11,pc} "); // and return

	// Subroutine dequeaddfinal
	// Deque the NTimer pointed to by r1 and put it on its final queue
	// Enter with r0->NTimerQ, r1->NTimer, r2=r1->iTriggerTime
	// Enter and leave with interrupts disabled
	// Can modify r1-r3,r8,r9,r12
	asm("dequeaddfinal: ");
	asm("ldmia r1, {r8,r9} "); // r8=r1->next, r9=r1->prev
	asm("mov r3, #%a0" : : "i" ((TInt)NTimer::EFinal));
	asm("strb r3, [r1, #%a0]" : : "i" _FOFF(NTimer,iState)); // iState=EFinal
	asm("str r8, [r9, #0] "); // prev->next=next
	asm("str r9, [r8, #4] "); // next->prev=prev
	asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(NTimerQ,iPresent)); // r12=timer iPresent
	asm("and r2, r2, #0x1f "); // r2=trigger time & 0x1f
	asm("mov r3, #1 ");
	asm("orr r12, r12, r3, lsl r2 "); // set bit in iPresent
	asm("ldrb r3, [r1, #%a0]" : : "i" _FOFF(NTimer,iCompleteInDfc)); // r3=iCompleteInDfc
	asm("str r12, [r0, #%a0]" : : "i" _FOFF(NTimerQ,iPresent));
	asm("add r2, r0, r2, lsl #4 "); // r2->iIntQ for this timer
	asm("cmp r3, #0 ");
	asm("addne r2, r2, #8 "); // if iCompleteInDfc, r2->iDfcQ for this timer
	asm("ldr r12, [r2, #4] "); // r12->last on queue
	asm("str r1, [r2, #4] "); // queue->last=this
	asm("str r1, [r12, #0] "); // last->next=this
	asm("stmia r1, {r2,r12} "); // this->next=&queue, this->prev=last on queue
	__JUMP(,lr);
	}
#endif