|
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\arm\ncthrd.cpp
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

#define __INCLUDE_REG_OFFSETS__
#include <arm.h>
#include <arm_gic.h>
#include <arm_scu.h>
#include <arm_tmr.h>
#include <nk_irq.h>

const TInt KNThreadMinStackSize = 0x100;    // needs to be enough for interrupt + reschedule stack

// Called by a thread when it first runs
extern "C" void __StartThread();

// Initialise CPU registers
extern void initialiseState(TInt aCpu, TSubScheduler* aSS);

extern "C" void ExcFault(TAny*);

extern TUint32 __mpid();
extern void InitAPTimestamp(SNThreadCreateInfo& aInfo);

TInt NThread::Create(SNThreadCreateInfo& aInfo, TBool aInitial)
    {
    // Assert ParameterBlockSize is not negative and is a multiple of 8 bytes
    __NK_ASSERT_ALWAYS((aInfo.iParameterBlockSize&0x80000007)==0);
    __NK_ASSERT_ALWAYS(aInfo.iStackBase && aInfo.iStackSize>=aInfo.iParameterBlockSize+KNThreadMinStackSize);
    TInt cpu = -1;
    new (this) NThread;
    if (aInitial)
        {
        cpu = __e32_atomic_add_ord32(&TheScheduler.iNumCpus, 1);
        aInfo.iCpuAffinity = cpu;
        // OK since we can't migrate yet
        TSubScheduler& ss = TheSubSchedulers[cpu];
        ss.iCurrentThread = this;
        iRunCount64 = UI64LIT(1);
        __KTRACE_OPT(KBOOT,DEBUGPRINT("Init: cpu=%d ss=%08x", cpu, &ss));
        if (cpu)
            {
            initialiseState(cpu,&ss);

            ArmLocalTimer& T = LOCAL_TIMER;
            T.iWatchdogDisable = E_ArmTmrWDD_1;
            T.iWatchdogDisable = E_ArmTmrWDD_2;
            T.iTimerCtrl = 0;
            T.iTimerIntStatus = E_ArmTmrIntStatus_Event;
            T.iWatchdogCtrl = 0;
            T.iWatchdogIntStatus = E_ArmTmrIntStatus_Event;

            NIrq::HwInit2AP();
            T.iTimerCtrl = E_ArmTmrCtrl_IntEn | E_ArmTmrCtrl_Reload | E_ArmTmrCtrl_Enable;

            __e32_atomic_ior_ord32(&TheScheduler.iActiveCpus1, 1<<cpu);
            __e32_atomic_ior_ord32(&TheScheduler.iActiveCpus2, 1<<cpu);
            __e32_atomic_ior_ord32(&TheScheduler.iCpusNotIdle, 1<<cpu);
            __KTRACE_OPT(KBOOT,DEBUGPRINT("AP MPID=%08x",__mpid()));
            }
        else
            {
            Arm::DefaultDomainAccess = Arm::Dacr();
            Arm::ModifyCar(0, 0x00f00000);      // full access to CP10, CP11
            Arm::DefaultCoprocessorAccess = Arm::Car();
            }
        }
    TInt r=NThreadBase::Create(aInfo,aInitial);
    if (r!=KErrNone)
        return r;
    if (!aInitial)
        {
        aInfo.iPriority = 0;
        TLinAddr stack_top = (TLinAddr)iStackBase + (TLinAddr)iStackSize;
        TLinAddr sp = stack_top;
        TUint32 pb = (TUint32)aInfo.iParameterBlock;
        SThreadStackStub* tss = 0;
        if (aInfo.iParameterBlockSize)
            {
            tss = (SThreadStackStub*)stack_top;
            --tss;
            tss->iExcCode = SThreadExcStack::EStub;
            tss->iR15 = 0;
            tss->iCPSR = 0;
            sp = (TLinAddr)tss;
            sp -= (TLinAddr)aInfo.iParameterBlockSize;
            wordmove((TAny*)sp, aInfo.iParameterBlock, aInfo.iParameterBlockSize);
            pb = (TUint32)sp;
            tss->iPBlock = sp;
            }
        SThreadInitStack* tis = (SThreadInitStack*)sp;
        --tis;
        memclr(tis, sizeof(SThreadInitStack));
        iSavedSP = (TLinAddr)tis;
#ifdef __CPU_HAS_VFP
        tis->iR.iFpExc = VFP_FPEXC_THRD_INIT;
#endif
        tis->iR.iCar = Arm::DefaultCoprocessorAccess;
        tis->iR.iDacr = Arm::DefaultDomainAccess;
        tis->iR.iSpsrSvc = MODE_SVC;
        tis->iR.iSPRschdFlg = TLinAddr(&tis->iX) | 1;
        tis->iR.iR15 = (TUint32)&__StartThread;

        tis->iX.iR0 = pb;
        tis->iX.iR4 = (TUint32)this;
        tis->iX.iR11 = stack_top;
        tis->iX.iExcCode = SThreadExcStack::EInit;
        tis->iX.iR15 = (TUint32)aInfo.iFunction;
        tis->iX.iCPSR = MODE_SVC;
        }
    else
        {
        NKern::EnableAllInterrupts();

        // start local timer
        ArmLocalTimer& T = LOCAL_TIMER;
        T.iTimerCtrl = E_ArmTmrCtrl_IntEn | E_ArmTmrCtrl_Reload | E_ArmTmrCtrl_Enable;

        // synchronize AP's timestamp with BP's
        if (cpu>0)
            InitAPTimestamp(aInfo);
        }
#ifdef BTRACE_THREAD_IDENTIFICATION
    BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadCreate,this);
#endif
    return KErrNone;
    }
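
// A sketch (illustrative, not normative) of the stack image built above for
// a non-initial thread with a parameter block. The thread first resumes in
// __StartThread with iSavedSP pointing at the SThreadInitStack; the embedded
// SThreadExcStack image (iX) carries R0 = pb and aInfo.iFunction as the
// initial PC.
//
//  stack_top ->  +---------------------------+
//                | SThreadStackStub          |  iExcCode=EStub, iPBlock -> copy
//                +---------------------------+
//                | copy of parameter block   |  pb points here
//        sp ->   +---------------------------+
//                | SThreadInitStack          |  iR (reschedule part), then iX
//  iSavedSP ->   +---------------------------+
//                | ... rest of stack ...     |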
|

/** Called from generic layer when thread is killed asynchronously.

For ARM, save the reason for the last user->kernel switch (if any) so that
the user context can be accessed from the EDebugEventRemoveThread hook. This
must be done before forcing the thread to exit, as exiting alters the saved
return address, which is used to figure out where the context is saved.

@pre kernel locked
@post kernel locked
*/
void NThreadBase::OnKill()
    {
    }

/** Called from generic layer when thread exits.

For ARM, record that if the thread terminated synchronously, the last
user->kernel switch was an exec call. Do nothing for non-user threads or if
the reason was already saved in OnKill().

@pre kernel locked
@post kernel locked
@see OnKill
*/
void NThreadBase::OnExit()
    {
    }
|

void DumpExcInfo(TArmExcInfo& a)
    {
    DEBUGPRINT("Exc %1d Cpsr=%08x FAR=%08x FSR=%08x",a.iExcCode,a.iCpsr,a.iFaultAddress,a.iFaultStatus);
    DEBUGPRINT(" R0=%08x R1=%08x R2=%08x R3=%08x",a.iR0,a.iR1,a.iR2,a.iR3);
    DEBUGPRINT(" R4=%08x R5=%08x R6=%08x R7=%08x",a.iR4,a.iR5,a.iR6,a.iR7);
    DEBUGPRINT(" R8=%08x R9=%08x R10=%08x R11=%08x",a.iR8,a.iR9,a.iR10,a.iR11);
    DEBUGPRINT("R12=%08x R13=%08x R14=%08x R15=%08x",a.iR12,a.iR13,a.iR14,a.iR15);
    DEBUGPRINT("R13Svc=%08x R14Svc=%08x SpsrSvc=%08x",a.iR13Svc,a.iR14Svc,a.iSpsrSvc);

    TInt irq = NKern::DisableAllInterrupts();
    TSubScheduler& ss = SubScheduler();
    NThreadBase* ct = ss.iCurrentThread;
    TInt inc = TInt(ss.i_IrqNestCount);
    TInt cpu = ss.iCpuNum;
    TInt klc = ss.iKernLockCount;
    NKern::RestoreInterrupts(irq);
    DEBUGPRINT("Thread %T, CPU %d, KLCount=%d, IrqNest=%d", ct, cpu, klc, inc);
    }

void DumpFullRegSet(SFullArmRegSet& a)
    {
    SNormalRegs& r = a.iN;
    DEBUGPRINT("MODE_USR:");
    DEBUGPRINT(" R0=%08x R1=%08x R2=%08x R3=%08x", r.iR0, r.iR1, r.iR2, r.iR3);
    DEBUGPRINT(" R4=%08x R5=%08x R6=%08x R7=%08x", r.iR4, r.iR5, r.iR6, r.iR7);
    DEBUGPRINT(" R8=%08x R9=%08x R10=%08x R11=%08x", r.iR8, r.iR9, r.iR10, r.iR11);
    DEBUGPRINT("R12=%08x R13=%08x R14=%08x R15=%08x", r.iR12, r.iR13, r.iR14, r.iR15);
    DEBUGPRINT("CPSR=%08x", r.iFlags);
    DEBUGPRINT("MODE_FIQ:");
    DEBUGPRINT(" R8=%08x R9=%08x R10=%08x R11=%08x", r.iR8Fiq, r.iR9Fiq, r.iR10Fiq, r.iR11Fiq);
    DEBUGPRINT("R12=%08x R13=%08x R14=%08x SPSR=%08x", r.iR12Fiq, r.iR13Fiq, r.iR14Fiq, r.iSpsrFiq);
    DEBUGPRINT("MODE_IRQ:");
    DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Irq, r.iR14Irq, r.iSpsrIrq);
    DEBUGPRINT("MODE_SVC:");
    DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Svc, r.iR14Svc, r.iSpsrSvc);
    DEBUGPRINT("MODE_ABT:");
    DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Abt, r.iR14Abt, r.iSpsrAbt);
    DEBUGPRINT("MODE_UND:");
    DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Und, r.iR14Und, r.iSpsrUnd);
//  DEBUGPRINT("MODE_MON:");
//  DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Mon, r.iR14Mon, r.iSpsrMon);

    SAuxiliaryRegs& aux = a.iA;
    DEBUGPRINT("TEEHBR=%08x CPACR=%08x", aux.iTEEHBR, aux.iCPACR);

    SBankedRegs& b = a.iB[0];
    DEBUGPRINT(" SCTLR=%08x ACTLR=%08x PRRR=%08x NMRR=%08x", b.iSCTLR, b.iACTLR, b.iPRRR, b.iNMRR);
    DEBUGPRINT("  DACR=%08x TTBR0=%08x TTBR1=%08x TTBCR=%08x", b.iDACR, b.iTTBR0, b.iTTBR1, b.iTTBCR);
    DEBUGPRINT("  VBAR=%08x FCSEID=%08x CTXIDR=%08x", b.iVBAR, b.iFCSEIDR, b.iCTXIDR);
    DEBUGPRINT("Thread ID RWRW=%08x RWRO=%08x RWNO=%08x", b.iRWRWTID, b.iRWROTID, b.iRWNOTID);
    DEBUGPRINT("  DFSR=%08x DFAR=%08x IFSR=%08x IFAR=%08x", b.iDFSR, b.iDFAR, b.iIFSR, b.iIFAR);
    DEBUGPRINT(" ADFSR=%08x AIFSR=%08x", b.iADFSR, b.iAIFSR);
#ifdef __CPU_HAS_VFP
    DEBUGPRINT("FPEXC %08x", a.iMore[0]);
#endif
    DEBUGPRINT("ExcCode %08x", a.iExcCode);
    }


#define CONTEXT_ELEMENT_UNDEFINED(val)                          \
    {                                                           \
    TArmContextElement::EUndefined,                             \
    val,                                                        \
    0,                                                          \
    0                                                           \
    }

#define CONTEXT_ELEMENT_EXCEPTION(reg)                          \
    {                                                           \
    TArmContextElement::EOffsetFromStackTop,                    \
    ((sizeof(SThreadExcStack)-_FOFF(SThreadExcStack,reg))>>2),  \
    0,                                                          \
    0                                                           \
    }

#define CONTEXT_ELEMENT_RESCHED(reg)                            \
    {                                                           \
    TArmContextElement::EOffsetFromSp,                          \
    (_FOFF(SThreadReschedStack,reg)>>2),                        \
    0,                                                          \
    0                                                           \
    }

#define CONTEXT_ELEMENT_RESCHED_SP()                            \
    {                                                           \
    TArmContextElement::EOffsetFromSpBic3,                      \
    (_FOFF(SThreadReschedStack,iSPRschdFlg)>>2),                \
    0,                                                          \
    0                                                           \
    }

#define CONTEXT_ELEMENT_RESCHED_SP_PLUS(offset)                 \
    {                                                           \
    TArmContextElement::EOffsetFromSpBic3_1,                    \
    (_FOFF(SThreadReschedStack,iSPRschdFlg)>>2),                \
    (offset),                                                   \
    0                                                           \
    }

#define CONTEXT_ELEMENT_RESCHED_SP_OFFSET(offset)               \
    {                                                           \
    TArmContextElement::EOffsetFromSpBic3_2,                    \
    (_FOFF(SThreadReschedStack,iSPRschdFlg)>>2),                \
    (offset),                                                   \
    0                                                           \
    }

#define CONTEXT_ELEMENT_RESCHED_IRQ(reg)                        \
    {                                                           \
    TArmContextElement::EOffsetFromSpBic3_2,                    \
    (_FOFF(SThreadReschedStack,iSPRschdFlg)>>2),                \
    ((_FOFF(SThreadIrqStack,reg)-sizeof(SThreadReschedStack))>>2), \
    0                                                           \
    }

#define CONTEXT_ELEMENT_RESCHED_INIT(reg)                       \
    {                                                           \
    TArmContextElement::EOffsetFromSpBic3_2,                    \
    (_FOFF(SThreadReschedStack,iSPRschdFlg)>>2),                \
    ((_FOFF(SThreadInitStack,reg)-sizeof(SThreadReschedStack))>>2), \
    0                                                           \
    }
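
// For illustration: CONTEXT_ELEMENT_EXCEPTION(iR0) expands to an initialiser
// equivalent to
//
//  { TArmContextElement::EOffsetFromStackTop,
//    (sizeof(SThreadExcStack) - _FOFF(SThreadExcStack,iR0)) >> 2,  // words below stack top
//    0, 0 }
//
// i.e. each table entry records how to locate one saved register relative to
// the supervisor stack, measured in 32-bit words.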
|

const TArmContextElement ContextTableException[] =
    {
    CONTEXT_ELEMENT_EXCEPTION(iR0),
    CONTEXT_ELEMENT_EXCEPTION(iR1),
    CONTEXT_ELEMENT_EXCEPTION(iR2),
    CONTEXT_ELEMENT_EXCEPTION(iR3),
    CONTEXT_ELEMENT_EXCEPTION(iR4),
    CONTEXT_ELEMENT_EXCEPTION(iR5),
    CONTEXT_ELEMENT_EXCEPTION(iR6),
    CONTEXT_ELEMENT_EXCEPTION(iR7),
    CONTEXT_ELEMENT_EXCEPTION(iR8),
    CONTEXT_ELEMENT_EXCEPTION(iR9),
    CONTEXT_ELEMENT_EXCEPTION(iR10),
    CONTEXT_ELEMENT_EXCEPTION(iR11),
    CONTEXT_ELEMENT_EXCEPTION(iR12),
    CONTEXT_ELEMENT_EXCEPTION(iR13usr),
    CONTEXT_ELEMENT_EXCEPTION(iR14usr),
    CONTEXT_ELEMENT_EXCEPTION(iR15),
    CONTEXT_ELEMENT_EXCEPTION(iCPSR),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

const TArmContextElement ContextTableUndefined[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(EUserMode),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for non-dying threads which have been preempted by an interrupt
// while in user mode.
const TArmContextElement ContextTableUserInterrupt[] =
    {
    CONTEXT_ELEMENT_EXCEPTION(iR0),
    CONTEXT_ELEMENT_EXCEPTION(iR1),
    CONTEXT_ELEMENT_EXCEPTION(iR2),
    CONTEXT_ELEMENT_EXCEPTION(iR3),
    CONTEXT_ELEMENT_EXCEPTION(iR4),
    CONTEXT_ELEMENT_EXCEPTION(iR5),
    CONTEXT_ELEMENT_EXCEPTION(iR6),
    CONTEXT_ELEMENT_EXCEPTION(iR7),
    CONTEXT_ELEMENT_EXCEPTION(iR8),
    CONTEXT_ELEMENT_EXCEPTION(iR9),
    CONTEXT_ELEMENT_EXCEPTION(iR10),
    CONTEXT_ELEMENT_EXCEPTION(iR11),
    CONTEXT_ELEMENT_EXCEPTION(iR12),
    CONTEXT_ELEMENT_EXCEPTION(iR13usr),
    CONTEXT_ELEMENT_EXCEPTION(iR14usr),
    CONTEXT_ELEMENT_EXCEPTION(iR15),
    CONTEXT_ELEMENT_EXCEPTION(iCPSR),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for threads which have been preempted by an interrupt while in
// supervisor mode in the SWI handler either before the return address was
// saved or after the registers were restored.
const TArmContextElement ContextTableSvsrInterrupt1[] =
    {
    CONTEXT_ELEMENT_EXCEPTION(iR0),
    CONTEXT_ELEMENT_EXCEPTION(iR1),
    CONTEXT_ELEMENT_EXCEPTION(iR2),
    CONTEXT_ELEMENT_EXCEPTION(iR3),
    CONTEXT_ELEMENT_EXCEPTION(iR4),
    CONTEXT_ELEMENT_EXCEPTION(iR5),
    CONTEXT_ELEMENT_EXCEPTION(iR6),
    CONTEXT_ELEMENT_EXCEPTION(iR7),
    CONTEXT_ELEMENT_EXCEPTION(iR8),
    CONTEXT_ELEMENT_EXCEPTION(iR9),
    CONTEXT_ELEMENT_EXCEPTION(iR10),
    CONTEXT_ELEMENT_EXCEPTION(iR11),
    CONTEXT_ELEMENT_EXCEPTION(iR12),
    CONTEXT_ELEMENT_EXCEPTION(iR13usr),
    CONTEXT_ELEMENT_EXCEPTION(iR14usr),
    CONTEXT_ELEMENT_EXCEPTION(iR15),
    CONTEXT_ELEMENT_UNDEFINED(EUserMode),   // can't get flags so just use 'user mode'
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for non-dying threads blocked on their request semaphore.
const TArmContextElement ContextTableWFAR[] =
    {
    CONTEXT_ELEMENT_EXCEPTION(iR0),
    CONTEXT_ELEMENT_EXCEPTION(iR1),
    CONTEXT_ELEMENT_EXCEPTION(iR2),
    CONTEXT_ELEMENT_EXCEPTION(iR3),
    CONTEXT_ELEMENT_EXCEPTION(iR4),
    CONTEXT_ELEMENT_EXCEPTION(iR5),
    CONTEXT_ELEMENT_EXCEPTION(iR6),
    CONTEXT_ELEMENT_EXCEPTION(iR7),
    CONTEXT_ELEMENT_EXCEPTION(iR8),
    CONTEXT_ELEMENT_EXCEPTION(iR9),
    CONTEXT_ELEMENT_EXCEPTION(iR10),
    CONTEXT_ELEMENT_EXCEPTION(iR11),
    CONTEXT_ELEMENT_EXCEPTION(iR12),
    CONTEXT_ELEMENT_EXCEPTION(iR13usr),
    CONTEXT_ELEMENT_EXCEPTION(iR14usr),
    CONTEXT_ELEMENT_EXCEPTION(iR15),
    CONTEXT_ELEMENT_EXCEPTION(iCPSR),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

const TArmContextElement ContextTableExec[] =
    {
    CONTEXT_ELEMENT_EXCEPTION(iR0),
    CONTEXT_ELEMENT_EXCEPTION(iR1),
    CONTEXT_ELEMENT_EXCEPTION(iR2),
    CONTEXT_ELEMENT_EXCEPTION(iR3),
    CONTEXT_ELEMENT_EXCEPTION(iR4),
    CONTEXT_ELEMENT_EXCEPTION(iR5),
    CONTEXT_ELEMENT_EXCEPTION(iR6),
    CONTEXT_ELEMENT_EXCEPTION(iR7),
    CONTEXT_ELEMENT_EXCEPTION(iR8),
    CONTEXT_ELEMENT_EXCEPTION(iR9),
    CONTEXT_ELEMENT_EXCEPTION(iR10),
    CONTEXT_ELEMENT_EXCEPTION(iR11),
    CONTEXT_ELEMENT_EXCEPTION(iR12),
    CONTEXT_ELEMENT_EXCEPTION(iR13usr),
    CONTEXT_ELEMENT_EXCEPTION(iR14usr),
    CONTEXT_ELEMENT_EXCEPTION(iR15),
    CONTEXT_ELEMENT_EXCEPTION(iCPSR),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used to retrieve a thread's kernel side context at the point where
// Reschedule() returns.
// Used for kernel threads.
const TArmContextElement ContextTableKernel[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_RESCHED_SP(),           // supervisor stack pointer before reschedule
    CONTEXT_ELEMENT_UNDEFINED(0),           // supervisor lr is unknown
    CONTEXT_ELEMENT_RESCHED(iR15),          // return address from reschedule
    CONTEXT_ELEMENT_UNDEFINED(ESvcMode),    // can't get flags so just use 'svc mode'
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used to retrieve a thread's kernel side context at the point where
// NKern::Unlock() or NKern::PreemptionPoint() returns.
// Used for kernel threads.
const TArmContextElement ContextTableKernel1[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(4),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(8),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(12),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(16),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(20),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(24),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(28),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(32),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_RESCHED_SP_PLUS(40),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(36),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(36),
    CONTEXT_ELEMENT_UNDEFINED(ESvcMode),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used to retrieve a thread's kernel side context at the point where
// NKern::FSWait() or NKern::WaitForAnyRequest() returns.
// Used for kernel threads.
const TArmContextElement ContextTableKernel2[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(4),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(8),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(12),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(16),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(20),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(24),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(28),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(32),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_RESCHED_SP_PLUS(40),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(36),
    CONTEXT_ELEMENT_RESCHED_SP_OFFSET(36),
    CONTEXT_ELEMENT_UNDEFINED(ESvcMode),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used to retrieve a thread's kernel side context at the point where
// an interrupt taken in supervisor mode returns.
// Used for kernel threads.
const TArmContextElement ContextTableKernel3[] =
    {
    CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR0),
    CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR1),
    CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR2),
    CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR3),
    CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR4),
    CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR5),
    CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR6),
    CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR7),
    CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR8),
    CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR9),
    CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR10),
    CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR11),
    CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR12),
    CONTEXT_ELEMENT_RESCHED_SP_PLUS((sizeof(SThreadExcStack)+8)),
    CONTEXT_ELEMENT_RESCHED_IRQ(iR14svc),
    CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR15),
    CONTEXT_ELEMENT_RESCHED_IRQ(iX.iCPSR),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used to retrieve a thread's kernel side context at the point where
// Exec::WaitForAnyRequest() returns.
// Used for kernel threads.
const TArmContextElement ContextTableKernel4[] =
    {
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iR0),
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iR1),
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iR2),
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iR3),
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iR4),
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iR5),
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iR6),
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iR7),
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iR8),
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iR9),
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iR10),
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iR11),
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iR12),
    CONTEXT_ELEMENT_RESCHED_SP_PLUS(sizeof(SThreadExcStack)),
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iR15),
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iR15),
    CONTEXT_ELEMENT_RESCHED_INIT(iX.iCPSR),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

const TArmContextElement* const ThreadUserContextTables[] =
    {
    ContextTableUndefined,          // EContextNone
    ContextTableException,          // EContextException
    ContextTableUndefined,          // EContextUndefined
    ContextTableUserInterrupt,      // EContextUserInterrupt
    ContextTableUndefined,          // EContextUserInterruptDied (not used)
    ContextTableSvsrInterrupt1,     // EContextSvsrInterrupt1
    ContextTableUndefined,          // EContextSvsrInterrupt1Died (not used)
    ContextTableUndefined,          // EContextSvsrInterrupt2 (not used)
    ContextTableUndefined,          // EContextSvsrInterrupt2Died (not used)
    ContextTableWFAR,               // EContextWFAR
    ContextTableUndefined,          // EContextWFARDied (not used)
    ContextTableExec,               // EContextExec
    ContextTableKernel,             // EContextKernel
    ContextTableKernel1,            // EContextKernel1
    ContextTableKernel2,            // EContextKernel2
    ContextTableKernel3,            // EContextKernel3
    ContextTableKernel4,            // EContextKernel4
    0                               // Null terminated
    };

/** Return table of pointers to user context tables.

Each user context table is an array of TArmContextElement objects, one per
ARM CPU register, in the order defined in TArmRegisters.

The master table contains pointers to the user context tables in the order
defined in TUserContextType. There are as many user context tables as there
are scenarios that lead a user thread to switch to privileged mode.

Stop-mode debug agents should use this function to store the address of the
master table at a location known to the host debugger. Run-mode debug
agents are advised to use NKern::GetUserContext() and
NKern::SetUserContext() instead.

@return A pointer to the master table. The master table is NULL
terminated. The master and user context tables are guaranteed to remain at
the same location for the lifetime of the OS execution, so it is safe to
cache the returned address.

@see UserContextType
@see TArmContextElement
@see TArmRegisters
@see TUserContextType
@see NKern::SetUserContext
@see NKern::GetUserContext

@publishedPartner
*/
EXPORT_C const TArmContextElement* const* NThread::UserContextTables()
    {
    return &ThreadUserContextTables[0];
    }
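
// A minimal usage sketch (stop-mode debugger side), using only names defined
// above: select the per-scenario table from the thread's context type, then
// interpret each element to locate one register. The element field layout is
// not visible in this file, so treat that part as an assumption.
//
//  const TArmContextElement* const* master = NThread::UserContextTables();
//  const TArmContextElement* table = master[aThread->UserContextType()];
//  // table[0] describes R0, table[1] R1, ... in TArmRegisters order.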
|

/** Get a value which indicates where a thread's user mode context is stored.

@return A value that can be used as an index into the tables returned by
NThread::UserContextTables().

@pre any context
@pre kernel locked
@post kernel locked

@see UserContextTables
@publishedPartner
*/
EXPORT_C NThread::TUserContextType NThread::UserContextType()
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED,"NThread::UserContextType");

    /*
    The SMP nanokernel always saves R0-R12,R13usr,R14usr,ExcCode,PC,CPSR on any
    entry to the kernel, so getting the user context is always the same.
    The only possible problem is an FIQ occurring immediately after any other
    exception, before the registers have been saved. In this case the registers
    saved by the FIQ will be the ones observed and they will be correct except
    that the CPSR value will indicate a mode other than USR, which can be used
    to detect the condition.
    */
    return EContextException;
    }
|

// Enter and return with kernel locked
void NThread::GetUserContext(TArmRegSet& aContext, TUint32& aAvailRegistersMask)
    {
    NThread* pC = NCurrentThreadL();
    TSubScheduler* ss = 0;
    if (pC != this)
        {
        AcqSLock();
        if (iWaitState.ThreadIsDead())
            {
            RelSLock();
            aAvailRegistersMask = 0;
            return;
            }
        if (iReady && iParent->iReady)
            {
            ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
            ss->iReadyListLock.LockOnly();
            }
        if (iCurrent)
            {
            // thread is actually running on another CPU
            // interrupt that CPU and wait for it to enter interrupt mode
            // this allows a snapshot of the thread user state to be observed
            // and ensures the thread cannot return to user mode
            send_resched_ipi_and_wait(iLastCpu);
            }
        }
    SThreadExcStack* txs = (SThreadExcStack*)(TLinAddr(iStackBase) + TLinAddr(iStackSize));
    --txs;
    if (txs->iExcCode <= SThreadExcStack::EInit)    // if not, thread never entered user mode
        {
        aContext.iR0 = txs->iR0;
        aContext.iR1 = txs->iR1;
        aContext.iR2 = txs->iR2;
        aContext.iR3 = txs->iR3;
        aContext.iR4 = txs->iR4;
        aContext.iR5 = txs->iR5;
        aContext.iR6 = txs->iR6;
        aContext.iR7 = txs->iR7;
        aContext.iR8 = txs->iR8;
        aContext.iR9 = txs->iR9;
        aContext.iR10 = txs->iR10;
        aContext.iR11 = txs->iR11;
        aContext.iR12 = txs->iR12;
        aContext.iR13 = txs->iR13usr;
        aContext.iR14 = txs->iR14usr;
        aContext.iR15 = txs->iR15;
        aContext.iFlags = txs->iCPSR;
        if ((aContext.iFlags & 0x1f) == 0x10)
            aAvailRegistersMask = 0x1ffffu;     // R0-R15,CPSR all valid
        else
            {
            aContext.iFlags = 0x10;             // account for FIQ in SVC case
            aAvailRegistersMask = 0x0ffffu;     // CPSR not valid
            }
        }
    if (pC != this)
        {
        if (ss)
            ss->iReadyListLock.UnlockOnly();
        RelSLock();
        }
    }

class TGetContextIPI : public TGenericIPI
    {
public:
    void Get(TInt aCpu, TArmRegSet& aContext, TUint32& aAvailRegistersMask);
    static void Isr(TGenericIPI*);
public:
    TArmRegSet* iContext;
    TUint32* iAvailRegsMask;
    };

extern "C" TLinAddr get_sp_svc();
extern "C" TLinAddr get_lr_svc();
extern "C" TInt get_kernel_context_type(TLinAddr /*aReschedReturn*/);

void TGetContextIPI::Isr(TGenericIPI* aPtr)
    {
    TGetContextIPI& ipi = *(TGetContextIPI*)aPtr;
    TArmRegSet& a = *ipi.iContext;
    SThreadExcStack* txs = (SThreadExcStack*)get_sp_svc();
    a.iR0 = txs->iR0;
    a.iR1 = txs->iR1;
    a.iR2 = txs->iR2;
    a.iR3 = txs->iR3;
    a.iR4 = txs->iR4;
    a.iR5 = txs->iR5;
    a.iR6 = txs->iR6;
    a.iR7 = txs->iR7;
    a.iR8 = txs->iR8;
    a.iR9 = txs->iR9;
    a.iR10 = txs->iR10;
    a.iR11 = txs->iR11;
    a.iR12 = txs->iR12;
    a.iR13 = TUint32(txs) + sizeof(SThreadExcStack);
    a.iR14 = get_lr_svc();
    a.iR15 = txs->iR15;
    a.iFlags = txs->iCPSR;
    *ipi.iAvailRegsMask = 0x1ffffu;
    }

void TGetContextIPI::Get(TInt aCpu, TArmRegSet& aContext, TUint32& aAvailRegsMask)
    {
    iContext = &aContext;
    iAvailRegsMask = &aAvailRegsMask;
    Queue(&Isr, 1u<<aCpu);
    WaitCompletion();
    }

void GetRegs(TArmRegSet& aContext, TLinAddr aStart, TUint32 aMask)
    {
    TUint32* d = (TUint32*)&aContext;
    const TUint32* s = (const TUint32*)aStart;
    for (; aMask; aMask>>=1, ++d)
        {
        if (aMask & 1)
            *d = *s++;
        }
    }
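
// For illustration: the mask passed to GetRegs() selects destination words in
// TArmRegSet (bit 0 = iR0, bit 13 = iR13, bit 15 = iR15, bit 16 = iFlags)
// while source words are consumed contiguously from aStart. E.g. (sketch):
//
//  GetRegs(aContext, sp+4, 0x08ff0u);  // fills iR4..iR11 and iR15 from sp+4 onwards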
|

// Enter and return with kernel locked
void NThread::GetSystemContext(TArmRegSet& aContext, TUint32& aAvailRegsMask)
    {
    aAvailRegsMask = 0;
    NThread* pC = NCurrentThreadL();
    __NK_ASSERT_ALWAYS(pC!=this);
    TSubScheduler* ss = 0;
    AcqSLock();
    if (iWaitState.ThreadIsDead())
        {
        RelSLock();
        return;
        }
    if (iReady && iParent->iReady)
        {
        ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
        ss->iReadyListLock.LockOnly();
        }
    if (iCurrent)
        {
        // thread is actually running on another CPU
        // use an interprocessor interrupt to get a snapshot of the state
        TGetContextIPI ipi;
        ipi.Get(iLastCpu, aContext, aAvailRegsMask);
        }
    else
        {
        // thread is not running and can't start
        SThreadReschedStack* trs = (SThreadReschedStack*)iSavedSP;
        TInt kct = get_kernel_context_type(trs->iR15);
        __NK_ASSERT_ALWAYS(kct>=0); // couldn't match return address from reschedule
        TLinAddr sp = trs->iSPRschdFlg &~ 3;
        switch (kct)
            {
            case 0:     // thread not yet started
            case 5:     // Exec::WaitForAnyRequest()
                GetRegs(aContext, sp, 0x01fffu);
                aContext.iR13 = sp + sizeof(SThreadExcStack);
                GetRegs(aContext, sp+64, 0x18000u);
                aAvailRegsMask = 0x1bfffu;
                break;
            case 1:     // unlock
            case 2:     // preemption point
            case 3:     // NKern::WaitForAnyRequest() or NKern::FSWait()
                GetRegs(aContext, sp+4, 0x08ff0u);
                aContext.iR14 = aContext.iR15;
                aContext.iR13 = sp+40;
                aAvailRegsMask = 0x0eff0u;
                break;
            case 4:     // IRQ/FIQ
                GetRegs(aContext, sp+4, 0x04000u);
                GetRegs(aContext, sp+8, 0x01fffu);
                GetRegs(aContext, sp+64, 0x18000u);
                aContext.iR13 = sp + sizeof(SThreadExcStack) + 8;
                aAvailRegsMask = 0x1ffffu;
                break;
            default:
                __NK_ASSERT_ALWAYS(0);
            }
        }
    if (ss)
        ss->iReadyListLock.UnlockOnly();
    RelSLock();
    }
|

// Enter and return with kernel locked
void NThread::SetUserContext(const TArmRegSet& aContext, TUint32& aRegMask)
    {
    NThread* pC = NCurrentThreadL();
    TSubScheduler* ss = 0;
    if (pC != this)
        {
        AcqSLock();
        if (iWaitState.ThreadIsDead())
            {
            RelSLock();
            aRegMask = 0;
            return;
            }
        if (iReady && iParent->iReady)
            {
            ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
            ss->iReadyListLock.LockOnly();
            }
        if (iCurrent)
            {
            // thread is actually running on another CPU
            // interrupt that CPU and wait for it to enter interrupt mode
            // this allows a snapshot of the thread user state to be observed
            // and ensures the thread cannot return to user mode
            send_resched_ipi_and_wait(iLastCpu);
            }
        }
    SThreadExcStack* txs = (SThreadExcStack*)(TLinAddr(iStackBase) + TLinAddr(iStackSize));
    --txs;
    aRegMask &= 0x1ffffu;
    if (txs->iExcCode <= SThreadExcStack::EInit)    // if not, thread never entered user mode
        {
        if (aRegMask & 0x0001u)
            txs->iR0 = aContext.iR0;
        if (aRegMask & 0x0002u)
            txs->iR1 = aContext.iR1;
        if (aRegMask & 0x0004u)
            txs->iR2 = aContext.iR2;
        if (aRegMask & 0x0008u)
            txs->iR3 = aContext.iR3;
        if (aRegMask & 0x0010u)
            txs->iR4 = aContext.iR4;
        if (aRegMask & 0x0020u)
            txs->iR5 = aContext.iR5;
        if (aRegMask & 0x0040u)
            txs->iR6 = aContext.iR6;
        if (aRegMask & 0x0080u)
            txs->iR7 = aContext.iR7;
        if (aRegMask & 0x0100u)
            txs->iR8 = aContext.iR8;
        if (aRegMask & 0x0200u)
            txs->iR9 = aContext.iR9;
        if (aRegMask & 0x0400u)
            txs->iR10 = aContext.iR10;
        if (aRegMask & 0x0800u)
            txs->iR11 = aContext.iR11;
        if (aRegMask & 0x1000u)
            txs->iR12 = aContext.iR12;
        if (aRegMask & 0x2000u)
            txs->iR13usr = aContext.iR13;
        if (aRegMask & 0x4000u)
            txs->iR14usr = aContext.iR14;
        if (aRegMask & 0x8000u)
            txs->iR15 = aContext.iR15;
        // Assert that target thread is in USR mode, and update only the flags part of the PSR
        __NK_ASSERT_ALWAYS((txs->iCPSR & 0x1f) == 0x10);
        if (aRegMask & 0x10000u)
            {
            // NZCVQ.......GE3-0................
            const TUint32 writableFlags = 0xF80F0000;
            txs->iCPSR &= ~writableFlags;
            txs->iCPSR |= aContext.iFlags & writableFlags;
            }
        }
    else
        aRegMask = 0;
    if (pC != this)
        {
        if (ss)
            ss->iReadyListLock.UnlockOnly();
        RelSLock();
        }
    }
|

/** Get (subset of) user context of specified thread.

The nanokernel does not systematically save all registers on the supervisor
stack on entry into privileged mode, and the exact subset depends on why the
switch to privileged mode occurred. So in general only a subset of the
register set is available.

@param aThread Thread to inspect. It can be the current thread or a
non-current one.

@param aContext Pointer to TArmRegSet structure where the context is
copied.

@param aAvailRegistersMask Bit mask telling which subset of the context is
available and has been copied to aContext (1: register available / 0: not
available). Bit 0 stands for register R0.

@see TArmRegSet
@see ThreadSetUserContext

@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadGetUserContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetUserContext");
    TArmRegSet& a = *(TArmRegSet*)aContext;
    memclr(aContext, sizeof(TArmRegSet));
    NKern::Lock();
    aThread->GetUserContext(a, aAvailRegistersMask);
    NKern::Unlock();
    }
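
// A minimal caller-side sketch for the API above (aThread is an assumption
// for illustration):
//
//  TArmRegSet regs;
//  TUint32 avail;
//  NKern::ThreadGetUserContext(aThread, &regs, avail);
//  if (avail & 0x8000u)                    // bit 15 = R15 (PC)
//      DEBUGPRINT("PC=%08x", regs.iR15);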
|

/** Get (subset of) system context of specified thread.

@param aThread Thread to inspect. It can be the current thread or a
non-current one.

@param aContext Pointer to TArmRegSet structure where the context is
copied.

@param aAvailRegistersMask Bit mask telling which subset of the context is
available and has been copied to aContext (1: register available / 0: not
available). Bit 0 stands for register R0.

@see TArmRegSet
@see ThreadSetUserContext

@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadGetSystemContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetSystemContext");
    TArmRegSet& a = *(TArmRegSet*)aContext;
    memclr(aContext, sizeof(TArmRegSet));
    NKern::Lock();
    aThread->GetSystemContext(a, aAvailRegistersMask);
    NKern::Unlock();
    }

/** Set (subset of) user context of specified thread.

@param aThread Thread to modify. It can be the current thread or a
non-current one.

@param aContext Pointer to TArmRegSet structure containing the context
to set. The values of registers which aren't part of the context saved
on the supervisor stack are ignored.

@see TArmRegSet
@see ThreadGetUserContext

@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadSetUserContext(NThread* aThread, TAny* aContext)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadSetUserContext");
    TArmRegSet& a = *(TArmRegSet*)aContext;
    TUint32 mask = 0x1ffffu;
    NKern::Lock();
    aThread->SetUserContext(a, mask);
    NKern::Unlock();
    }
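
// A sketch of patching a single register via the APIs above (aThread is an
// assumption for illustration):
//
//  TArmRegSet regs;
//  TUint32 avail;
//  NKern::ThreadGetUserContext(aThread, &regs, avail);
//  regs.iR0 = KErrCancel;                  // e.g. alter the user-mode return value
//  NKern::ThreadSetUserContext(aThread, &regs);
//
// Note that SetUserContext only updates registers actually saved on the
// supervisor stack, and only the NZCVQ/GE bits of the CPSR.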
|


#ifdef __CPU_HAS_VFP
extern void VfpContextSave(void*);
#endif
/** Complete the saving of a thread's context

This saves the VFP/NEON registers if necessary once we know that we are definitely
switching threads.

@internalComponent
*/
void NThread::CompleteContextSave()
    {
#ifdef __CPU_HAS_VFP
    if (Arm::VfpThread[NKern::CurrentCpu()] == this)
        {
        VfpContextSave(iExtraContext);  // Disables VFP
        }
#endif
    }


extern "C" TInt HandleSpecialOpcode(TArmExcInfo* aContext, TInt aType)
    {
    TUint32 cpsr = aContext->iCpsr;
    TUint32 mode = cpsr & 0x1f;
    TUint32 opcode = aContext->iFaultStatus;

    // Coprocessor abort from CP15 or E7FFDEFF -> crash immediately
    if ( (aType==15 && opcode!=0xee000f20)
      || (aType==32 && opcode==0xe7ffdeff)
      || (aType==33 && opcode==0xdeff)
       )
        {
        if (mode != 0x10)
            ExcFault(aContext);     // crash instruction in privileged mode
        return 0;                   // crash instruction in user mode - handle normally
        }
    if ( (aType==15 && opcode==0xee000f20)
      || (aType==32 && opcode==0xe7ffdefc)
      || (aType==33 && opcode==0xdefc)
       )
        {
        // checkpoint
        __KTRACE_OPT(KPANIC,DumpExcInfo(*aContext));
        if (aType==32)
            aContext->iR15 += 4;
        else
            aContext->iR15 += 2;
        return 1;
        }
    return 0;
    }
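
// For reference, the encodings recognised above (as used in this handler):
//
//  0xE7FFDEFF (ARM) / 0xDEFF (Thumb)   crash opcodes: fault if executed privileged
//  0xE7FFDEFC (ARM) / 0xDEFC (Thumb)   checkpoint opcodes: dump state, step PC
//  0xEE000F20                          CP15 access also treated as a checkpoint;
//                                      any other CP15 abort crashes immediately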
|

/** Return the total CPU time so far used by the specified thread.

@return The total CPU time in units of 1/NKern::CpuTimeMeasFreq().
*/
EXPORT_C TUint64 NKern::ThreadCpuTime(NThread* aThread)
    {
    TSubScheduler* ss = 0;
    NKern::Lock();
    aThread->AcqSLock();
    if (aThread->i_NThread_Initial)
        ss = &TheSubSchedulers[aThread->iLastCpu];
    else if (aThread->iReady && aThread->iParent->iReady)
        ss = &TheSubSchedulers[aThread->iParent->iReady & NSchedulable::EReadyCpuMask];
    if (ss)
        ss->iReadyListLock.LockOnly();
    TUint64 t = aThread->iTotalCpuTime64;
    if (aThread->iCurrent || (aThread->i_NThread_Initial && !ss->iCurrentThread))
        t += (NKern::Timestamp() - ss->iLastTimestamp64);
    if (ss)
        ss->iReadyListLock.UnlockOnly();
    aThread->RelSLock();
    NKern::Unlock();
    return t;
    }
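
// A sketch of converting the returned tick count to microseconds, assuming
// the measurement frequency and thread pointer used below for illustration:
//
//  TUint64 ticks = NKern::ThreadCpuTime(aThread);
//  TUint64 us = (ticks * 1000000u) / TUint64(NKern::CpuTimeMeasFreq());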
|

TInt NKern::QueueUserModeCallback(NThreadBase* aThread, TUserModeCallback* aCallback)
    {
    __e32_memory_barrier();
    if (aCallback->iNext != KUserModeCallbackUnqueued)
        return KErrInUse;
    TInt result = KErrDied;
    NKern::Lock();
    TUserModeCallback* listHead = aThread->iUserModeCallbacks;
    do  {
        if (TLinAddr(listHead) & 3)
            goto done;  // thread exiting
        aCallback->iNext = listHead;
        } while (!__e32_atomic_cas_ord_ptr(&aThread->iUserModeCallbacks, &listHead, aCallback));
    result = KErrNone;

    if (!listHead)  // if this isn't the first callback, someone else will have done this bit
        {
        /*
         * If aThread is currently running on another CPU we need to send an IPI so
         * that it will enter kernel mode and run the callback.
         * The synchronization is tricky here. We want to check if the thread is
         * running and if so on which core. We need to avoid any possibility of
         * the thread entering user mode without having seen the callback,
         * either because we thought it wasn't running so didn't send an IPI or
         * because the thread migrated after we looked and we sent the IPI to
         * the wrong processor. Sending a redundant IPI is not a problem (e.g.
         * because the thread is running in kernel mode - which we can't tell -
         * or because the thread stopped running after we looked).
         * The following events are significant:
         * Event A: Target thread writes to iCurrent when it starts running
         * Event B: Target thread reads iUserModeCallbacks before entering user
         *          mode
         * Event C: This thread writes to iUserModeCallbacks
         * Event D: This thread reads iCurrent to check if aThread is running
         * There is a DMB and DSB between A and B since A occurs with the ready
         * list lock for the CPU involved or the thread lock for aThread held
         * and this lock is released before B occurs.
         * There is a DMB between C and D (part of __e32_atomic_cas_ord_ptr).
         * Any observer which observes B must also have observed A.
         * Any observer which observes D must also have observed C.
         * If aThread observes B before C (i.e. enters user mode without running
         * the callback) it must observe A before C and so it must also observe
         * A before D (i.e. D reads the correct value for iCurrent).
         */
        TInt current = aThread->iCurrent;
        if (current)
            {
            TInt cpu = current & NSchedulable::EReadyCpuMask;
            if (cpu != NKern::CurrentCpu())
                send_resched_ipi(cpu);
            }
        }
done:
    NKern::Unlock();
    return result;
    }
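
// A heavily hedged usage sketch: the layout of TUserModeCallback beyond the
// iNext link checked above is not shown in this file, so the construction
// below (including MyCallbackFn) is an assumption for illustration only.
//
//  TUserModeCallback cb(MyCallbackFn);     // hypothetical; iNext must start as KUserModeCallbackUnqueued
//  TInt r = NKern::QueueUserModeCallback(aThread, &cb);
//  if (r == KErrInUse) { /* already queued on some thread */ }
//  if (r == KErrDied)  { /* thread exiting; callback was not queued */ }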