// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkern\arm\ncthrd.cpp
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

#define __INCLUDE_REG_OFFSETS__
#include <arm.h>

const TInt KNThreadMinStackSize = 0x100; // needs to be enough for interrupt + reschedule stack

// Called by a thread when it first runs
extern void __StartThread();

// Called by a thread which has been forced to exit
// Interrupts off here, kernel unlocked
extern void __DoForcedExit();

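// Set the function that the thread will execute when it is next started via
// __StartThread; the entry function is held in the saved R5 slot of the
// thread's register frame (see NThread::Create below).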
void NThreadBase::SetEntry(NThreadFunction aFunc)
    {
    TUint32* sp=(TUint32*)iSavedSP;
    sp[SP_R5]=(TUint32)aFunc;
    }

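// Create a nanokernel thread. For all threads except the initial one this
// builds the initial supervisor-stack frame: the parameter block is copied to
// the top of the stack and a register frame is pushed so that the first
// reschedule into the thread "returns" to __StartThread with the entry
// function in R5, the parameter block pointer in R6 and the NThread pointer
// in R4. For the initial thread, coprocessor access is set up and interrupts
// are enabled instead.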
TInt NThread::Create(SNThreadCreateInfo& aInfo, TBool aInitial)
    {
    // Assert ParameterBlockSize is not negative and is a multiple of 8 bytes
    __NK_ASSERT_ALWAYS((aInfo.iParameterBlockSize&0x80000007)==0);

    __NK_ASSERT_ALWAYS(aInfo.iStackBase && aInfo.iStackSize>=aInfo.iParameterBlockSize+KNThreadMinStackSize);
    TInt r=NThreadBase::Create(aInfo,aInitial);
    if (r!=KErrNone)
        return r;
    if (!aInitial)
        {
        TUint32* sp=(TUint32*)(iStackBase+iStackSize-aInfo.iParameterBlockSize);
        TUint32 r6=(TUint32)aInfo.iParameterBlock;
        if (aInfo.iParameterBlockSize)
            {
            wordmove(sp,aInfo.iParameterBlock,aInfo.iParameterBlockSize);
            r6=(TUint32)sp;
            }
        *--sp=(TUint32)__StartThread; // PC
        *--sp=0; // R11
        *--sp=0; // R10
        *--sp=0; // R9
        *--sp=0; // R8
        *--sp=0; // R7
        *--sp=r6; // R6
        *--sp=(TUint32)aInfo.iFunction; // R5
        *--sp=(TUint32)this; // R4
        *--sp=0x13; // SPSR_SVC
        *--sp=0; // R14_USR
        *--sp=0; // R13_USR
#ifdef __CPU_ARM_USE_DOMAINS
        *--sp=Arm::DefaultDomainAccess; // DACR
#endif
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
        *--sp=Arm::DefaultCoprocessorAccess; // CAR
#endif
#ifdef __CPU_HAS_VFP
        *--sp=VFP_FPEXC_THRD_INIT; // FPEXC
#endif
#ifdef __CPU_HAS_CP15_THREAD_ID_REG
        *--sp=0; // TID
#endif
#ifdef __CPU_SUPPORT_THUMB2EE
        *--sp=0; // ThumbEE Base
#endif
        iSavedSP=(TLinAddr)sp;
        }
    else
        {
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
#ifdef __CPU_HAS_VFP
#ifdef __CPU_XSCALE__
        Arm::ModifyCar(0, 0x0c00); // enable CP10, CP11
#else
        Arm::ModifyCar(0, 0x00f00000); // full access to CP10, CP11
#endif
#endif
        Arm::DefaultCoprocessorAccess = Arm::Car();
#endif
        NKern::EnableAllInterrupts();
        }
#ifdef BTRACE_THREAD_IDENTIFICATION
    BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadCreate,this);
#endif
    return KErrNone;
    }

/** Called from generic layer when thread is killed asynchronously.

For ARM, save reason for last user->kernel switch (if any) so that user
context can be accessed from EDebugEventRemoveThread hook. Must be done
before forcing the thread to exit as this alters the saved return address
which is used to figure out where the context is saved.

@pre kernel locked
@post kernel locked
*/

void NThreadBase::OnKill()
    {
    if (iUserContextType != NThread::EContextNone)
        {
        NThread::TUserContextType t = ((NThread*)this)->UserContextType();
        switch (t)
            {
            case NThread::EContextUserInterrupt:
                t = NThread::EContextUserInterruptDied;
                break;
            case NThread::EContextSvsrInterrupt1:
                t = NThread::EContextSvsrInterrupt1Died;
                break;
            case NThread::EContextSvsrInterrupt2:
                t = NThread::EContextSvsrInterrupt2Died;
                break;
            case NThread::EContextWFAR:
                t = NThread::EContextWFARDied;
                break;
            default:
                // NOP
                break;
            }
        iUserContextType = t;
        }
    }

/** Called from generic layer when thread exits.

For ARM, save that if the thread terminates synchronously the last
user->kernel switch was an exec call. Do nothing if non-user thread or
reason already saved in OnKill().

@pre kernel locked
@post kernel locked
@see OnKill
*/

void NThreadBase::OnExit()
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED,"NThreadBase::OnExit");
    if (iUserContextType == NThread::EContextUndefined)
        iUserContextType = NThread::EContextExec;
    }

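// Force the thread to exit when it next runs: overwrite the saved
// return-from-reschedule PC in the thread's switch frame with __DoForcedExit.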
void NThreadBase::ForceExit()
    {
    TUint32* sp=(TUint32*)iSavedSP;
    sp[SP_PC]=(TUint32)__DoForcedExit;
    }

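// Debug helper: dump an exception info structure (and the identity of the
// current thread) to the kernel debug port.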
void DumpExcInfo(TArmExcInfo& a)
    {
    DEBUGPRINT("Exc %1d Cpsr=%08x FAR=%08x FSR=%08x",a.iExcCode,a.iCpsr,a.iFaultAddress,a.iFaultStatus);
    DEBUGPRINT(" R0=%08x R1=%08x R2=%08x R3=%08x",a.iR0,a.iR1,a.iR2,a.iR3);
    DEBUGPRINT(" R4=%08x R5=%08x R6=%08x R7=%08x",a.iR4,a.iR5,a.iR6,a.iR7);
    DEBUGPRINT(" R8=%08x R9=%08x R10=%08x R11=%08x",a.iR8,a.iR9,a.iR10,a.iR11);
    DEBUGPRINT("R12=%08x R13=%08x R14=%08x R15=%08x",a.iR12,a.iR13,a.iR14,a.iR15);
    DEBUGPRINT("R13Svc=%08x R14Svc=%08x SpsrSvc=%08x",a.iR13Svc,a.iR14Svc,a.iSpsrSvc);
    DEBUGPRINT("Thread %T, KernCSLocked=%d",TheScheduler.iCurrentThread,TheScheduler.iKernCSLocked);
    }

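// Debug helper: dump a full ARM register set (user registers plus the banked
// registers of each exception mode, the auxiliary and CP15 banked registers,
// and the FPEXC value if VFP is present).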
void DumpFullRegSet(SFullArmRegSet& a)
    {
    SNormalRegs& r = a.iN;
    DEBUGPRINT("MODE_USR:");
    DEBUGPRINT(" R0=%08x R1=%08x R2=%08x R3=%08x", r.iR0, r.iR1, r.iR2, r.iR3);
    DEBUGPRINT(" R4=%08x R5=%08x R6=%08x R7=%08x", r.iR4, r.iR5, r.iR6, r.iR7);
    DEBUGPRINT(" R8=%08x R9=%08x R10=%08x R11=%08x", r.iR8, r.iR9, r.iR10, r.iR11);
    DEBUGPRINT("R12=%08x R13=%08x R14=%08x R15=%08x", r.iR12, r.iR13, r.iR14, r.iR15);
    DEBUGPRINT("CPSR=%08x", r.iFlags);
    DEBUGPRINT("MODE_FIQ:");
    DEBUGPRINT(" R8=%08x R9=%08x R10=%08x R11=%08x", r.iR8Fiq, r.iR9Fiq, r.iR10Fiq, r.iR11Fiq);
    DEBUGPRINT("R12=%08x R13=%08x R14=%08x SPSR=%08x", r.iR12Fiq, r.iR13Fiq, r.iR14Fiq, r.iSpsrFiq);
    DEBUGPRINT("MODE_IRQ:");
    DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Irq, r.iR14Irq, r.iSpsrIrq);
    DEBUGPRINT("MODE_SVC:");
    DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Svc, r.iR14Svc, r.iSpsrSvc);
    DEBUGPRINT("MODE_ABT:");
    DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Abt, r.iR14Abt, r.iSpsrAbt);
    DEBUGPRINT("MODE_UND:");
    DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Und, r.iR14Und, r.iSpsrUnd);
//  DEBUGPRINT("MODE_MON:");
//  DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Mon, r.iR14Mon, r.iSpsrMon);

    SAuxiliaryRegs& aux = a.iA;
    DEBUGPRINT("TEEHBR=%08x CPACR=%08x", aux.iTEEHBR, aux.iCPACR);

    SBankedRegs& b = a.iB[0];
    DEBUGPRINT(" SCTLR=%08x ACTLR=%08x PRRR=%08x NMRR=%08x", b.iSCTLR, b.iACTLR, b.iPRRR, b.iNMRR);
    DEBUGPRINT(" DACR=%08x TTBR0=%08x TTBR1=%08x TTBCR=%08x", b.iDACR, b.iTTBR0, b.iTTBR1, b.iTTBCR);
    DEBUGPRINT(" VBAR=%08x FCSEID=%08x CTXIDR=%08x", b.iVBAR, b.iFCSEIDR, b.iCTXIDR);
    DEBUGPRINT("Thread ID RWRW=%08x RWRO=%08x RWNO=%08x", b.iRWRWTID, b.iRWROTID, b.iRWNOTID);
    DEBUGPRINT(" DFSR=%08x DFAR=%08x IFSR=%08x IFAR=%08x", b.iDFSR, b.iDFAR, b.iIFSR, b.iIFAR);
    DEBUGPRINT(" ADFSR=%08x AIFSR=%08x", b.iADFSR, b.iAIFSR);
#ifdef __CPU_HAS_VFP
    DEBUGPRINT("FPEXC %08x", a.iMore[0]);
#endif
    DEBUGPRINT("ExcCode %08x", a.iExcCode);
    }

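// Each entry in the context tables below is a (type, value) pair describing
// where one user-mode register can be found for a given kind of user->kernel
// switch: a fixed value, an offset in words below the top of the supervisor
// stack, an offset from the saved supervisor stack pointer, or the stack
// pointer itself plus an offset. NThread::GetContext() interprets the pairs.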
#define CONTEXT_ELEMENT_UNDEFINED(val) \
    { \
    TArmContextElement::EUndefined, \
    val \
    }

#define CONTEXT_ELEMENT_EXCEPTION(reg) \
    { \
    TArmContextElement::EOffsetFromStackTop, \
    (- (-sizeof(TArmExcInfo)+_FOFF(TArmExcInfo,reg)) )>>2 \
    }

#define CONTEXT_ELEMENT_FROM_SP(offset) \
    { \
    TArmContextElement::EOffsetFromSp, \
    offset \
    }

#define CONTEXT_ELEMENT_FROM_STACK_TOP(offset) \
    { \
    TArmContextElement::EOffsetFromStackTop, \
    offset \
    }

#define CONTEXT_ELEMENT_SP_PLUS(offset) \
    { \
    TArmContextElement::ESpPlusOffset, \
    offset \
    }

const TArmContextElement ContextTableException[] =
    {
    CONTEXT_ELEMENT_EXCEPTION(iR0),
    CONTEXT_ELEMENT_EXCEPTION(iR1),
    CONTEXT_ELEMENT_EXCEPTION(iR2),
    CONTEXT_ELEMENT_EXCEPTION(iR3),
    CONTEXT_ELEMENT_EXCEPTION(iR4),
    CONTEXT_ELEMENT_EXCEPTION(iR5),
    CONTEXT_ELEMENT_EXCEPTION(iR6),
    CONTEXT_ELEMENT_EXCEPTION(iR7),
    CONTEXT_ELEMENT_EXCEPTION(iR8),
    CONTEXT_ELEMENT_EXCEPTION(iR9),
    CONTEXT_ELEMENT_EXCEPTION(iR10),
    CONTEXT_ELEMENT_EXCEPTION(iR11),
    CONTEXT_ELEMENT_EXCEPTION(iR12),
    CONTEXT_ELEMENT_EXCEPTION(iR13),
    CONTEXT_ELEMENT_EXCEPTION(iR14),
    CONTEXT_ELEMENT_EXCEPTION(iR15),
    CONTEXT_ELEMENT_EXCEPTION(iCpsr),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

const TArmContextElement ContextTableUndefined[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(EUserMode),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for non-dying threads which have been preempted by an interrupt
// while in user mode.

const TArmContextElement ContextTableUserInterrupt[] =
    {
    CONTEXT_ELEMENT_FROM_STACK_TOP(6),
    CONTEXT_ELEMENT_FROM_STACK_TOP(5),
    CONTEXT_ELEMENT_FROM_STACK_TOP(4),
    CONTEXT_ELEMENT_FROM_STACK_TOP(3),
    CONTEXT_ELEMENT_FROM_SP(SP_R4),
    CONTEXT_ELEMENT_FROM_SP(SP_R5),
    CONTEXT_ELEMENT_FROM_SP(SP_R6),
    CONTEXT_ELEMENT_FROM_SP(SP_R7),
    CONTEXT_ELEMENT_FROM_SP(SP_R8),
    CONTEXT_ELEMENT_FROM_SP(SP_R9),
    CONTEXT_ELEMENT_FROM_SP(SP_R10),
    CONTEXT_ELEMENT_FROM_SP(SP_R11),
    CONTEXT_ELEMENT_FROM_STACK_TOP(2),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8), // interrupted CPSR
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for threads which have been asynchronously killed after being
// preempted by interrupt while in user mode.

const TArmContextElement ContextTableUserInterruptDied[] =
    {
    CONTEXT_ELEMENT_FROM_STACK_TOP(6),
    CONTEXT_ELEMENT_FROM_STACK_TOP(5),
    CONTEXT_ELEMENT_FROM_STACK_TOP(4),
    CONTEXT_ELEMENT_FROM_STACK_TOP(3),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_STACK_TOP(2),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8), // interrupted CPSR
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for threads which have been preempted by an interrupt while in
// supervisor mode in the SWI handler either before the return address was
// saved or after the registers were restored.

const TArmContextElement ContextTableSvsrInterrupt1[] =
    {
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+2),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+3),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+4),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+5),
    CONTEXT_ELEMENT_FROM_SP(SP_R4),
    CONTEXT_ELEMENT_FROM_SP(SP_R5),
    CONTEXT_ELEMENT_FROM_SP(SP_R6),
    CONTEXT_ELEMENT_FROM_SP(SP_R7),
    CONTEXT_ELEMENT_FROM_SP(SP_R8),
    CONTEXT_ELEMENT_FROM_SP(SP_R9),
    CONTEXT_ELEMENT_FROM_SP(SP_R10),
    CONTEXT_ELEMENT_FROM_SP(SP_R11),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+6),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+6), // r15 = r12
    CONTEXT_ELEMENT_UNDEFINED(EUserMode), // can't get flags so just use 'user mode'
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for threads which have been asynchronously killed while in the situation
// described above (see ContextTableSvsrInterrupt1).

const TArmContextElement ContextTableSvsrInterrupt1Died[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(EUserMode), // can't get flags so just use 'user mode'
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for threads which have been preempted by an interrupt while in
// supervisor mode in the SWI handler after the return address was saved.

const TArmContextElement ContextTableSvsrInterrupt2[] =
    {
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+2),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+3),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+4),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+5),
    CONTEXT_ELEMENT_FROM_SP(SP_R4),
    CONTEXT_ELEMENT_FROM_SP(SP_R5),
    CONTEXT_ELEMENT_FROM_SP(SP_R6),
    CONTEXT_ELEMENT_FROM_SP(SP_R7),
    CONTEXT_ELEMENT_FROM_SP(SP_R8),
    CONTEXT_ELEMENT_FROM_SP(SP_R9),
    CONTEXT_ELEMENT_FROM_SP(SP_R10),
    CONTEXT_ELEMENT_FROM_STACK_TOP(2),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+6),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_UNDEFINED(EUserMode), // can't get flags so just use 'user mode'
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for threads which have been asynchronously killed while in the situation
// described above (see ContextTableSvsrInterrupt2).

const TArmContextElement ContextTableSvsrInterrupt2Died[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_UNDEFINED(EUserMode), // can't get flags so just use 'user mode'
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for non-dying threads blocked on their request semaphore.

const TArmContextElement ContextTableWFAR[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R4),
    CONTEXT_ELEMENT_FROM_SP(SP_R5),
    CONTEXT_ELEMENT_FROM_SP(SP_R6),
    CONTEXT_ELEMENT_FROM_SP(SP_R7),
    CONTEXT_ELEMENT_FROM_SP(SP_R8),
    CONTEXT_ELEMENT_FROM_SP(SP_R9),
    CONTEXT_ELEMENT_FROM_SP(SP_R10),
    CONTEXT_ELEMENT_FROM_STACK_TOP(2),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_FROM_SP(SP_SPSR),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for threads killed asynchronously while blocked on their request
// semaphore.

const TArmContextElement ContextTableWFARDied[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_FROM_SP(SP_SPSR),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

const TArmContextElement ContextTableExec[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_STACK_TOP(10),
    CONTEXT_ELEMENT_FROM_STACK_TOP(9),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8),
    CONTEXT_ELEMENT_FROM_STACK_TOP(7),
    CONTEXT_ELEMENT_FROM_STACK_TOP(6),
    CONTEXT_ELEMENT_FROM_STACK_TOP(5),
    CONTEXT_ELEMENT_FROM_STACK_TOP(4),
    CONTEXT_ELEMENT_FROM_STACK_TOP(3),
    CONTEXT_ELEMENT_FROM_STACK_TOP(2),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_UNDEFINED(EUserMode), // can't get flags so just use 'user mode'
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used to retrieve a thread's kernel side context.
// Used for kernel threads.
const TArmContextElement ContextTableKernel[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R4), // r4 before reschedule
    CONTEXT_ELEMENT_FROM_SP(SP_R5), // r5 before reschedule
    CONTEXT_ELEMENT_FROM_SP(SP_R6), // r6 before reschedule
    CONTEXT_ELEMENT_FROM_SP(SP_R7), // r7 before reschedule
    CONTEXT_ELEMENT_FROM_SP(SP_R8), // r8 before reschedule
    CONTEXT_ELEMENT_FROM_SP(SP_R9), // r9 before reschedule
    CONTEXT_ELEMENT_FROM_SP(SP_R10), // r10 before reschedule
    CONTEXT_ELEMENT_FROM_SP(SP_R11), // r11 before reschedule
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_SP_PLUS(SP_NEXT), // supervisor stack pointer before reschedule
    CONTEXT_ELEMENT_UNDEFINED(0), // supervisor lr is unknown
    CONTEXT_ELEMENT_FROM_SP(SP_PC), // return address from reschedule
    CONTEXT_ELEMENT_UNDEFINED(ESvcMode), // can't get flags so just use 'supervisor mode'
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for non-dying threads which are in a user callback while returning
// from having been preempted by an interrupt while in user mode.

const TArmContextElement ContextTableUserIntrCallback[] =
    {
    CONTEXT_ELEMENT_FROM_STACK_TOP(6),
    CONTEXT_ELEMENT_FROM_STACK_TOP(5),
    CONTEXT_ELEMENT_FROM_STACK_TOP(4),
    CONTEXT_ELEMENT_FROM_STACK_TOP(3),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+9),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+8),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+7),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+6),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+5),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+4),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+3),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+2),
    CONTEXT_ELEMENT_FROM_STACK_TOP(2),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8), // interrupted CPSR
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for non-dying threads which are in a user callback while returning
// from being blocked on their request semaphore.

const TArmContextElement ContextTableWFARCallback[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_STACK_TOP(11),
    CONTEXT_ELEMENT_FROM_STACK_TOP(10),
    CONTEXT_ELEMENT_FROM_STACK_TOP(9),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8),
    CONTEXT_ELEMENT_FROM_STACK_TOP(7),
    CONTEXT_ELEMENT_FROM_STACK_TOP(6),
    CONTEXT_ELEMENT_FROM_STACK_TOP(5),
    CONTEXT_ELEMENT_FROM_STACK_TOP(2),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_FROM_SP(SP_SPSR),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

const TArmContextElement* const ThreadUserContextTables[] =
    {
    ContextTableUndefined, // EContextNone
    ContextTableException,
    ContextTableUndefined,
    ContextTableUserInterrupt,
    ContextTableUserInterruptDied,
    ContextTableSvsrInterrupt1,
    ContextTableSvsrInterrupt1Died,
    ContextTableSvsrInterrupt2,
    ContextTableSvsrInterrupt2Died,
    ContextTableWFAR,
    ContextTableWFARDied,
    ContextTableExec,
    ContextTableKernel,
    ContextTableUserIntrCallback,
    ContextTableWFARCallback,
    0 // Null terminated
    };

/** Return table of pointers to user context tables.

Each user context table is an array of TArmContextElement objects, one per
ARM CPU register, in the order defined in TArmRegisters.

The master table contains pointers to the user context tables in the order
defined in TUserContextType. There are as many user context tables as there
are scenarios that lead a user thread to switch to privileged mode.

Stop-mode debug agents should use this function to store the address of the
master table at a location known to the host debugger. Run-mode debug
agents are advised to use NKern::GetUserContext() and
NKern::SetUserContext() instead.

@return A pointer to the master table. The master table is NULL
terminated. The master and user context tables are guaranteed to remain at
the same location for the lifetime of the OS execution, so it is safe to
cache the returned address.

@see UserContextType
@see TArmContextElement
@see TArmRegisters
@see TUserContextType
@see NKern::SetUserContext
@see NKern::GetUserContext

@publishedPartner
*/
EXPORT_C const TArmContextElement* const* NThread::UserContextTables()
    {
    return &ThreadUserContextTables[0];
    }
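
// For illustration only: NThread::GetContext() below consumes one of these
// tables roughly as follows for a register index i, given the saved supervisor
// stack pointer 'sp' and the stack top 'st':
//
//     const TArmContextElement& e = table[i];
//     switch (e.iType)
//         {
//         case TArmContextElement::EOffsetFromSp:       value = sp[e.iValue]; break;
//         case TArmContextElement::EOffsetFromStackTop: value = st[-e.iValue]; break;
//         case TArmContextElement::ESpPlusOffset:       value = (TUint32)(sp+e.iValue); break;
//         default:                                      value = e.iValue; break; // EUndefined
//         }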

#ifndef __USER_CONTEXT_TYPE_MACHINE_CODED__
extern TBool RescheduledAfterInterrupt(TUint32 /*aAddr*/);

/** Get a value which indicates where a thread's user mode context is stored.

@return A value that can be used as an index into the tables returned by
NThread::UserContextTables().

@pre any context
@pre kernel locked
@post kernel locked

@see UserContextTables
@publishedPartner
*/
EXPORT_C NThread::TUserContextType NThread::UserContextType()
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED,"NThread::UserContextType");
    // Dying thread? use context saved earlier by kernel
    if (iCsFunction == ECSExitInProgress)
        return (TUserContextType)iUserContextType;

    // Check for EContextNone and EContextException
    // Also EContextUserIntrCallback and EContextWFARCallback
    if(iUserContextType<=EContextException || iUserContextType==EContextUserIntrCallback
            || iUserContextType==EContextWFARCallback)
        return (TUserContextType)iUserContextType;

    // Getting current thread context? must be in exec call as exception
    // and dying thread cases were tested above.
    if (this == NCurrentThread())
        return EContextExec;

    // Check what caused the thread to enter supervisor mode
    TUint32* sst=(TUint32*)((TUint32)iStackBase+(TUint32)iStackSize);
    TUint32* sp=(TUint32*)iSavedSP; // saved supervisor stack pointer
    TInt n=sst-sp; // number of words on the supervisor stack
    TUint32 resched_ret=sp[SP_PC]; // return address from reschedule
    if (RescheduledAfterInterrupt(resched_ret))
        {
        // thread was preempted due to an interrupt
        // interrupt and reschedule will have pushed 20+EXTRA words onto the stack
        if ((sp[SP_NEXT]&EMaskMode)==EUserMode) // interrupted mode = user?
            return NThread::EContextUserInterrupt;
        if (n<(30+EXTRA_WORDS)) // n<30 if interrupt occurred in exec call entry before r3-r10 saved
            { // or after r3-r10 restored
            if (n==(20+EXTRA_WORDS))
                {
                // interrupt occurred before the return address and r11 were saved,
                // or after the registers were restored
                return EContextSvsrInterrupt1;
                }
            else
                {
                // interrupt occurred after the return address and r11 were saved
                return EContextSvsrInterrupt2;
                }
            }
        // thread was interrupted in supervisor mode
        // return address and r3-r11 were saved
        }

    // Transition to supervisor mode must have been due to a SWI
    if (n==(15+EXTRA_WORDS))
        {
        // thread must have blocked doing Exec::WaitForAnyRequest
        return EContextWFAR;
        }

    // Thread must have been in a SLOW or UNPROTECTED Exec call
    return EContextExec;
    }

#endif // __USER_CONTEXT_TYPE_MACHINE_CODED__

// Enter and return with kernel locked
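// Copy the registers described by aContextTable into aContext, setting a bit in
// aAvailRegistersMask (bit 0 = R0) for each register that could actually be
// recovered; entries the table marks as undefined just supply a default value
// and leave their mask bit clear.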
void NThread::GetContext(TArmRegSet& aContext, TUint32& aAvailRegistersMask, const TArmContextElement* aContextTable)
    {
    TUint32* sp = (TUint32*)iSavedSP;
    TUint32* st = (TUint32*)((TUint32)iStackBase+(TUint32)iStackSize);
    TArmReg* out = (TArmReg*)(&aContext);
    TBool currentThread = (NCurrentThread() == this);

    aAvailRegistersMask = 0;
    if (iNState == EDead)
        {// This thread's stack may no longer exist so just exit.
        return;
        }

    // Copy available context into provided structure.
    for (TInt i = 0; i<KArmRegisterCount; ++i)
        {
        TInt v = aContextTable[i].iValue;
        TInt t = aContextTable[i].iType;
        if(!currentThread && t==TArmContextElement::EOffsetFromSp)
            {
            // thread has been preempted, it is safe to fetch its context
            // from the info saved in Reschedule().
            v = sp[v];
            aAvailRegistersMask |= (1<<i);
            }
        else if(t==TArmContextElement::EOffsetFromStackTop)
            {
            v = st[-v];
            aAvailRegistersMask |= (1<<i);
            }
        else if(!currentThread && t==TArmContextElement::ESpPlusOffset)
            {
            v = (TInt)(sp+v);
            aAvailRegistersMask |= (1<<i);
            }
        out[i] = v;
        }

    // Getting context of current thread? some values can be fetched directly
    // from the registers if they are not available from the stack.
    if (currentThread && aContextTable[EArmSp].iType == TArmContextElement::EOffsetFromSp)
        {
        Arm::GetUserSpAndLr(out+EArmSp);
        aAvailRegistersMask |= (1<<EArmSp) | (1<<EArmLr);
        }
    }

// Enter and return with kernel locked
void NThread::GetUserContext(TArmRegSet& aContext, TUint32& aAvailRegistersMask)
    {
    TUserContextType type=UserContextType();
    NThread::GetContext(aContext, aAvailRegistersMask, UserContextTables()[type]);
    }

// Enter and return with kernel locked
void NThread::GetSystemContext(TArmRegSet& aContext, TUint32& aAvailRegistersMask)
    {
    NThread::GetContext(aContext, aAvailRegistersMask, UserContextTables()[EContextKernel]);
    }

// Enter and return with kernel locked
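// Write the supplied user-side register values back into the locations given by
// the thread's current context table. Only registers that were actually saved on
// the supervisor stack are changed, and the CPSR update is restricted to the
// condition flags and GE bits of a thread that is in USR mode.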
void NThread::SetUserContext(const TArmRegSet& aContext)
    {
    if (iNState == EDead)
        {// This thread's stack may no longer exist so just exit.
        return;
        }
    TUserContextType type=UserContextType();
    const TArmContextElement* c = NThread::UserContextTables()[type];
    TUint32* sp = (TUint32*)iSavedSP;
    TUint32* st = (TUint32*)((TUint32)iStackBase+(TUint32)iStackSize);
    TArmReg* in = (TArmReg*)(&aContext);
    TBool currentThread = (NCurrentThread() == this);

    // Check that target thread is in USR mode, and update only the flags part of the PSR
    TUint32 tFlags = 0;
    TUint32* tFlagsPtr = &tFlags;
    TUint32 flagsCtxValue = c[EArmFlags].iValue;
    switch (c[EArmFlags].iType) // describes how to interpret flagsCtxValue
        {
        case TArmContextElement::EUndefined:
            // Flags register not saved; not necessarily an error, but we can't update the flags
            tFlags = flagsCtxValue; // use mode bits of flagsCtxValue itself
            break;

        case TArmContextElement::EOffsetFromStackTop:
            // Flags register saved, flagsCtxValue is offset from ToS
            tFlagsPtr = &st[-flagsCtxValue];
            break;

        case TArmContextElement::EOffsetFromSp:
            // Flags register saved, flagsCtxValue is offset from SP
            if (!currentThread)
                tFlagsPtr = &sp[flagsCtxValue];
            else
                {
                // This can only occur when the thread is exiting. Therefore,
                // we allow it, but the changed values will never be used.
                tFlags = 0x10;
                }
            break;

        default:
            // Assertion below will fail with default value ...
            ;
        }

    tFlags = *tFlagsPtr; // retrieve saved flags
    __NK_ASSERT_ALWAYS((tFlags & 0x1f) == 0x10); // target thread must be in USR mode
    const TUint32 writableFlags = 0xF80F0000; // NZCVQ.......GE3-0................
    tFlags &= ~writableFlags;
    tFlags |= in[EArmFlags] & writableFlags;
    *tFlagsPtr = tFlags; // update saved flags

    // Copy provided context into stack if possible
    for (TInt i = 0; i<KArmRegisterCount; ++i)
        {
        // The Flags were already processed above, and we don't allow
        // changing the DACR, so we can just skip these two index values
        if (i == EArmFlags || i == EArmDacr)
            continue;

        TInt v = c[i].iValue;
        TInt t = c[i].iType;
        if(!currentThread && t==TArmContextElement::EOffsetFromSp)
            {
            // thread has been preempted, it is safe to change context
            // saved in Reschedule().
            sp[v] = in[i];
            }
        if(t==TArmContextElement::EOffsetFromStackTop)
            st[-v] = in[i];
        }

    // Current thread? some values can be loaded straight into the registers
    // if they haven't been stored on the stack yet.
    if (currentThread && c[EArmSp].iType == TArmContextElement::EOffsetFromSp)
        Arm::SetUserSpAndLr(in+EArmSp);
    }

// Modify a non-running thread's user stack pointer
// Enter and return with kernel locked
void NThread::ModifyUsp(TLinAddr aUsp)
    {
    // Check what caused the thread to enter supervisor mode
    TUint32* sst=(TUint32*)((TUint32)iStackBase+(TUint32)iStackSize);
    if (iSpare3)
        {
        // exception caused transition to supervisor mode
        TArmExcInfo& e=((TArmExcInfo*)sst)[-1];
        e.iR13=aUsp;
        return;
        }
    TUint32* sp=(TUint32*)iSavedSP; // saved supervisor stack pointer
    sp[SP_R13U]=aUsp;
    }

/** Get (subset of) user context of specified thread.

The nanokernel does not systematically save all registers in the supervisor
stack on entry into privileged mode and the exact subset depends on why the
switch to privileged mode occurred. So in general only a subset of the
register set is available.

@param aThread Thread to inspect. It can be the current thread or a
non-current one.

@param aContext Pointer to TArmRegSet structure where the context is
copied.

@param aAvailRegistersMask Bit mask telling which subset of the context is
available and has been copied to aContext (1: register available / 0: not
available). Bit 0 stands for register R0.

@see TArmRegSet
@see ThreadSetUserContext

@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadGetUserContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetUserContext");
    TArmRegSet& a=*(TArmRegSet*)aContext;
    memclr(aContext, sizeof(TArmRegSet));
    NKern::Lock();
    aThread->GetUserContext(a, aAvailRegistersMask);
    NKern::Unlock();
    }

/** Get (subset of) system context of specified thread.

@param aThread Thread to inspect. It can be the current thread or a
non-current one.

@param aContext Pointer to TArmRegSet structure where the context is
copied.

@param aAvailRegistersMask Bit mask telling which subset of the context is
available and has been copied to aContext (1: register available / 0: not
available). Bit 0 stands for register R0.

@see TArmRegSet
@see ThreadSetUserContext

@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadGetSystemContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetSystemContext");
    TArmRegSet& a=*(TArmRegSet*)aContext;
    memclr(aContext, sizeof(TArmRegSet));
    NKern::Lock();
    aThread->GetSystemContext(a, aAvailRegistersMask);
    NKern::Unlock();
    }

/** Set (subset of) user context of specified thread.

@param aThread Thread to modify. It can be the current thread or a
non-current one.

@param aContext Pointer to TArmRegSet structure containing the context
to set. The values of registers which aren't part of the context saved
on the supervisor stack are ignored.

@see TArmRegSet
@see ThreadGetUserContext

@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadSetUserContext(NThread* aThread, TAny* aContext)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadSetUserContext");
    TArmRegSet& a=*(TArmRegSet*)aContext;
    NKern::Lock();
    aThread->SetUserContext(a);
    NKern::Unlock();
    }

/** @internalComponent */
void NKern::ThreadModifyUsp(NThread* aThread, TLinAddr aUsp)
    {
    NKern::Lock();
    aThread->ModifyUsp(aUsp);
    NKern::Unlock();
    }

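// Per-thread accessors for the DACR, CAR and VFP FPEXC values. The current
// thread's values live in the real registers; for any other thread they are the
// copies saved on its supervisor stack, which is what these accessors read and
// update.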
#ifdef __CPU_ARM_USE_DOMAINS
TUint32 NThread::Dacr()
    {
    if (this==TheScheduler.iCurrentThread)
        return Arm::Dacr();
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP; // saved supervisor stack pointer
    TUint32 dacr=sp[SP_DACR];
    NKern::Unlock();
    return dacr;
    }

void NThread::SetDacr(TUint32 aDacr)
    {
    if (this==TheScheduler.iCurrentThread)
        Arm::SetDacr(aDacr);
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP; // saved supervisor stack pointer
    sp[SP_DACR]=aDacr;
    NKern::Unlock();
    }

TUint32 NThread::ModifyDacr(TUint32 aClearMask, TUint32 aSetMask)
    {
    if (this==TheScheduler.iCurrentThread)
        return Arm::ModifyDacr(aClearMask,aSetMask);
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP; // saved supervisor stack pointer
    TUint32 dacr=sp[SP_DACR];
    sp[SP_DACR]=(dacr&~aClearMask)|aSetMask;
    NKern::Unlock();
    return dacr;
    }
#endif

#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
void NThread::SetCar(TUint32 aCar)
    {
    if (this==TheScheduler.iCurrentThread)
        Arm::SetCar(aCar);
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP; // saved supervisor stack pointer
    sp[SP_CAR]=aCar;
    NKern::Unlock();
    }
#endif


/** Get the saved coprocessor access register value for a thread

@return The saved value of the CAR, 0 if CPU doesn't have CAR
@pre Don't call from ISR

@publishedPartner
@released
*/
EXPORT_C TUint32 NThread::Car()
    {
    CHECK_PRECONDITIONS(MASK_NOT_ISR,"NThread::Car");
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
    if (this==TheScheduler.iCurrentThread)
        return Arm::Car();
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP; // saved supervisor stack pointer
    TUint32 car=sp[SP_CAR];
    NKern::Unlock();
    return car;
#else
    return 0;
#endif
    }


/** Modify the saved coprocessor access register value for a thread
Does nothing if CPU does not have CAR.

@param aClearMask Mask of bits to clear (1 = clear this bit)
@param aSetMask Mask of bits to set (1 = set this bit)
@return The original saved value of the CAR, 0 if CPU doesn't have CAR
@pre Don't call from ISR

@publishedPartner
@released
*/
EXPORT_C TUint32 NThread::ModifyCar(TUint32 aClearMask, TUint32 aSetMask)
    {
    CHECK_PRECONDITIONS(MASK_NOT_ISR,"NThread::ModifyCar");
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
    if (this==TheScheduler.iCurrentThread)
        return Arm::ModifyCar(aClearMask,aSetMask);
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP; // saved supervisor stack pointer
    TUint32 car=sp[SP_CAR];
    sp[SP_CAR]=(car&~aClearMask)|aSetMask;
    NKern::Unlock();
    return car;
#else
    return 0;
#endif
    }

#ifdef __CPU_HAS_VFP
void NThread::SetFpExc(TUint32 aVal)
    {
    if (this==TheScheduler.iCurrentThread)
        Arm::SetFpExc(aVal);
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP; // saved supervisor stack pointer
    sp[SP_FPEXC]=aVal;
    NKern::Unlock();
    }
#endif


/** Get the saved VFP FPEXC register value for a thread

@return The saved value of FPEXC, 0 if VFP not present
@pre Don't call from ISR

@publishedPartner
@released
*/
EXPORT_C TUint32 NThread::FpExc()
    {
    CHECK_PRECONDITIONS(MASK_NOT_ISR,"NThread::FpExc");
#ifdef __CPU_HAS_VFP
    if (this==TheScheduler.iCurrentThread)
        return Arm::FpExc();
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP; // saved supervisor stack pointer
    TUint32 r=sp[SP_FPEXC];
    NKern::Unlock();
    return r;
#else
    return 0;
#endif
    }


/** Modify the saved VFP FPEXC register value for a thread
Does nothing if VFP not present

@param aClearMask Mask of bits to clear (1 = clear this bit)
@param aSetMask Mask of bits to set (1 = set this bit)
@return The original saved value of FPEXC, 0 if VFP not present
@pre Don't call from ISR

@publishedPartner
@released
*/
EXPORT_C TUint32 NThread::ModifyFpExc(TUint32 aClearMask, TUint32 aSetMask)
    {
    CHECK_PRECONDITIONS(MASK_NOT_ISR,"NThread::ModifyFpExc");
#ifdef __CPU_HAS_VFP
    if (this==TheScheduler.iCurrentThread)
        return Arm::ModifyFpExc(aClearMask,aSetMask);
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP; // saved supervisor stack pointer
    TUint32 r=sp[SP_FPEXC];
    sp[SP_FPEXC]=(r&~aClearMask)|aSetMask;
    NKern::Unlock();
    return r;
#else
    return 0;
#endif
    }