author:      Mike Kinghan <mikek@symbian.org>
date:        Mon, 26 Jul 2010 14:13:30 +0100
branch:      GCC_SURGE
changeset:   234:56f96efe467a
parent:      90:947f0dc9f7a8
child:       257:3e88ff8f41d5
permissions: -rw-r--r--
// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\x86\ncthrd.cpp
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

#include <x86.h>
#include <apic.h>
#include <nk_irq.h>

// Called by a thread when it first runs
void __StartThread();

void NThreadBase::OnKill()
	{
	}

void NThreadBase::OnExit()
	{
	}

extern void __ltr(TInt /*aSelector*/);

extern "C" TUint __tr();
extern void InitTimestamp(TSubScheduler* aSS, SNThreadCreateInfo& aInfo);

TInt NThread::Create(SNThreadCreateInfo& aInfo, TBool aInitial)
	{
	if (!aInfo.iStackBase || aInfo.iStackSize<0x100)
		return KErrArgument;
	new (this) NThread;
	TInt cpu = -1;
	TSubScheduler* ss = 0;
	if (aInitial)
		{
		cpu = __e32_atomic_add_ord32(&TheScheduler.iNumCpus, 1);
		if (cpu==0)
			memset(SubSchedulerLookupTable, 0x9a, sizeof(SubSchedulerLookupTable));
		aInfo.iCpuAffinity = cpu;
		// OK since we can't migrate yet
		TUint32 apicid = *(volatile TUint32*)(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID) >> 24;
		ss = &TheSubSchedulers[cpu];
		ss->iSSX.iAPICID = apicid << 24;
		ss->iCurrentThread = this;
		ss->iDeferShutdown = 0;
		SubSchedulerLookupTable[apicid] = ss;
		iRunCount.i64 = UI64LIT(1);
		iActiveState = 1;
		__KTRACE_OPT(KBOOT,DEBUGPRINT("Init: cpu=%d APICID=%08x ss=%08x", cpu, apicid, ss));
		if (cpu)
			{
			__ltr(TSS_SELECTOR(cpu));
			NIrq::HwInit2AP();
			__e32_atomic_ior_ord32(&TheScheduler.iThreadAcceptCpus, 1<<cpu);
			__e32_atomic_ior_ord32(&TheScheduler.iIpiAcceptCpus, 1<<cpu);
			__e32_atomic_ior_ord32(&TheScheduler.iCpusNotIdle, 1<<cpu);
			__e32_atomic_add_ord32(&TheScheduler.iCCRequestLevel, 1);
			__KTRACE_OPT(KBOOT,DEBUGPRINT("AP TR=%x",__tr()));
			}
		}
	TInt r=NThreadBase::Create(aInfo,aInitial);
	if (r!=KErrNone)
		return r;
	if (!aInitial)
		{
		TLinAddr stack_top = (TLinAddr)iStackBase + (TLinAddr)iStackSize;
		TLinAddr sp = stack_top;
		TUint32 pb = (TUint32)aInfo.iParameterBlock;
		SThreadStackStub* tss = 0;
		if (aInfo.iParameterBlockSize)
			{
			tss = (SThreadStackStub*)stack_top;
			--tss;
			tss->iVector = SThreadStackStub::EVector;
			tss->iError = 0;
			tss->iEip = 0;
			tss->iCs = 0;
			tss->iEflags = 0;
			sp = (TLinAddr)tss;
			sp -= (TLinAddr)aInfo.iParameterBlockSize;
			wordmove((TAny*)sp, aInfo.iParameterBlock, aInfo.iParameterBlockSize);
			pb = (TUint32)sp;
			tss->iPBlock = sp;
			}
		SThreadInitStack* tis = (SThreadInitStack*)sp;
		--tis;
		tis->iR.iCR0 = X86::DefaultCR0 | KX86CR0_TS;
		tis->iR.iReschedFlag = 1;
		tis->iR.iEip = (TUint32)&__StartThread;
		tis->iR.iReason = 0;
		tis->iX.iEcx = 0;
		tis->iX.iEdx = 0;
		tis->iX.iEbx = pb;				// parameter block pointer
		tis->iX.iEsi = 0;
		tis->iX.iEdi = 0;
		tis->iX.iEbp = stack_top;
		tis->iX.iEax = (TUint32)aInfo.iFunction;
		tis->iX.iDs = KRing0DS;
		tis->iX.iEs = KRing0DS;
		tis->iX.iFs = 0;
		tis->iX.iGs = KRing0DS;
		tis->iX.iVector = SThreadInitStack::EVector;
		tis->iX.iError = 0;
		tis->iX.iEip = (TUint32)aInfo.iFunction;
		tis->iX.iCs = KRing0CS;
		tis->iX.iEflags = (TUint32)(EX86FlagIF|EX86FlagAC|0x1002);
		tis->iX.iEsp3 = 0xFFFFFFFFu;
		tis->iX.iSs3 = 0xFFFFFFFFu;
		wordmove(&iCoprocessorState, DefaultCoprocessorState, sizeof(iCoprocessorState));
		iSavedSP = (TLinAddr)tis;
		}
	else
		{
		NKern::EnableAllInterrupts();

		// Initialise timestamp
		InitTimestamp(ss, aInfo);
		}
	AddToEnumerateList();
	InitLbInfo();
#ifdef BTRACE_THREAD_IDENTIFICATION
	BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadCreate,this);
#endif
	return KErrNone;
	}
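
// An illustrative usage sketch, not part of this file: the fields below are
// exactly those read by NThread::Create() above. Any remaining fields of
// SNThreadCreateInfo, and the stack/thread allocation itself, are elided and
// would need to be set up as the rest of the nanokernel expects.
#if 0
SNThreadCreateInfo info;
info.iFunction = &MyThreadEntry;				// hypothetical entry point
info.iStackBase = myStack;						// caller-provided, >= 0x100 bytes
info.iStackSize = myStackSize;
info.iParameterBlock = &myParams;				// copied to the stack top;
info.iParameterBlockSize = sizeof(myParams);	// its address arrives in EBX
TInt r = aThread->Create(info, EFalse /*aInitial*/);
#endif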

void DumpExcInfo(TX86ExcInfo& a)
	{
	DEBUGPRINT("Exc %02x EFLAGS=%08x FAR=%08x ErrCode=%08x",a.iExcId,a.iEflags,a.iFaultAddress,a.iExcErrorCode);
	DEBUGPRINT("EAX=%08x EBX=%08x ECX=%08x EDX=%08x",a.iEax,a.iEbx,a.iEcx,a.iEdx);
	DEBUGPRINT("ESP=%08x EBP=%08x ESI=%08x EDI=%08x",a.iEsp,a.iEbp,a.iEsi,a.iEdi);
	DEBUGPRINT(" CS=%08x EIP=%08x DS=%08x SS=%08x",a.iCs,a.iEip,a.iDs,a.iSs);
	DEBUGPRINT(" ES=%08x FS=%08x GS=%08x",a.iEs,a.iFs,a.iGs);
	if (a.iCs&3)
		{
		DEBUGPRINT("SS3=%08x ESP3=%08x",a.iSs3,a.iEsp3);
		}
	TScheduler& s = TheScheduler;
	TInt irq = NKern::DisableAllInterrupts();
	TSubScheduler& ss = SubScheduler();
	NThreadBase* ct = ss.iCurrentThread;
	TInt inc = TInt(ss.iSSX.iIrqNestCount);
	TInt cpu = ss.iCpuNum;
	NKern::RestoreInterrupts(irq);
	DEBUGPRINT("Thread %T, CPU %d, KLCount=%08x, IrqNest=%d",ct,cpu,ss.iKernLockCount,inc);
	}


void GetContextAfterExc(TX86RegSet& aContext, SThreadExcStack* txs, TUint32& aAvailRegistersMask, TBool aSystem)
	{
	TInt cpl = txs->iCs & 3;
	aAvailRegistersMask = 0xffffu;	// EAX,EBX,ECX,EDX,ESP,EBP,ESI,EDI,CS,DS,ES,FS,GS,SS,EFLAGS,EIP all valid
	aContext.iEax = txs->iEax;
	aContext.iEbx = txs->iEbx;
	aContext.iEcx = txs->iEcx;
	aContext.iEdx = txs->iEdx;
	if (aSystem)
		{
		aContext.iEsp = TUint32(txs+1);
		if (cpl==0)
			aContext.iEsp -= 8;		// two less words pushed if interrupt taken while CPL=0
		aContext.iSs = KRing0DS;
		aAvailRegistersMask &= ~0x2000u;	// SS assumed not read
		}
	else if (cpl==3)
		{
		aContext.iEsp = txs->iEsp3;
		aContext.iSs = txs->iSs3;
		}
	else
		{
		__crash();
		}
	aContext.iEbp = txs->iEbp;
	aContext.iEsi = txs->iEsi;
	aContext.iEdi = txs->iEdi;
	aContext.iCs = txs->iCs;
	aContext.iDs = txs->iDs;
	aContext.iEs = txs->iEs;
	aContext.iFs = txs->iFs;
	aContext.iGs = txs->iGs;
	aContext.iEflags = txs->iEflags;
	aContext.iEip = txs->iEip;
	}
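
// Background for the CPL==0 adjustment above (standard IA-32 behaviour): an
// interrupt or exception taken with no privilege change pushes only EFLAGS,
// CS and EIP, while one taken from CPL=3 additionally pushes the user SS and
// ESP. So when the frame was pushed at CPL=0 the iEsp3/iSs3 words of
// SThreadExcStack are absent, and the pre-exception ESP is 8 bytes below
// TUint32(txs+1) - hence the subtraction of 8.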

void GetContextAfterSlowExec(TX86RegSet& aContext, SThreadSlowExecStack* tsxs, TUint32& aAvailRegistersMask)
	{
	TInt cpl = tsxs->iCs & 3;
	if (cpl!=3)
		{
		__crash();
		}
	aAvailRegistersMask = 0xffffu;	// EAX,EBX,ECX,EDX,ESP,EBP,ESI,EDI,CS,DS,ES,FS,GS,SS,EFLAGS,EIP all valid
	aContext.iEax = tsxs->iEax;
	aContext.iEbx = tsxs->iEbx;
	aContext.iEcx = tsxs->iEcx;
	aContext.iEdx = tsxs->iEdx;
	aContext.iEsp = tsxs->iEsp3;
	aContext.iSs = tsxs->iSs3;
	aContext.iEbp = tsxs->iEbp;
	aContext.iEsi = tsxs->iEsi;
	aContext.iEdi = tsxs->iEdi;
	aContext.iCs = tsxs->iCs;
	aContext.iDs = tsxs->iDs;
	aContext.iEs = tsxs->iEs;
	aContext.iFs = tsxs->iFs;
	aContext.iGs = tsxs->iGs;
	aContext.iEflags = tsxs->iEflags;
	aContext.iEip = tsxs->iEip;
	}


// Enter and return with kernel locked
void NThread::GetUserContext(TX86RegSet& aContext, TUint32& aAvailRegistersMask)
	{
	NThread* pC = NCurrentThreadL();
	TSubScheduler* ss = 0;
	if (pC != this)
		{
		AcqSLock();
		if (iWaitState.ThreadIsDead() || i_NThread_Initial)
			{
			RelSLock();
			aAvailRegistersMask = 0;
			return;
			}
		if (iReady && iParent->iReady)
			{
			ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
			ss->iReadyListLock.LockOnly();
			}
		if (iCurrent)
			{
			// thread is actually running on another CPU
			// interrupt that CPU and wait for it to enter interrupt mode
			// this allows a snapshot of the thread user state to be observed
			// and ensures the thread cannot return to user mode
			send_resched_ipi_and_wait(iLastCpu);
			}
		}
	TUint32* stack = (TUint32*)(TLinAddr(iStackBase) + TLinAddr(iStackSize));
	if (stack[-1]!=0xFFFFFFFFu && stack[-2]!=0xFFFFFFFFu && stack[-7]<0x100u)	// if not, thread never entered user mode
		{
		if (stack[-7] == 0x21)	// slow exec
			GetContextAfterSlowExec(aContext, ((SThreadSlowExecStack*)stack)-1, aAvailRegistersMask);
		else
			GetContextAfterExc(aContext, ((SThreadExcStack*)stack)-1, aAvailRegistersMask, FALSE);
		}
	if (pC != this)
		{
		if (ss)
			ss->iReadyListLock.UnlockOnly();
		RelSLock();
		}
	}
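
// Note on the stack[-1]/stack[-2]/stack[-7] test above: NThread::Create()
// seeds the topmost frame with iEsp3 = iSs3 = 0xFFFFFFFFu, values no genuine
// entry from user mode would leave there, and stack[-7] lines up with the
// saved vector number of SThreadExcStack. A real user-mode entry overwrites
// the sentinels and stores a vector below 0x100; vector 0x21 marks a slow
// exec, whose larger SThreadSlowExecStack frame is handled separately.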

class TGetContextIPI : public TGenericIPI
	{
public:
	void Get(TInt aCpu, TX86RegSet& aContext, TUint32& aAvailRegistersMask);
	static void Isr(TGenericIPI*);
public:
	TX86RegSet* iContext;
	TUint32* iAvailRegsMask;
	};

void TGetContextIPI::Isr(TGenericIPI* aPtr)
	{
	TGetContextIPI& ipi = *(TGetContextIPI*)aPtr;
	TX86RegSet& a = *ipi.iContext;
	TSubScheduler& ss = SubScheduler();
	TUint32* irqstack = (TUint32*)ss.iSSX.iIrqStackTop;
	SThreadExcStack* txs = (SThreadExcStack*)irqstack[-1];	// first word pushed on IRQ stack points to thread supervisor stack
	GetContextAfterExc(a, txs, *ipi.iAvailRegsMask, TRUE);
	}

void TGetContextIPI::Get(TInt aCpu, TX86RegSet& aContext, TUint32& aAvailRegsMask)
	{
	iContext = &aContext;
	iAvailRegsMask = &aAvailRegsMask;
	Queue(&Isr, 1u<<aCpu);
	WaitCompletion();
	}

// Enter and return with kernel locked
void NThread::GetSystemContext(TX86RegSet& aContext, TUint32& aAvailRegsMask)
	{
	aAvailRegsMask = 0;
	NThread* pC = NCurrentThreadL();
	__NK_ASSERT_ALWAYS(pC!=this);
	TSubScheduler* ss = 0;
	AcqSLock();
	if (iWaitState.ThreadIsDead())
		{
		RelSLock();
		return;
		}
	if (iReady && iParent->iReady)
		{
		ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
		ss->iReadyListLock.LockOnly();
		}
	if (iCurrent)
		{
		// thread is actually running on another CPU
		// use an interprocessor interrupt to get a snapshot of the state
		TGetContextIPI ipi;
		ipi.Get(iLastCpu, aContext, aAvailRegsMask);
		}
	else
		{
		// thread is not running and can't start
		SThreadReschedStack* trs = (SThreadReschedStack*)iSavedSP;
		TUint32 kct = trs->iReason;
		TLinAddr sp = TLinAddr(trs+1);
		TUint32* stack = (TUint32*)sp;
		switch (kct)
			{
		case 0:		// thread not yet started
			{
			aContext.iEcx = stack[0];
			aContext.iEdx = stack[1];
			aContext.iEbx = stack[2];
			aContext.iEsi = stack[3];
			aContext.iEdi = stack[4];
			aContext.iEbp = stack[5];
			aContext.iEax = stack[6];
			aContext.iDs = stack[7];
			aContext.iEs = stack[8];
			aContext.iFs = stack[9];
			aContext.iGs = stack[10];
			aContext.iEsp = sp + 40 - 8;	// entry to initial function
			aContext.iEip = aContext.iEax;
			aContext.iEflags = 0x41202;		// guess
			aContext.iCs = KRing0CS;
			aContext.iSs = KRing0DS;
			aAvailRegsMask = 0x9effu;
			break;
			}
		case 1:		// unlock
			{
			aContext.iFs = stack[0];
			aContext.iGs = stack[1];
			aContext.iEbx = stack[2];
			aContext.iEbp = stack[3];
			aContext.iEdi = stack[4];
			aContext.iEsi = stack[5];
			aContext.iEip = stack[6];	// return address from NKern::Unlock()
			aContext.iCs = KRing0CS;
			aContext.iDs = KRing0DS;
			aContext.iEs = KRing0DS;
			aContext.iSs = KRing0DS;
			aContext.iEsp = sp + 28;	// ESP after return from NKern::Unlock()
			aContext.iEax = 0;			// unknown
			aContext.iEcx = 0;			// unknown
			aContext.iEdx = 0;			// unknown
			aContext.iEflags = 0x41202;	// guess
			aAvailRegsMask = 0x98f2u;	// EIP,GS,FS,EDI,ESI,EBP,ESP,EBX available, others guessed or unavailable
			break;
			}
		case 2:		// IRQ
			{
			GetContextAfterExc(aContext, (SThreadExcStack*)sp, aAvailRegsMask, TRUE);
			break;
			}
		default:	// unknown reschedule reason
			__NK_ASSERT_ALWAYS(0);
			}
		}
	if (ss)
		ss->iReadyListLock.UnlockOnly();
	RelSLock();
	}

// Enter and return with kernel locked
void NThread::SetUserContext(const TX86RegSet& aContext, TUint32& aRegMask)
	{
	NThread* pC = NCurrentThreadL();
	TSubScheduler* ss = 0;
	if (pC != this)
		{
		AcqSLock();
		if (iWaitState.ThreadIsDead() || i_NThread_Initial)
			{
			RelSLock();
			aRegMask = 0;
			return;
			}
		if (iReady && iParent->iReady)
			{
			ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
			ss->iReadyListLock.LockOnly();
			}
		if (iCurrent)
			{
			// thread is actually running on another CPU
			// interrupt that CPU and wait for it to enter interrupt mode
			// this allows a snapshot of the thread user state to be observed
			// and ensures the thread cannot return to user mode
			send_resched_ipi_and_wait(iLastCpu);
			}
		}
	TUint32* stack = (TUint32*)(TLinAddr(iStackBase) + TLinAddr(iStackSize));
	SThreadExcStack* txs = 0;
	SThreadSlowExecStack* tsxs = 0;
	aRegMask &= 0xffffu;
	if (stack[-1]!=0xFFFFFFFFu && stack[-2]!=0xFFFFFFFFu && stack[-7]<0x100u)	// if not, thread never entered user mode
		{
		if (stack[-7] == 0x21)	// slow exec
			tsxs = ((SThreadSlowExecStack*)stack)-1;
		else
			txs = ((SThreadExcStack*)stack)-1;

#define WRITE_REG(reg, value)	\
	{ if (tsxs) tsxs->reg=(value); else txs->reg=(value); }

		if (aRegMask & 0x0001u)
			WRITE_REG(iEax, aContext.iEax);
		if (aRegMask & 0x0002u)
			WRITE_REG(iEbx, aContext.iEbx);
		if (aRegMask & 0x0004u)
			{
			// don't allow write to iEcx if in slow exec since this may conflict
			// with handle preprocessing
			if (tsxs)
				aRegMask &= ~0x0004u;
			else
				txs->iEcx = aContext.iEcx;
			}
		if (aRegMask & 0x0008u)
			WRITE_REG(iEdx, aContext.iEdx);
		if (aRegMask & 0x0010u)
			WRITE_REG(iEsp3, aContext.iEsp);
		if (aRegMask & 0x0020u)
			WRITE_REG(iEbp, aContext.iEbp);
		if (aRegMask & 0x0040u)
			WRITE_REG(iEsi, aContext.iEsi);
		if (aRegMask & 0x0080u)
			WRITE_REG(iEdi, aContext.iEdi);
		if (aRegMask & 0x0100u)
			WRITE_REG(iCs, aContext.iCs|3);
		if (aRegMask & 0x0200u)
			WRITE_REG(iDs, aContext.iDs|3);
		if (aRegMask & 0x0400u)
			WRITE_REG(iEs, aContext.iEs|3);
		if (aRegMask & 0x0800u)
			WRITE_REG(iFs, aContext.iFs|3);
		if (aRegMask & 0x1000u)
			WRITE_REG(iGs, aContext.iGs|3);
		if (aRegMask & 0x2000u)
			WRITE_REG(iSs3, aContext.iSs|3);
		if (aRegMask & 0x4000u)
			WRITE_REG(iEflags, aContext.iEflags);
		if (aRegMask & 0x8000u)
			WRITE_REG(iEip, aContext.iEip);
		}
	else
		aRegMask = 0;
	if (pC != this)
		{
		if (ss)
			ss->iReadyListLock.UnlockOnly();
		RelSLock();
		}
	}

/** Get (subset of) user context of specified thread.

	The nanokernel does not systematically save all registers in the supervisor
	stack on entry into privileged mode and the exact subset depends on why the
	switch to privileged mode occurred. So in general only a subset of the
	register set is available.

	@param aThread	Thread to inspect. It can be the current thread or a
	non-current one.

	@param aContext	Pointer to TX86RegSet structure where the context is
	copied.

	@param aAvailRegistersMask Bit mask telling which subset of the context is
	available and has been copied to aContext (1: register available / 0: not
	available). Bits represent fields in TX86RegSet, i.e.
	0:EAX 1:EBX 2:ECX 3:EDX 4:ESP 5:EBP 6:ESI 7:EDI
	8:CS 9:DS 10:ES 11:FS 12:GS 13:SS 14:EFLAGS 15:EIP

	@see TX86RegSet
	@see ThreadSetUserContext

	@pre Call in a thread context.
	@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadGetUserContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetUserContext");
	TX86RegSet& a = *(TX86RegSet*)aContext;
	memclr(aContext, sizeof(TX86RegSet));
	NKern::Lock();
	aThread->GetUserContext(a, aAvailRegistersMask);
	NKern::Unlock();
	}
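
// An illustrative caller, not part of this file, using only names defined
// above: fetch a thread's user-side EIP if it was captured. Bit 15 of the
// returned mask corresponds to EIP per the table in the comment above.
#if 0
TX86RegSet regs;
TUint32 avail = 0;
NKern::ThreadGetUserContext(aThread, &regs, avail);
if (avail & 0x8000u)		// EIP available?
	DEBUGPRINT("user EIP=%08x", regs.iEip);
#endif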


/** Get (subset of) system context of specified thread.

	@param aThread	Thread to inspect. It can be the current thread or a
	non-current one.

	@param aContext	Pointer to TX86RegSet structure where the context is
	copied.

	@param aAvailRegistersMask Bit mask telling which subset of the context is
	available and has been copied to aContext (1: register available / 0: not
	available). Bits represent fields in TX86RegSet, i.e.
	0:EAX 1:EBX 2:ECX 3:EDX 4:ESP 5:EBP 6:ESI 7:EDI
	8:CS 9:DS 10:ES 11:FS 12:GS 13:SS 14:EFLAGS 15:EIP

	@see TX86RegSet
	@see ThreadGetUserContext

	@pre Call in a thread context.
	@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadGetSystemContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetSystemContext");
	TX86RegSet& a = *(TX86RegSet*)aContext;
	memclr(aContext, sizeof(TX86RegSet));
	NKern::Lock();
	aThread->GetSystemContext(a, aAvailRegistersMask);
	NKern::Unlock();
	}


/** Set (subset of) user context of specified thread.

	@param aThread	Thread to modify. It can be the current thread or a
	non-current one.

	@param aContext	Pointer to TX86RegSet structure containing the context
	to set. The values of registers which aren't part of the context saved
	on the supervisor stack are ignored.

	@see TX86RegSet
	@see ThreadGetUserContext

	@pre Call in a thread context.
	@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadSetUserContext(NThread* aThread, TAny* aContext)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadSetUserContext");
	TX86RegSet& a = *(TX86RegSet*)aContext;
	TUint32 mask = 0xffffu;
	NKern::Lock();
	aThread->SetUserContext(a, mask);
	NKern::Unlock();
	}
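
// An illustrative read-modify-write sketch, not part of this file: since
// SetUserContext() only applies registers present in the saved frame, a
// caller would typically fetch the context first and write back a variant.
#if 0
TX86RegSet regs;
TUint32 avail = 0;
NKern::ThreadGetUserContext(aThread, &regs, avail);
if (avail & 0x8000u)
	{
	regs.iEip = newEip;		// hypothetical new user-mode PC
	NKern::ThreadSetUserContext(aThread, &regs);
	}
#endif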


extern "C" void __fastcall add_dfc(TDfc* aDfc)
	{
	aDfc->Add();
	}


TInt NKern::QueueUserModeCallback(NThreadBase* aThread, TUserModeCallback* aCallback)
	{
	__e32_memory_barrier();
	if (aCallback->iNext != KUserModeCallbackUnqueued)
		return KErrInUse;
	if (aThread->i_NThread_Initial)
		return KErrArgument;
	TInt result = KErrDied;
	NKern::Lock();
	TUserModeCallback* listHead = aThread->iUserModeCallbacks;
	do	{
		if (TLinAddr(listHead) & 3)
			goto done;	// thread exiting
		aCallback->iNext = listHead;
		} while (!__e32_atomic_cas_ord_ptr(&aThread->iUserModeCallbacks, &listHead, aCallback));
	result = KErrNone;

	if (!listHead)	// if this isn't first callback someone else will have done this bit
		{
		/*
		 * If aThread is currently running on another CPU we need to send an IPI so
		 * that it will enter kernel mode and run the callback.
		 * The synchronization is tricky here. We want to check if the thread is
		 * running and if so on which core. We need to avoid any possibility of
		 * the thread entering user mode without having seen the callback,
		 * either because we thought it wasn't running so didn't send an IPI or
		 * because the thread migrated after we looked and we sent the IPI to
		 * the wrong processor. Sending a redundant IPI is not a problem (e.g.
		 * because the thread is running in kernel mode - which we can't tell -
		 * or because the thread stopped running after we looked).
		 * The following events are significant:
		 * Event A: Target thread writes to iCurrent when it starts running
		 * Event B: Target thread reads iUserModeCallbacks before entering user
		 *          mode
		 * Event C: This thread writes to iUserModeCallbacks
		 * Event D: This thread reads iCurrent to check if aThread is running
		 * There is a barrier between A and B since A occurs with the ready
		 * list lock for the CPU involved or the thread lock for aThread held
		 * and this lock is released before B occurs.
		 * There is a barrier between C and D (__e32_atomic_cas_ord_ptr).
		 * Any observer which observes B must also have observed A.
		 * Any observer which observes D must also have observed C.
		 * If aThread observes B before C (i.e. enters user mode without running
		 * the callback) it must observe A before C and so it must also observe
		 * A before D (i.e. D reads the correct value for iCurrent).
		 */
		TInt current = aThread->iCurrent;
		if (current)
			{
			TInt cpu = current & NSchedulable::EReadyCpuMask;
			if (cpu != NKern::CurrentCpu())
				send_resched_ipi(cpu);
			}
		}
done:
	NKern::Unlock();
	return result;
	}
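
// The do/while loop above is a classic lock-free stack push: read the head,
// link the new node to it, and publish with a compare-and-swap, retrying if
// the head moved meanwhile. A minimal sketch of the same pattern, with
// illustrative names (Node/Push are not part of this file):
#if 0
struct Node { Node* iNext; };

void Push(Node*& aHead, Node* aNode)
	{
	Node* head = aHead;
	do	{
		aNode->iNext = head;	// link candidate node to the observed head
		} while (!__e32_atomic_cas_ord_ptr(&aHead, &head, aNode));
	// on failure the CAS primitive reloads 'head', so the loop re-links and retries
	}
#endif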