// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\x86\ncthrd.cpp
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

#include <x86.h>
#include <apic.h>
#include <nk_irq.h>

// Called by a thread when it first runs
void __StartThread();
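
// Architecture hooks called by the generic nanokernel layer when a thread is
// killed or exits; no additional x86-specific work is required, so both are
// empty.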
void NThreadBase::OnKill()
    {
    }

void NThreadBase::OnExit()
    {
    }

extern void __ltr(TInt /*aSelector*/);

extern "C" TUint __tr();
extern void InitAPTimestamp(SNThreadCreateInfo& aInfo);
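
// Create a nanokernel thread.
// For the initial thread on a CPU (aInitial true) this also allocates the CPU
// number, initialises that CPU's TSubScheduler and APIC ID mapping and, on
// application processors, loads the TSS and performs per-CPU interrupt setup.
// For all other threads it builds an initial stack frame from which the first
// reschedule starts execution in __StartThread.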
TInt NThread::Create(SNThreadCreateInfo& aInfo, TBool aInitial)
    {
    if (!aInfo.iStackBase || aInfo.iStackSize<0x100)
        return KErrArgument;
    new (this) NThread;
    TInt cpu = -1;
    if (aInitial)
        {
        cpu = __e32_atomic_add_ord32(&TheScheduler.iNumCpus, 1);
        if (cpu==0)
            memset(SubSchedulerLookupTable, 0x9a, sizeof(SubSchedulerLookupTable));
        aInfo.iCpuAffinity = cpu;
        // OK since we can't migrate yet
        TUint32 apicid = *(volatile TUint32*)(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID) >> 24;
        TSubScheduler& ss = TheSubSchedulers[cpu];
        ss.i_APICID = (TAny*)(apicid<<24);
        ss.iCurrentThread = this;
        SubSchedulerLookupTable[apicid] = &ss;
        ss.iLastTimestamp64 = NKern::Timestamp();
        iRunCount64 = UI64LIT(1);
        __KTRACE_OPT(KBOOT,DEBUGPRINT("Init: cpu=%d APICID=%08x ss=%08x", cpu, apicid, &ss));
        if (cpu)
            {
            __ltr(TSS_SELECTOR(cpu));
            NIrq::HwInit2AP();
            __e32_atomic_ior_ord32(&TheScheduler.iActiveCpus1, 1<<cpu);
            __e32_atomic_ior_ord32(&TheScheduler.iActiveCpus2, 1<<cpu);
            __e32_atomic_ior_ord32(&TheScheduler.iCpusNotIdle, 1<<cpu);
            __KTRACE_OPT(KBOOT,DEBUGPRINT("AP TR=%x",__tr()));
            }
        }
    TInt r=NThreadBase::Create(aInfo,aInitial);
    if (r!=KErrNone)
        return r;
    if (!aInitial)
        {
        TLinAddr stack_top = (TLinAddr)iStackBase + (TLinAddr)iStackSize;
        TLinAddr sp = stack_top;
        TUint32 pb = (TUint32)aInfo.iParameterBlock;
        SThreadStackStub* tss = 0;
        if (aInfo.iParameterBlockSize)
            {
            tss = (SThreadStackStub*)stack_top;
            --tss;
            tss->iVector = SThreadStackStub::EVector;
            tss->iError = 0;
            tss->iEip = 0;
            tss->iCs = 0;
            tss->iEflags = 0;
            sp = (TLinAddr)tss;
            sp -= (TLinAddr)aInfo.iParameterBlockSize;
            wordmove((TAny*)sp, aInfo.iParameterBlock, aInfo.iParameterBlockSize);
            pb = (TUint32)sp;
            tss->iPBlock = sp;
            }
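        // Build an SThreadInitStack frame at the top of the new thread's
        // supervisor stack. When the thread is first scheduled the reschedule
        // code unwinds through this frame into __StartThread, with EBX holding
        // the parameter block pointer and EAX the entry point function.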
        SThreadInitStack* tis = (SThreadInitStack*)sp;
        --tis;
        tis->iR.iCR0 = X86::DefaultCR0 | KX86CR0_TS;
        tis->iR.iReschedFlag = 1;
        tis->iR.iEip = (TUint32)&__StartThread;
        tis->iR.iReason = 0;
        tis->iX.iEcx = 0;
        tis->iX.iEdx = 0;
        tis->iX.iEbx = pb;              // parameter block pointer
        tis->iX.iEsi = 0;
        tis->iX.iEdi = 0;
        tis->iX.iEbp = stack_top;
        tis->iX.iEax = (TUint32)aInfo.iFunction;
        tis->iX.iDs = KRing0DS;
        tis->iX.iEs = KRing0DS;
        tis->iX.iFs = 0;
        tis->iX.iGs = KRing0DS;
        tis->iX.iVector = SThreadInitStack::EVector;
        tis->iX.iError = 0;
        tis->iX.iEip = (TUint32)aInfo.iFunction;
        tis->iX.iCs = KRing0CS;
        tis->iX.iEflags = (TUint32)(EX86FlagIF|EX86FlagAC|0x1002);
        tis->iX.iEsp3 = 0xFFFFFFFFu;
        tis->iX.iSs3 = 0xFFFFFFFFu;
        wordmove(&iCoprocessorState, DefaultCoprocessorState, sizeof(iCoprocessorState));
        iSavedSP = (TLinAddr)tis;
        }
    else
        {
        NKern::EnableAllInterrupts();

        // synchronize AP's timestamp with BP's
        if (cpu>0)
            InitAPTimestamp(aInfo);
        }
#ifdef BTRACE_THREAD_IDENTIFICATION
    BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadCreate,this);
#endif
    return KErrNone;
    }
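
// Print the register state captured in a TX86ExcInfo exception record to the
// debug port, together with the current thread, CPU, kernel lock count and
// IRQ nesting level.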
void DumpExcInfo(TX86ExcInfo& a)
    {
    DEBUGPRINT("Exc %02x EFLAGS=%08x FAR=%08x ErrCode=%08x",a.iExcId,a.iEflags,a.iFaultAddress,a.iExcErrorCode);
    DEBUGPRINT("EAX=%08x EBX=%08x ECX=%08x EDX=%08x",a.iEax,a.iEbx,a.iEcx,a.iEdx);
    DEBUGPRINT("ESP=%08x EBP=%08x ESI=%08x EDI=%08x",a.iEsp,a.iEbp,a.iEsi,a.iEdi);
    DEBUGPRINT(" CS=%08x EIP=%08x DS=%08x SS=%08x",a.iCs,a.iEip,a.iDs,a.iSs);
    DEBUGPRINT(" ES=%08x FS=%08x GS=%08x",a.iEs,a.iFs,a.iGs);
    if (a.iCs&3)
        {
        DEBUGPRINT("SS3=%08x ESP3=%08x",a.iSs3,a.iEsp3);
        }
    TScheduler& s = TheScheduler;
    TInt irq = NKern::DisableAllInterrupts();
    TSubScheduler& ss = SubScheduler();
    NThreadBase* ct = ss.iCurrentThread;
    TInt inc = TInt(ss.i_IrqNestCount);
    TInt cpu = ss.iCpuNum;
    NKern::RestoreInterrupts(irq);
    DEBUGPRINT("Thread %T, CPU %d, KLCount=%08x, IrqNest=%d",ct,cpu,ss.iKernLockCount,inc);
    }
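
// Reconstruct a thread's register state from the SThreadExcStack frame pushed
// on its supervisor stack when it entered the kernel through an interrupt or
// exception. aSystem selects the supervisor-side view, in which ESP and SS are
// reconstructed rather than taken from the frame.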
void GetContextAfterExc(TX86RegSet& aContext, SThreadExcStack* txs, TUint32& aAvailRegistersMask, TBool aSystem)
    {
    TInt cpl = txs->iCs & 3;
    aAvailRegistersMask = 0xffffu;  // EAX,EBX,ECX,EDX,ESP,EBP,ESI,EDI,CS,DS,ES,FS,GS,SS,EFLAGS,EIP all valid
    aContext.iEax = txs->iEax;
    aContext.iEbx = txs->iEbx;
    aContext.iEcx = txs->iEcx;
    aContext.iEdx = txs->iEdx;
    if (aSystem)
        {
        aContext.iEsp = TUint32(txs+1);
        if (cpl==0)
            aContext.iEsp -= 8;             // two fewer words pushed if interrupt taken while CPL=0
        aContext.iSs = KRing0DS;
        aAvailRegistersMask &= ~0x2000u;    // SS assumed not read
        }
    else if (cpl==3)
        {
        aContext.iEsp = txs->iEsp3;
        aContext.iSs = txs->iSs3;
        }
    else
        {
        __crash();
        }
    aContext.iEbp = txs->iEbp;
    aContext.iEsi = txs->iEsi;
    aContext.iEdi = txs->iEdi;
    aContext.iCs = txs->iCs;
    aContext.iDs = txs->iDs;
    aContext.iEs = txs->iEs;
    aContext.iFs = txs->iFs;
    aContext.iGs = txs->iGs;
    aContext.iEflags = txs->iEflags;
    aContext.iEip = txs->iEip;
    }
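
// As GetContextAfterExc(), but for a thread which entered the kernel through
// the slow exec (system call) path, where an SThreadSlowExecStack frame is
// pushed instead.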
void GetContextAfterSlowExec(TX86RegSet& aContext, SThreadSlowExecStack* tsxs, TUint32& aAvailRegistersMask)
    {
    TInt cpl = tsxs->iCs & 3;
    if (cpl!=3)
        {
        __crash();
        }
    aAvailRegistersMask = 0xffffu;  // EAX,EBX,ECX,EDX,ESP,EBP,ESI,EDI,CS,DS,ES,FS,GS,SS,EFLAGS,EIP all valid
    aContext.iEax = tsxs->iEax;
    aContext.iEbx = tsxs->iEbx;
    aContext.iEcx = tsxs->iEcx;
    aContext.iEdx = tsxs->iEdx;
    aContext.iEsp = tsxs->iEsp3;
    aContext.iSs = tsxs->iSs3;
    aContext.iEbp = tsxs->iEbp;
    aContext.iEsi = tsxs->iEsi;
    aContext.iEdi = tsxs->iEdi;
    aContext.iCs = tsxs->iCs;
    aContext.iDs = tsxs->iDs;
    aContext.iEs = tsxs->iEs;
    aContext.iFs = tsxs->iFs;
    aContext.iGs = tsxs->iGs;
    aContext.iEflags = tsxs->iEflags;
    aContext.iEip = tsxs->iEip;
    }

// Enter and return with kernel locked
void NThread::GetUserContext(TX86RegSet& aContext, TUint32& aAvailRegistersMask)
    {
    NThread* pC = NCurrentThreadL();
    TSubScheduler* ss = 0;
    if (pC != this)
        {
        AcqSLock();
        if (iWaitState.ThreadIsDead())
            {
            RelSLock();
            aAvailRegistersMask = 0;
            return;
            }
        if (iReady && iParent->iReady)
            {
            ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
            ss->iReadyListLock.LockOnly();
            }
        if (iCurrent)
            {
            // thread is actually running on another CPU
            // interrupt that CPU and wait for it to enter interrupt mode
            // this allows a snapshot of the thread user state to be observed
            // and ensures the thread cannot return to user mode
            send_resched_ipi_and_wait(iLastCpu);
            }
        }
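    // Inspect the topmost kernel-entry frame on the supervisor stack: stack[-7]
    // is its vector number (0x21 indicates entry via a slow exec call), and the
    // 0xFFFFFFFF sentinels written by NThread::Create() indicate that the
    // thread has never entered user mode.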
    TUint32* stack = (TUint32*)(TLinAddr(iStackBase) + TLinAddr(iStackSize));
    if (stack[-1]!=0xFFFFFFFFu && stack[-2]!=0xFFFFFFFFu && stack[-7]<0x100u)   // if not, thread never entered user mode
        {
        if (stack[-7] == 0x21)  // slow exec
            GetContextAfterSlowExec(aContext, ((SThreadSlowExecStack*)stack)-1, aAvailRegistersMask);
        else
            GetContextAfterExc(aContext, ((SThreadExcStack*)stack)-1, aAvailRegistersMask, FALSE);
        }
    if (pC != this)
        {
        if (ss)
            ss->iReadyListLock.UnlockOnly();
        RelSLock();
        }
    }
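
// Generic IPI used to capture the register state of a thread that is currently
// executing on another CPU: the ISR runs on that CPU and reads the thread's
// kernel-entry frame from its supervisor stack.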
class TGetContextIPI : public TGenericIPI
    {
public:
    void Get(TInt aCpu, TX86RegSet& aContext, TUint32& aAvailRegistersMask);
    static void Isr(TGenericIPI*);
public:
    TX86RegSet* iContext;
    TUint32* iAvailRegsMask;
    };

void TGetContextIPI::Isr(TGenericIPI* aPtr)
    {
    TGetContextIPI& ipi = *(TGetContextIPI*)aPtr;
    TX86RegSet& a = *ipi.iContext;
    TSubScheduler& ss = SubScheduler();
    TUint32* irqstack = (TUint32*)ss.i_IrqStackTop;
    SThreadExcStack* txs = (SThreadExcStack*)irqstack[-1];  // first word pushed on IRQ stack points to thread supervisor stack
    GetContextAfterExc(a, txs, *ipi.iAvailRegsMask, TRUE);
    }

void TGetContextIPI::Get(TInt aCpu, TX86RegSet& aContext, TUint32& aAvailRegsMask)
    {
    iContext = &aContext;
    iAvailRegsMask = &aAvailRegsMask;
    Queue(&Isr, 1u<<aCpu);
    WaitCompletion();
    }

// Enter and return with kernel locked
void NThread::GetSystemContext(TX86RegSet& aContext, TUint32& aAvailRegsMask)
    {
    aAvailRegsMask = 0;
    NThread* pC = NCurrentThreadL();
    __NK_ASSERT_ALWAYS(pC!=this);
    TSubScheduler* ss = 0;
    AcqSLock();
    if (iWaitState.ThreadIsDead())
        {
        RelSLock();
        return;
        }
    if (iReady && iParent->iReady)
        {
        ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
        ss->iReadyListLock.LockOnly();
        }
    if (iCurrent)
        {
        // thread is actually running on another CPU
        // use an interprocessor interrupt to get a snapshot of the state
        TGetContextIPI ipi;
        ipi.Get(iLastCpu, aContext, aAvailRegsMask);
        }
    else
        {
        // thread is not running and can't start
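        // iSavedSP points to the SThreadReschedStack frame saved when the
        // thread was switched out; its iReason field records the kind of
        // reschedule and therefore what register layout sits above it.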
        SThreadReschedStack* trs = (SThreadReschedStack*)iSavedSP;
        TUint32 kct = trs->iReason;
        TLinAddr sp = TLinAddr(trs+1);
        TUint32* stack = (TUint32*)sp;
        switch (kct)
            {
            case 0: // thread not yet started
                {
                aContext.iEcx = stack[0];
                aContext.iEdx = stack[1];
                aContext.iEbx = stack[2];
                aContext.iEsi = stack[3];
                aContext.iEdi = stack[4];
                aContext.iEbp = stack[5];
                aContext.iEax = stack[6];
                aContext.iDs = stack[7];
                aContext.iEs = stack[8];
                aContext.iFs = stack[9];
                aContext.iGs = stack[10];
                aContext.iEsp = sp + 40 - 8;    // entry to initial function
                aContext.iEip = aContext.iEax;
                aContext.iEflags = 0x41202;     // guess
                aContext.iCs = KRing0CS;
                aContext.iSs = KRing0DS;
                aAvailRegsMask = 0x9effu;
                break;
                }
            case 1: // unlock
                {
                aContext.iFs = stack[0];
                aContext.iGs = stack[1];
                aContext.iEbx = stack[2];
                aContext.iEbp = stack[3];
                aContext.iEdi = stack[4];
                aContext.iEsi = stack[5];
                aContext.iEip = stack[6];       // return address from NKern::Unlock()
                aContext.iCs = KRing0CS;
                aContext.iDs = KRing0DS;
                aContext.iEs = KRing0DS;
                aContext.iSs = KRing0DS;
                aContext.iEsp = sp + 28;        // ESP after return from NKern::Unlock()
                aContext.iEax = 0;              // unknown
                aContext.iEcx = 0;              // unknown
                aContext.iEdx = 0;              // unknown
                aContext.iEflags = 0x41202;     // guess
                aAvailRegsMask = 0x98f2u;       // EIP,GS,FS,EDI,ESI,EBP,ESP,EBX available, others guessed or unavailable
                break;
                }
            case 2: // IRQ
                {
                GetContextAfterExc(aContext, (SThreadExcStack*)sp, aAvailRegsMask, TRUE);
                break;
                }
            default: // unknown reschedule reason
                __NK_ASSERT_ALWAYS(0);
            }
        }
    if (ss)
        ss->iReadyListLock.UnlockOnly();
    RelSLock();
    }

// Enter and return with kernel locked
void NThread::SetUserContext(const TX86RegSet& aContext, TUint32& aRegMask)
    {
    NThread* pC = NCurrentThreadL();
    TSubScheduler* ss = 0;
    if (pC != this)
        {
        AcqSLock();
        if (iWaitState.ThreadIsDead())
            {
            RelSLock();
            aRegMask = 0;
            return;
            }
        if (iReady && iParent->iReady)
            {
            ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
            ss->iReadyListLock.LockOnly();
            }
        if (iCurrent)
            {
            // thread is actually running on another CPU
            // interrupt that CPU and wait for it to enter interrupt mode
            // this allows a snapshot of the thread user state to be observed
            // and ensures the thread cannot return to user mode
            send_resched_ipi_and_wait(iLastCpu);
            }
        }
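    // Locate the topmost user-mode entry frame on the supervisor stack, using
    // the same check as NThread::GetUserContext() above.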
    TUint32* stack = (TUint32*)(TLinAddr(iStackBase) + TLinAddr(iStackSize));
    SThreadExcStack* txs = 0;
    SThreadSlowExecStack* tsxs = 0;
    aRegMask &= 0xffffu;
    if (stack[-1]!=0xFFFFFFFFu && stack[-2]!=0xFFFFFFFFu && stack[-7]<0x100u)   // if not, thread never entered user mode
        {
        if (stack[-7] == 0x21)  // slow exec
            tsxs = ((SThreadSlowExecStack*)stack)-1;
        else
            txs = ((SThreadExcStack*)stack)-1;

#define WRITE_REG(reg, value) \
        { if (tsxs) tsxs->reg=(value); else txs->reg=(value); }

        if (aRegMask & 0x0001u)
            WRITE_REG(iEax, aContext.iEax);
        if (aRegMask & 0x0002u)
            WRITE_REG(iEbx, aContext.iEbx);
        if (aRegMask & 0x0004u)
            {
            // don't allow write to iEcx if in slow exec since this may conflict
            // with handle preprocessing
            if (tsxs)
                aRegMask &= ~0x0004u;
            else
                txs->iEcx = aContext.iEcx;
            }
        if (aRegMask & 0x0008u)
            WRITE_REG(iEdx, aContext.iEdx);
        if (aRegMask & 0x0010u)
            WRITE_REG(iEsp3, aContext.iEsp);
        if (aRegMask & 0x0020u)
            WRITE_REG(iEbp, aContext.iEbp);
        if (aRegMask & 0x0040u)
            WRITE_REG(iEsi, aContext.iEsi);
        if (aRegMask & 0x0080u)
            WRITE_REG(iEdi, aContext.iEdi);
        if (aRegMask & 0x0100u)
            WRITE_REG(iCs, aContext.iCs|3);
        if (aRegMask & 0x0200u)
            WRITE_REG(iDs, aContext.iDs|3);
        if (aRegMask & 0x0400u)
            WRITE_REG(iEs, aContext.iEs|3);
        if (aRegMask & 0x0800u)
            WRITE_REG(iFs, aContext.iFs|3);
        if (aRegMask & 0x1000u)
            WRITE_REG(iGs, aContext.iGs|3);
        if (aRegMask & 0x2000u)
            WRITE_REG(iSs3, aContext.iSs|3);
        if (aRegMask & 0x4000u)
            WRITE_REG(iEflags, aContext.iEflags);
        if (aRegMask & 0x8000u)
            WRITE_REG(iEip, aContext.iEip);
        }
    else
        aRegMask = 0;
    if (pC != this)
        {
        if (ss)
            ss->iReadyListLock.UnlockOnly();
        RelSLock();
        }
    }

/** Get (subset of) user context of specified thread.

    The nanokernel does not systematically save all registers in the supervisor
    stack on entry into privileged mode and the exact subset depends on why the
    switch to privileged mode occurred. So in general only a subset of the
    register set is available.

    @param aThread Thread to inspect. It can be the current thread or a
    non-current one.

    @param aContext Pointer to TX86RegSet structure where the context is
    copied.

    @param aAvailRegistersMask Bit mask telling which subset of the context is
    available and has been copied to aContext (1: register available / 0: not
    available). Bits represent fields in TX86RegSet, i.e.
    0:EAX 1:EBX 2:ECX 3:EDX 4:ESP 5:EBP 6:ESI 7:EDI
    8:CS 9:DS 10:ES 11:FS 12:GS 13:SS 14:EFLAGS 15:EIP

    @see TX86RegSet
    @see ThreadSetUserContext

    @pre Call in a thread context.
    @pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadGetUserContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetUserContext");
    TX86RegSet& a = *(TX86RegSet*)aContext;
    memclr(aContext, sizeof(TX86RegSet));
    NKern::Lock();
    aThread->GetUserContext(a, aAvailRegistersMask);
    NKern::Unlock();
    }
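
// Minimal usage sketch (illustrative only, not part of the original file; the
// target thread pointer aTargetThread is assumed to have been obtained
// elsewhere):
//
//     TX86RegSet regs;
//     TUint32 avail = 0;
//     NKern::ThreadGetUserContext(aTargetThread, &regs, avail);
//     if (avail & 0x8000u)        // bit 15: EIP was captured
//         DEBUGPRINT("user EIP=%08x", regs.iEip);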

/** Get (subset of) system context of specified thread.

    @param aThread Thread to inspect. It can be the current thread or a
    non-current one.

    @param aContext Pointer to TX86RegSet structure where the context is
    copied.

    @param aAvailRegistersMask Bit mask telling which subset of the context is
    available and has been copied to aContext (1: register available / 0: not
    available). Bits represent fields in TX86RegSet, i.e.
    0:EAX 1:EBX 2:ECX 3:EDX 4:ESP 5:EBP 6:ESI 7:EDI
    8:CS 9:DS 10:ES 11:FS 12:GS 13:SS 14:EFLAGS 15:EIP

    @see TX86RegSet
    @see ThreadGetUserContext

    @pre Call in a thread context.
    @pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadGetSystemContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetSystemContext");
    TX86RegSet& a = *(TX86RegSet*)aContext;
    memclr(aContext, sizeof(TX86RegSet));
    NKern::Lock();
    aThread->GetSystemContext(a, aAvailRegistersMask);
    NKern::Unlock();
    }


/** Set (subset of) user context of specified thread.

    @param aThread Thread to modify. It can be the current thread or a
    non-current one.

    @param aContext Pointer to TX86RegSet structure containing the context
    to set. The values of registers which aren't part of the context saved
    on the supervisor stack are ignored.

    @see TX86RegSet
    @see ThreadGetUserContext

    @pre Call in a thread context.
    @pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadSetUserContext(NThread* aThread, TAny* aContext)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadSetUserContext");
    TX86RegSet& a = *(TX86RegSet*)aContext;
    TUint32 mask = 0xffffu;
    NKern::Lock();
    aThread->SetUserContext(a, mask);
    NKern::Unlock();
    }


/** Return the total CPU time so far used by the specified thread.

    @param aThread The thread whose CPU time is required.

    @return The total CPU time in units of 1/NKern::CpuTimeMeasFreq().
*/
EXPORT_C TUint64 NKern::ThreadCpuTime(NThread* aThread)
    {
    TSubScheduler* ss = 0;
    NKern::Lock();
    aThread->AcqSLock();
    if (aThread->i_NThread_Initial)
        ss = &TheSubSchedulers[aThread->iLastCpu];
    else if (aThread->iReady && aThread->iParent->iReady)
        ss = &TheSubSchedulers[aThread->iParent->iReady & NSchedulable::EReadyCpuMask];
    if (ss)
        ss->iReadyListLock.LockOnly();
    TUint64 t = aThread->iTotalCpuTime64;
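    // Add the time the thread has spent running since the owning CPU's last
    // timestamp if it is currently executing, or if it is that CPU's initial
    // thread while no other thread is current there.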
    if (aThread->iCurrent || (aThread->i_NThread_Initial && !ss->iCurrentThread))
        t += (NKern::Timestamp() - ss->iLastTimestamp64);
    if (ss)
        ss->iReadyListLock.UnlockOnly();
    aThread->RelSLock();
    NKern::Unlock();
    return t;
    }
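
// __fastcall trampoline (argument arrives in ECX) so that a DFC can be queued
// from outside C++, e.g. from assembler stubs.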
extern "C" void __fastcall add_dfc(TDfc* aDfc)
    {
    aDfc->Add();
    }
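
// Queue aCallback to be run by aThread the next time it returns to user mode.
// Returns KErrNone if the callback was queued, KErrInUse if it is already on a
// queue, or KErrDied if the thread is exiting and can no longer run callbacks.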
TInt NKern::QueueUserModeCallback(NThreadBase* aThread, TUserModeCallback* aCallback)
    {
    __e32_memory_barrier();
    if (aCallback->iNext != KUserModeCallbackUnqueued)
        return KErrInUse;
    TInt result = KErrDied;
    NKern::Lock();
    TUserModeCallback* listHead = aThread->iUserModeCallbacks;
    do  {
        if (TLinAddr(listHead) & 3)
            goto done;  // thread exiting
        aCallback->iNext = listHead;
        } while (!__e32_atomic_cas_ord_ptr(&aThread->iUserModeCallbacks, &listHead, aCallback));
    result = KErrNone;

    if (!listHead)  // if this isn't the first callback, someone else will already have done this bit
        {
        /*
         * If aThread is currently running on another CPU we need to send an IPI so
         * that it will enter kernel mode and run the callback.
         * The synchronization is tricky here. We want to check if the thread is
         * running and if so on which core. We need to avoid any possibility of
         * the thread entering user mode without having seen the callback,
         * either because we thought it wasn't running so didn't send an IPI or
         * because the thread migrated after we looked and we sent the IPI to
         * the wrong processor. Sending a redundant IPI is not a problem (e.g.
         * because the thread is running in kernel mode - which we can't tell -
         * or because the thread stopped running after we looked).
         * The following events are significant:
         * Event A: Target thread writes to iCurrent when it starts running
         * Event B: Target thread reads iUserModeCallbacks before entering user
         *          mode
         * Event C: This thread writes to iUserModeCallbacks
         * Event D: This thread reads iCurrent to check if aThread is running
         * There is a barrier between A and B since A occurs with the ready
         * list lock for the CPU involved or the thread lock for aThread held
         * and this lock is released before B occurs.
         * There is a barrier between C and D (__e32_atomic_cas_ord_ptr).
         * Any observer which observes B must also have observed A.
         * Any observer which observes D must also have observed C.
         * If aThread observes B before C (i.e. enters user mode without running
         * the callback) it must observe A before C and so it must also observe
         * A before D (i.e. D reads the correct value for iCurrent).
         */
        TInt current = aThread->iCurrent;
        if (current)
            {
            TInt cpu = current & NSchedulable::EReadyCpuMask;
            if (cpu != NKern::CurrentCpu())
                send_resched_ipi(cpu);
            }
        }
done:
    NKern::Unlock();
    return result;
    }