// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\nk_irq.cpp
//
//

/**
 @file
 @internalTechnology
*/

#include <e32cmn.h>
#include <e32cmn_private.h>
#include "nk_priv.h"
#include <nk_irq.h>

NIrq Irq[NK_MAX_IRQS];
NIrqHandler Handlers[NK_MAX_IRQ_HANDLERS];
NIrqHandler* NIrqHandler::FirstFree;
extern "C" void send_irq_ipi(TSubScheduler*, TInt);
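// StepCookie advances the 15-bit cookie held in the top half of a handler's
// handle. The loop is in effect a linear feedback shift register: each step
// shifts the cookie left one bit and feeds the XOR of the top two bits back
// into the bottom. Stepping the cookie on unbind means a stale handle no
// longer matches iHandle, so it cannot reach a recycled NIrqHandler.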
void StepCookie(volatile TUint16& p, TInt n)
	{
	TUint32 x = p<<17;
	while(n--)
		{
		TUint32 y = x;
		x<<=1;
		y^=x;
		x |= ((y>>31)<<17);
		}
	p = (TUint16)(x>>17);
	}
NIrq::NIrq()
	: iNIrqLock(TSpinLock::EOrderNIrq)
	{
	iIState = EWait;
	iEventsPending = 0;
	iEnabledEvents = 0;
	iHwId = 0;
	iX = 0;
	}
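// BindRaw attaches a bare ISR to this interrupt, bypassing the NIrqHandler
// event machinery. The ISR function pointer and its argument are stashed in
// the otherwise unused handler queue anchor: iA.iNext holds the function,
// iA.iPrev the argument. Raw binding is refused for shared interrupts and
// for interrupts that already have handlers bound.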
TInt NIrq::BindRaw(NIsr aIsr, TAny* aPtr)
	{
	// Call only from thread context
	TInt r = KErrNone;
	Wait();
	iNIrqLock.LockOnly();
	if (iStaticFlags & EShared)
		{
		r = KErrAccessDenied;
		goto error;
		}
	if ( (iIState & ERaw) || !iHandlers.IsEmpty())
		{
		r = KErrInUse;
		goto error;
		}
	iHandlers.iA.iNext = (SDblQueLink*)aIsr;
	iHandlers.iA.iPrev = (SDblQueLink*)aPtr;
	__e32_atomic_ior_rel32(&iIState, ERaw);
error:
	iNIrqLock.UnlockOnly();
	Done();
	return r;
	}
TInt NIrq::UnbindRaw()
	{
	// Call only from thread context
	TInt r = DisableRaw(TRUE);
	if (r != KErrNone)
		return r;
	Wait();
	iNIrqLock.LockOnly();
	if (iIState & ERaw)
		{
		iHandlers.iA.iNext = 0;
		iHandlers.iA.iPrev = 0;
		++iGeneration;	// release anyone still waiting in Disable()
		__e32_atomic_and_rel32(&iIState, ~(ERaw|EUnbind));
		}
	iNIrqLock.UnlockOnly();
	Done();
	return r;
	}
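// DisableRaw masks the interrupt at the hardware and, unless called from
// interrupt context, spin-waits until any in-flight invocation of the ISR
// has drained on all CPUs. Bit 0 of iEnabledEvents records that the
// hardware has been disabled here; the generation count lets EnableRaw()
// and UnbindRaw() release a waiter early.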
TInt NIrq::DisableRaw(TBool aUnbind)
	{
	TBool wait = FALSE;
	TInt r = KErrNone;
	TInt irq = __SPIN_LOCK_IRQSAVE(iNIrqLock);
	if (!(iIState & ERaw))
		r = KErrGeneral;
	else
		{
		wait = TRUE;
		if (aUnbind)
			__e32_atomic_ior_acq32(&iIState, EUnbind);
		if (!(iEnabledEvents & 1))
			{
			iEnabledEvents |= 1;
			HwDisable();
//			wait = TRUE;
			}
		}
	__SPIN_UNLOCK_IRQRESTORE(iNIrqLock,irq);
	TInt c = NKern::CurrentContext();
	if (wait && c!=NKern::EInterrupt)
		{
		// wait for currently running handler to finish or interrupt to be reenabled
		if (c==NKern::EThread)
			NKern::ThreadEnterCS();
		HwWaitCpus();	// ensure other CPUs have had a chance to accept any outstanding interrupts
		TUint32 g = iGeneration;
		while ( ((iIState >> 16) || HwPending()) && (iGeneration == g))
			{
			__chill();
			}
		if (c==NKern::EThread)
			NKern::ThreadLeaveCS();
		}
	return r;
	}
TInt NIrq::EnableRaw()
	{
	TInt r = KErrNone;
	TInt irq = __SPIN_LOCK_IRQSAVE(iNIrqLock);
	if (!(iIState & ERaw))
		r = KErrGeneral;
	else if (iIState & EUnbind)
		r = KErrNotReady;
	else if (iEnabledEvents & 1)
		{
		iEnabledEvents = 0;
		HwEnable();
		++iGeneration;
		}
	__SPIN_UNLOCK_IRQRESTORE(iNIrqLock,irq);
	return r;
	}
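// Bind attaches an NIrqHandler to this interrupt. A non-shared interrupt
// accepts only one handler; a shared interrupt accepts several, unless an
// already-bound handler was marked exclusive.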
TInt NIrq::Bind(NIrqHandler* aH)
	{
	// Call only from thread context
	TInt r = KErrInUse;
	Wait();
	if (!(iIState & ERaw))
		{
		r = KErrNone;
		TBool empty = iHandlers.IsEmpty();
		TBool shared = iStaticFlags & EShared;
		TBool exclusive = iIState & NIrqHandler::EExclusive;
		if (!empty)
			{
			if (!shared || exclusive)
				{
				r = KErrAccessDenied;
				goto error;
				}
			NIrqHandler* h = _LOFF(iHandlers.First(), NIrqHandler, iIrqLink);
			if (h->iHState & NIrqHandler::EExclusive)
				{
				r = KErrAccessDenied;
				goto error;
				}
			}
		aH->iIrq = this;
		iHandlers.Add(&aH->iIrqLink);
		}
error:
	Done();
	return r;
	}
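// HwIsr is the common interrupt entry point. A raw-bound ISR is called
// directly and acknowledged. Otherwise each bound handler is activated
// (run inline or queued to another CPU), with EOI timing depending on the
// trigger type: edge-triggered interrupts are acknowledged before the
// handlers run, level-triggered ones afterwards, and a level-triggered
// interrupt is kept disabled while event handlers are still pending so it
// cannot reassert continuously.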
void NIrq::HwIsr()
	{
	TRACE_IRQ12(16, this, iVector, iIState);
	TBool eoi_done = FALSE;
	TUint32 rcf0 = EnterIsr();	// for initial run count
	TUint32 rcf1 = iIState;		// might have changed while we were waiting in EnterIsr()
	if (rcf1 & ERaw)
		{
		if (!(rcf1 & EUnbind))
			{
			NIsr f = (NIsr)iHandlers.iA.iNext;
			TAny* p = iHandlers.iA.iPrev;
			(*f)(p);
			}
		HwEoi();
		IsrDone();
		return;
		}
	if (rcf0 >> 16)
		{
		HwEoi();
		return;
		}
	if (!(iStaticFlags & ELevel))
		{
		eoi_done = TRUE;
		HwEoi();
		}
	do	{
		// Handler list can't be touched now
		SDblQueLink* anchor = &iHandlers.iA;
		SDblQueLink* p = anchor->iNext;
		while (p != anchor)
			{
			NIrqHandler* h = _LOFF(p, NIrqHandler, iIrqLink);
			h->Activate(1);
			p = p->iNext;
			}
		if (!eoi_done)
			{
			eoi_done = TRUE;
			HwEoi();
			}
		if ((iStaticFlags & ELevel) && iEventsPending)
			{
			// For a level triggered interrupt make sure interrupt is disabled until
			// all pending event handlers have run, to avoid a continuous interrupt.
			TInt irq = __SPIN_LOCK_IRQSAVE(iNIrqLock);
			if (iEventsPending)
				{
				iEnabledEvents |= 1;
				HwDisable();
				}
			__SPIN_UNLOCK_IRQRESTORE(iNIrqLock,irq);
			}
		} while (IsrDone());
	}
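// Activate runs (or queues) one handler invocation. If the handler is tied
// to a thread or thread group, the event must execute on the CPU to which
// that thread/group is currently attached, so the handler is queued to that
// CPU's sub-scheduler when it differs from the current one.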
void NIrqHandler::Activate(TInt aCount)
	{
	TUint32 orig = DoActivate(aCount);
	TRACE_IRQ12(17, this, orig, aCount);
	if (orig & (EDisable|EUnbind|EActive))
		return;	// disabled or already active
	NSchedulable* tied = iTied;
	if (tied)
		{
		// we need to enforce mutual exclusion between the event handler
		// and the tied thread or thread group, so the event handler must
		// run on the CPU to which the thread or group is currently attached
		// once the event has been attached to that CPU, the thread/group
		// can't be migrated until the event handler completes.
		// need a pending event count for the tied thread/group
		// so we know when the thread/group can be migrated
		TInt tied_cpu = tied->BeginTiedEvent();
		TInt this_cpu = NKern::CurrentCpu();
		if (tied_cpu != this_cpu)
			{
			__e32_atomic_add_acq32(&iIrq->iEventsPending, 1);
			TheSubSchedulers[tied_cpu].QueueEventAndKick(this);
			// FIXME: move IRQ over to tied CPU if this is the only handler for that IRQ
			// what to do about shared IRQs?
			return;
			}
		}
	// event can run on this CPU so run it now
	if (aCount)
		{
		orig = EventBegin();
		TRACE_IRQ8(18, this, orig);
		(*iFn)(iPtr);
		orig = EventDone();
		TRACE_IRQ8(19, this, orig);
		if (!(orig & EActive))
			{
			if (tied)
				tied->EndTiedEvent();
			return;	// that was last occurrence or event now disabled
			}
		}
	__e32_atomic_add_ord32(&iIrq->iEventsPending, 1);
	// add event to this cpu
	SubScheduler().QueueEventAndKick(this);
	}

NIrqHandler::NIrqHandler()
	{
	iIrqLink.iNext = 0;
	iIrq = 0;
	iTied = 0;
	iHState = EDisable|EBind|ENotReady|EEventHandlerIrq;
	iFn = 0;
	iPtr = 0;
	memclr(iNIrqHandlerSpare, sizeof(iNIrqHandlerSpare));
	}
void NIrqHandler::Free()
	{
	NKern::Lock();
	NEventHandler::TiedLock.LockOnly();
	if (!iTied)	// Only free if iTied has been cleared
		{
		iIrqLink.iNext = FirstFree;
		FirstFree = this;
		}
	NEventHandler::TiedLock.UnlockOnly();
	NKern::Unlock();
	}
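// The free list of NIrqHandlers is a singly linked list threaded through
// iIrqLink.iNext, protected by NEventHandler::TiedLock. Alloc() pops a
// handler and placement-constructs it; Free() pushes it back, but only
// once iTied has been cleared.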
NIrqHandler* NIrqHandler::Alloc()
	{
	NKern::Lock();
	NEventHandler::TiedLock.LockOnly();
	NIrqHandler* p = FirstFree;
	if (p)
		FirstFree = (NIrqHandler*)p->iIrqLink.iNext;
	NEventHandler::TiedLock.UnlockOnly();
	NKern::Unlock();
	if (p)
		new (p) NIrqHandler();
	return p;
	}
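// Enable re-enables a handler identified by its full handle (index plus
// cookie). Handler enable counts are kept in the NIrq's iEnabledEvents in
// multiples of 2 (bit 0 is reserved to mean "hardware disabled"), so the
// hardware interrupt is unmasked only when the first handler is enabled.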
TInt NIrqHandler::Enable(TInt aHandle)
	{
	// call from any context
	TBool reactivate = FALSE;
	TInt r = KErrNotReady;
	NIrq* pI = iIrq;
	if (!pI)
		return KErrNotReady;
	TInt irq = __SPIN_LOCK_IRQSAVE(pI->iNIrqLock);	// OK since NIrq's are never deleted
	if (iIrq==pI && TUint(aHandle)==iHandle)	// check handler not unbound
		{
		TUint32 orig = DoSetEnabled();	// clear EDisable and EBind provided neither EUnbind nor ENotReady set
		if (!(orig & (EUnbind|ENotReady)))
			{
			r = KErrNone;
			if (orig & EDisable)	// check not already enabled
				{
				++iGeneration;
				TUint32 n = pI->iEnabledEvents;
				pI->iEnabledEvents += 2;
				if (n==0)
					pI->HwEnable();	// enable HW interrupt if this is first handler to be enabled
				if ((orig >> 16) && !(orig & EActive))
					// replay remembered interrupt(s)
					reactivate = TRUE;
				}
			}
		}
	if (reactivate)
		{
		pI->iNIrqLock.UnlockOnly();
		Activate(0);
		pI->iNIrqLock.LockOnly();
		}
	__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
	return r;
	}
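// Disable marks the handler disabled (and optionally unbinding), drops the
// NIrq's enable count, and masks the hardware when no enabled handlers
// remain. On unbind the handle cookie is stepped so stale handles stop
// matching. Outside interrupt context the call then waits for any running
// activation of the handler to finish.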
TInt NIrqHandler::Disable(TBool aUnbind, TInt aHandle)
	{
	// call from any context
	NIrq* pI = iIrq;
	if (!pI)
		return KErrGeneral;
	TInt irq = __SPIN_LOCK_IRQSAVE(pI->iNIrqLock);	// OK since NIrq's are never deleted
	if (iIrq != pI || TUint(aHandle)!=iHandle)	// check handler not unbound
		{
		__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
		return KErrGeneral;
		}
	TInt r = aUnbind ? KErrGeneral : KErrNone;
	TUint32 f = aUnbind ? EUnbind|EDisable : EDisable;
	TUint32 orig = __e32_atomic_ior_acq32(&iHState, f);
	TUint32 g = iGeneration;
	if (!(orig & EDisable))	// check not already disabled
		{
		pI->iEnabledEvents -= 2;
		if (!pI->iEnabledEvents)
			pI->HwDisable();	// disable HW interrupt if no more enabled handlers
		}
	if (aUnbind && !(orig & EUnbind))
		{
		volatile TUint16& cookie = *(volatile TUint16*)(((TUint8*)&iHandle)+2);
		StepCookie(cookie, 1);
		r = KErrNone;
		}
	__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
	if (NKern::CurrentContext() != NKern::EInterrupt)
		{
		// wait for currently running handler to finish or interrupt to be reenabled
		while ((iHState & EActive) && (iGeneration == g))
			{
			__chill();
			}
		}
	return r;
	}
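// Unbind disables the handler with the unbind flag set, then detaches it
// from any tied thread/group under TiedLock before returning it to the
// free list. Only the caller that wins the unbind race (Disable returns
// KErrNone) actually frees the handler.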
TInt NIrqHandler::Unbind(TInt aId, NSchedulable* aTied)
	{
	TInt r = Disable(TRUE, aId);	// waits for any current activation of ISR to finish
	if (r==KErrNone || aTied)	// returns KErrGeneral if someone else already unbound this interrupt handler
		{
		// Possible race condition here between tied thread termination and interrupt unbind.
		// We need to be sure that the iTied field is NULL before the tied thread/group
		// is destroyed.
		NKern::Lock();
		NEventHandler::TiedLock.LockOnly();	// this guarantees pH->iTied cannot change
		NSchedulable* t = iTied;
		if (t)
			{
			// We need to guarantee the object pointed to by t cannot be deleted until we
			// have finished with it.
			t->AcqSLock();
			if (iTiedLink.iNext)
				{
				iTiedLink.Deque();
				iTiedLink.iNext = 0;
				iTied = 0;
				}
			if (aTied && aTied==t)
				iTied = 0;
			t->RelSLock();
			}
		NEventHandler::TiedLock.UnlockOnly();
		NKern::Unlock();
		}
	if (r==KErrNone)
		{
		DoUnbind();
		Free();
		}
	return r;
	}

void NIrqHandler::DoUnbind()
	{
	// Call only from thread context
	NIrq* pI = iIrq;
	pI->Wait();
	iIrqLink.Deque();
	iIrq = 0;
	pI->Done();
	}
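// QueueEvent adds an event handler to this sub-scheduler's queue and tells
// the caller what, if anything, is needed to get it run: EQueueEvent_Kick
// when the target CPU is accepting IPIs but has no events pending yet,
// EQueueEvent_WakeUp when the CPU is not currently accepting IPIs
// (presumably idle or powering down) and must be woken.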
TInt TSubScheduler::QueueEvent(NEventHandler* aEvent)
	{
	TInt r = 0;
	TInt irq = __SPIN_LOCK_IRQSAVE(iEventHandlerLock);
	if (!(iScheduler->iIpiAcceptCpus & iCpuMask))
		r = EQueueEvent_WakeUp;
	else if (!iEventHandlersPending)
		r = EQueueEvent_Kick;
	iEventHandlersPending = TRUE;
	iEventHandlers.Add(aEvent);
	__SPIN_UNLOCK_IRQRESTORE(iEventHandlerLock,irq);
	return r;
	}
void TSubScheduler::QueueEventAndKick(NEventHandler* aEvent)
	{
	TInt kick = QueueEvent(aEvent);
	if (kick)
		{
		// extra barrier ?
		send_irq_ipi(this, kick);
		}
	}
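// run_event_handlers drains this sub-scheduler's event queue. NTimer
// completions are dispatched inline; IRQ event handlers invoke the bound
// function, and once the run count has drained, the NIrq's pending count
// is decremented and a level-triggered interrupt that was masked while
// events were pending is re-enabled. A handler that is still active
// afterwards is requeued.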
extern "C" void run_event_handlers(TSubScheduler* aS)
	{
	while (aS->iEventHandlersPending)
		{
		TInt irq = __SPIN_LOCK_IRQSAVE(aS->iEventHandlerLock);
		if (aS->iEventHandlers.IsEmpty())
			{
			aS->iEventHandlersPending = FALSE;
			__SPIN_UNLOCK_IRQRESTORE(aS->iEventHandlerLock, irq);
			break;
			}
		NIrqHandler* h = (NIrqHandler*)aS->iEventHandlers.First()->Deque();
		if (aS->iEventHandlers.IsEmpty())
			aS->iEventHandlersPending = FALSE;
		TInt type = h->iHType;
		NSchedulable* tied = h->iTied;
		if (type == NEventHandler::EEventHandlerNTimer)
			{
			NEventFn f = h->iFn;
			TAny* p = h->iPtr;
			mb();	// make sure dequeue observed and iFn,iPtr,iTied sampled before state change observed
			h->i8888.iHState1 = NTimer::EIdle;	// can't touch timer again after this
			__SPIN_UNLOCK_IRQRESTORE(aS->iEventHandlerLock, irq);
			(*f)(p);
			if (tied)
				tied->EndTiedEvent();
			continue;
			}
		__SPIN_UNLOCK_IRQRESTORE(aS->iEventHandlerLock, irq);
		TBool requeue = TRUE;
		switch (h->iHType)
			{
			case NEventHandler::EEventHandlerIrq:
				{
				TUint32 orig;
				// event can run on this CPU so run it now
				// if event tied, migration of tied thread/group will have been blocked
				orig = h->EventBegin();
				TRACE_IRQ8(20, h, orig);
				(*h->iFn)(h->iPtr);
				TRACE_IRQ4(21, h);
				if (!(h->iHState & NIrqHandler::ERunCountMask))	// if run count still nonzero, definitely still active
					{
					NIrq* pI = h->iIrq;
					irq = __SPIN_LOCK_IRQSAVE(pI->iNIrqLock);
					orig = h->EventDone();
					TRACE_IRQ8(22, h, orig);
					if (!(orig & NIrqHandler::EActive))
						{
						// handler is no longer active - can't touch it again
						// pI is OK since NIrq's are never deleted/reused
						requeue = FALSE;
						if (__e32_atomic_add_rel32(&pI->iEventsPending, TUint32(-1)) == 1)
							{
							if (pI->iEnabledEvents & 1)
								{
								pI->iEnabledEvents &= ~1;
								if (pI->iEnabledEvents)
									pI->HwEnable();
								}
							}
						}
					__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
					}
				break;
				}
			default:
				__KTRACE_OPT(KPANIC,DEBUGPRINT("h=%08x",h));
				__NK_ASSERT_ALWAYS(0);
			}
		if (tied && !requeue)
			{
			// If the tied thread/group has no more tied events outstanding
			// and has a migration pending, trigger the migration now.
			// Atomically change the tied_cpu to the target CPU here. An IDFC
			// can then effect the migration.
			// Note that the tied code can't run in parallel with us until
			// the tied_cpu is changed. However it could run as soon as the
			// tied_cpu is changed (e.g. if added to ready list after change)
			tied->EndTiedEvent();
			}
		if (requeue)
			{
			// still pending so put it back on the queue
			// leave interrupt disabled (if so) and migration of tied thread/group blocked
			aS->QueueEvent(h);
			}
		}
	}
/******************************************************************************
 * Public interrupt management functions
 ******************************************************************************/
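// InterruptInit0 runs once at boot: it builds the free list of statically
// allocated NIrqHandlers, stepping the cookie between entries so that each
// handler starts with a distinct handle, then hands over to the
// hardware-specific HwInit0().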
void NKern::InterruptInit0()
	{
	TInt i;
	TUint16 cookie = 1;
	NIrqHandler::FirstFree = 0;
	for (i=NK_MAX_IRQ_HANDLERS-1; i>=0; --i)
		{
		StepCookie(cookie, 61);
		NIrqHandler* h = &::Handlers[i];
		__KTRACE_OPT(KBOOT,DEBUGPRINT("NIrqHandler[%d] at %08x", i, h));
		h->iGeneration = 0;
		h->iHandle = (cookie << 16) | i;
		h->iIrqLink.iNext = NIrqHandler::FirstFree;
		NIrqHandler::FirstFree = h;
		}
	NIrq::HwInit0();
	}
EXPORT_C TInt NKern::InterruptInit(TInt aId, TUint32 aFlags, TInt aVector, TUint32 aHwId, TAny* aExt)
	{
	__KTRACE_OPT(KBOOT,DEBUGPRINT("NKII: ID=%02x F=%08x V=%03x HWID=%08x X=%08x", aId, aFlags, aVector, aHwId, aExt));
	TRACE_IRQ12(0, (aId|(aVector<<16)), aFlags, aHwId);
	if (TUint(aId) >= TUint(NK_MAX_IRQS))
		return KErrArgument;
	NIrq* pI = &Irq[aId];
	__KTRACE_OPT(KBOOT,DEBUGPRINT("NIrq[%02x] at %08x", aId, pI));
	TRACE_IRQ8(1, aId, pI);
	new (pI) NIrq;
	pI->iX = (NIrqX*)aExt;
	pI->iIndex = (TUint16)aId;
	pI->iHwId = aHwId;
	pI->iVector = aVector;
	pI->iStaticFlags = (TUint16)(aFlags & 0x13);
	if (aFlags & NKern::EIrqInit_Count)
		pI->iIState |= NIrq::ECount;
	pI->HwInit();
	__e32_atomic_and_rel32(&pI->iIState, ~NIrq::EWait);
	return KErrNone;
	}
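// InterruptBind attaches an ISR to interrupt aId. For a normal bind an
// NIrqHandler is allocated and the returned value, when positive, is its
// handle: the handler index in the low 16 bits and the current cookie in
// the high 16 bits. A raw bind installs the ISR directly and cannot be
// tied to a thread or group.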
EXPORT_C TInt NKern::InterruptBind(TInt aId, NIsr aIsr, TAny* aPtr, TUint32 aFlags, NSchedulable* aTied)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIB: ID=%02x ISR=%08x(%08x) F=%08x T=%T", aId, aIsr, aPtr, aFlags, aTied));
	TRACE_IRQ12(2, aId, aIsr, aPtr);
	TRACE_IRQ12(3, aId, aFlags, aTied);
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::InterruptBind");
	if (TUint(aId) >= TUint(NK_MAX_IRQS))
		{
		TRACE_IRQ8(4, aId, KErrArgument);
		return KErrArgument;
		}
	NIrq* pI = &Irq[aId];
	NIrqHandler* pH = 0;
	NSchedulable* pT = 0;
	if (aFlags & NKern::EIrqBind_Tied)
		{
		if (!aTied)
			aTied = NKern::CurrentThread();
		pT = aTied;
		}
	TInt r = KErrNoMemory;
	TInt handle = 0;
	NKern::ThreadEnterCS();
	if (!(aFlags & NKern::EIrqBind_Raw))
		{
		pH = NIrqHandler::Alloc();
		if (!pH)
			goto out;
		pH->iFn = aIsr;
		pH->iPtr = aPtr;
		__e32_atomic_add_ord32(&pH->iGeneration, 1);
		if (aFlags & EIrqBind_Exclusive)
			pH->iHState |= NIrqHandler::EExclusive;
		if (aFlags & EIrqBind_Count)
			pH->iHState |= NIrqHandler::ECount;
		r = pI->Bind(pH);
		if (r==KErrNone)
			{
			handle = pH->iHandle;
			// We assume that aTied cannot disappear entirely before we return
			if (pT)
				{
				NKern::Lock();
				r = pT->AddTiedEvent(pH);
				NKern::Unlock();
				}
			if (r!=KErrNone)
				{
				// unbind
				pH->DoUnbind();
				}
			}
		if (r!=KErrNone)
			pH->Free();
		}
	else
		{
		if (aFlags & NKern::EIrqBind_Tied)
			r = KErrNotSupported;
		else
			r = pI->BindRaw(aIsr, aPtr);
		}
out:
	if (r==KErrNone && pH)	// pH is null for a raw bind, which has no handler state to update
		{
		// clear ENotReady so handler can be enabled
		__e32_atomic_and_rel32(&pH->iHState, ~NIrqHandler::ENotReady);
		r = handle;
		}
	NKern::ThreadLeaveCS();
	__KTRACE_OPT(KNKERN,DEBUGPRINT("<NKIB: %08x", r));
	TRACE_IRQ8(4, aId, r);
	return r;
	}
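// FromHandle accepts either a handler handle (cookie present in the top 16
// bits) or a bare IRQ id. For a raw-bound IRQ id it returns the NIrq alone;
// for a non-shared IRQ with exactly one handler it also translates the id
// into that handler's full handle.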
TInt NIrq::FromHandle(TInt& aHandle, NIrq*& aIrq, NIrqHandler*& aHandler)
	{
	TRACE_IRQ4(5, aHandle);
	aIrq = 0;
	aHandler = 0;
	NIrqHandler* pH = 0;
	NIrqHandler* pH2 = 0;
	NIrq* pI = 0;
	SDblQueLink* anchor = 0;
	TUint32 i;
	TInt r = KErrArgument;
	if (aHandle & NKern::EIrqCookieMask)
		{
		i = aHandle & NKern::EIrqIndexMask;
		if (i>=NK_MAX_IRQ_HANDLERS)
			goto out;
		pH = &::Handlers[i];
		if (pH->iHandle != TUint(aHandle))
			goto out;
		aHandler = pH;
		aIrq = pH->iIrq;
		r = KErrNone;
		goto out;
		}
	if (TUint32(aHandle)>=NK_MAX_IRQS)
		goto out;
	pI = &::Irq[aHandle];
	if (pI->iIState & NIrq::ERaw)
		{
		aIrq = pI;
		r = KErrNone;
		goto out;
		}
	if (pI->iStaticFlags & NIrq::EShared)
		goto out;
	anchor = &pI->iHandlers.iA;
	pH = _LOFF(anchor->iNext, NIrqHandler, iIrqLink);
	i = pH - ::Handlers;
	if (i>=NK_MAX_IRQ_HANDLERS)
		goto out;
	pH2 = &::Handlers[i];
	if (pH2 != pH)
		goto out;
	if (pH->iIrq != pI || anchor->iPrev != anchor->iNext)
		goto out;
	aHandle = pH->iHandle;
	aHandler = pH;
	aIrq = pI;
	r = KErrNone;
out:
	TRACE_IRQ4(6, r);
	TRACE_IRQ12(7, aHandle, aIrq, aHandler);
	return r;
	}
EXPORT_C TInt NKern::InterruptUnbind(TInt aId)
	{
	TRACE_IRQ4(8, aId);
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIU: ID=%08x", aId));
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::InterruptUnbind");
	NIrq* pI;
	NIrqHandler* pH;
	TInt r = NIrq::FromHandle(aId, pI, pH);
	if (r!=KErrNone)
		return r;
	NKern::ThreadEnterCS();
	if (!pH)
		{
		// raw ISR
		r = pI->UnbindRaw();
		}
	else
		{
		r = pH->Unbind(aId, 0);
		}
	NKern::ThreadLeaveCS();
	TRACE_IRQ4(9, r);
	return r;
	}
EXPORT_C TInt NKern::InterruptEnable(TInt aId)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIE: ID=%08x", aId));
	TRACE_IRQ4(10, aId);
	NIrq* pI;
	NIrqHandler* pH;
	TInt r = NIrq::FromHandle(aId, pI, pH);
	if (r==KErrNone)
		r = pH ? pH->Enable(aId) : pI->EnableRaw();
	TRACE_IRQ4(11, r);
	return r;
	}

EXPORT_C TInt NKern::InterruptDisable(TInt aId)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKID: ID=%08x", aId));
	TRACE_IRQ4(12, aId);
	NIrq* pI;
	NIrqHandler* pH;
	TInt r = NIrq::FromHandle(aId, pI, pH);
	if (r==KErrNone)
		r = pH ? pH->Disable(FALSE, aId) : pI->DisableRaw(FALSE);
	TRACE_IRQ4(13, r);
	return r;
	}

EXPORT_C TInt NKern::InterruptClear(TInt aId)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIC: ID=%08x", aId));
	return KErrNotSupported;
	}

EXPORT_C TInt NKern::InterruptSetPriority(TInt aId, TInt aPri)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIS: ID=%08x PRI=%08x", aId, aPri));
	return KErrNotSupported;
	}

EXPORT_C TInt NKern::InterruptSetCpuMask(TInt aId, TUint32 aMask)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIM: ID=%08x M=%08x", aId, aMask));
	return KErrNotSupported;
	}

EXPORT_C void NKern::Interrupt(TInt aId)
	{
	__NK_ASSERT_ALWAYS(TUint(aId) < TUint(NK_MAX_IRQS));
	NIrq* pI = &Irq[aId];
	pI->HwIsr();
	}