|
// Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\drivers\dmapil.cpp
// DMA Platform Independent Layer (PIL)
//
//

#include <drivers/dma.h>
#include <kernel/kern_priv.h>


static const char KDmaPanicCat[] = "DMA";

NFastMutex DmaChannelMgr::Lock;

class TDmaCancelInfo : public SDblQueLink
    {
public:
    TDmaCancelInfo();
    void Signal();
public:
    NFastSemaphore iSem;
    };

TDmaCancelInfo::TDmaCancelInfo()
    : iSem(0)
    {
    iNext = this;
    iPrev = this;
    }

void TDmaCancelInfo::Signal()
    {
    TDmaCancelInfo* p = this;
    FOREVER
        {
        TDmaCancelInfo* next = (TDmaCancelInfo*)p->iNext;
        if (p != next)
            p->Deque();
        NKern::FSSignal(&p->iSem);  // Don't dereference p after this
        if (p == next)
            break;
        p = next;
        }
    }
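
// Note on TDmaCancelInfo: every thread blocked in CancelAll() links a
// stack-allocated TDmaCancelInfo into the list headed by the channel's
// iCancelInfo and waits on its fast semaphore. Whoever completes the
// cancellation detaches the whole list and calls Signal() on it, which
// dequeues and signals each waiter in turn, releasing every concurrent
// CancelAll() caller.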
|
//////////////////////////////////////////////////////////////////////////////

#ifdef __DMASIM__
#ifdef __WINS__
typedef TLinAddr TPhysAddr;
#endif
static inline TPhysAddr LinToPhys(TLinAddr aLin) {return aLin;}
#else
static inline TPhysAddr LinToPhys(TLinAddr aLin) {return Epoc::LinearToPhysical(aLin);}
#endif

//
// Return the minimum of aMaxSize and the size of the largest physically
// contiguous block starting at aLinAddr.
//
static TInt MaxPhysSize(TLinAddr aLinAddr, const TInt aMaxSize)
    {
    const TPhysAddr physBase = LinToPhys(aLinAddr);
    TLinAddr lin = aLinAddr;
    TInt size = 0;
    for (;;)
        {
        // Round up the linear address to the next MMU page boundary
        const TLinAddr linBoundary = Kern::RoundToPageSize(lin + 1);
        size += linBoundary - lin;
        if (size >= aMaxSize)
            return aMaxSize;
        if ((physBase + size) != LinToPhys(linBoundary))
            return size;
        lin = linBoundary;
        }
    }
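
// Worked example (illustrative figures only): with a 4KB page size, a 12KB
// buffer starting at linear address 0x80000800 spans pages at 0x80000000,
// 0x80001000 and 0x80002000. If the first two pages are physically adjacent
// but the third is not, MaxPhysSize(0x80000800, 0x3000) returns 0x1800: the
// run from 0x80000800 up to the discontinuity at the 0x80002000 boundary.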
|


//////////////////////////////////////////////////////////////////////////////
// TDmac

TDmac::TDmac(const SCreateInfo& aInfo)
    : iMaxDesCount(aInfo.iDesCount),
      iAvailDesCount(aInfo.iDesCount),
      iDesSize(aInfo.iDesSize),
      iCaps(aInfo.iCaps)
    {
    __DMA_ASSERTD(iMaxDesCount > 0);
    __DMA_ASSERTD((iCaps & ~KCapsBitHwDes) == 0);  // undefined bits set?
    __DMA_ASSERTD(iDesSize > 0);
    }

//
// Second-phase c'tor
//
TInt TDmac::Create(const SCreateInfo& aInfo)
    {
    iHdrPool = new SDmaDesHdr[iMaxDesCount];
    if (iHdrPool == NULL)
        return KErrNoMemory;
|
    TInt r = AllocDesPool(aInfo.iDesChunkAttribs);
    if (r != KErrNone)
        return r;
|
    // Link all descriptor headers together on the free list
    iFreeHdr = iHdrPool;
    TInt i;
    for (i = 0; i < iMaxDesCount - 1; i++)
        iHdrPool[i].iNext = iHdrPool + i + 1;
    iHdrPool[iMaxDesCount-1].iNext = NULL;

    __DMA_INVARIANT();
    return KErrNone;
    }
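
// Illustrative sketch of a concrete controller (DMySocDmac is a hypothetical
// PSL class, not part of this file): the PSL derives from TDmac and chains
// into the two-phase construction, e.g.
//
//  class DMySocDmac : public TDmac
//      {
//  public:
//      DMySocDmac(const SCreateInfo& aInfo) : TDmac(aInfo) {}
//      TInt Create(const SCreateInfo& aInfo)
//          {
//          TInt r = TDmac::Create(aInfo);  // allocates header and descriptor pools
//          if (r == KErrNone)
//              {
//              // map controller registers, bind the interrupt, etc.
//              }
//          return r;
//          }
//      };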
|


TDmac::~TDmac()
    {
    __DMA_INVARIANT();

    FreeDesPool();
    delete[] iHdrPool;
    }


// Calling thread must be in CS
TInt TDmac::AllocDesPool(TUint aAttribs)
    {
    TInt r;
    if (iCaps & KCapsBitHwDes)
        {
        TInt size = iMaxDesCount * iDesSize;
#ifdef __WINS__
        (void)aAttribs;
        iDesPool = new TUint8[size];
        r = iDesPool ? KErrNone : KErrNoMemory;
#else
        // Chunk not mapped as supervisor r/w, user none? Incorrect mask passed by the PSL.
        __DMA_ASSERTD((aAttribs & EMapAttrAccessMask) == EMapAttrSupRw);
        TPhysAddr phys;
        r = Epoc::AllocPhysicalRam(size, phys);
        if (r == KErrNone)
            {
            r = DPlatChunkHw::New(iHwDesChunk, phys, size, aAttribs);
            if (r == KErrNone)
                {
                iDesPool = (TAny*)iHwDesChunk->LinearAddress();
                __KTRACE_OPT(KDMA, Kern::Printf("descriptor hw chunk created lin=0x%08X phys=0x%08X, size=0x%X",
                                                iHwDesChunk->iLinAddr, iHwDesChunk->iPhysAddr, size));
                }
            else
                Epoc::FreePhysicalRam(phys, size);
            }
#endif
        }
    else
        {
        iDesPool = new SDmaPseudoDes[iMaxDesCount];
        r = iDesPool ? KErrNone : KErrNoMemory;
        }
    return r;
    }


// Calling thread must be in CS
void TDmac::FreeDesPool()
    {
    if (iCaps & KCapsBitHwDes)
        {
#ifdef __WINS__
        delete[] (TUint8*)iDesPool;  // allocated as TUint8[] in AllocDesPool()
#else
        if (iHwDesChunk)
            {
            TPhysAddr phys = iHwDesChunk->PhysicalAddress();
            TInt size = iHwDesChunk->iSize;
            iHwDesChunk->Close(NULL);
            Epoc::FreePhysicalRam(phys, size);
            }
#endif
        }
    else
        Kern::Free(iDesPool);
    }
|


/**
Preallocate the given number of descriptors.
*/
TInt TDmac::ReserveSetOfDes(TInt aCount)
    {
    __KTRACE_OPT(KDMA, Kern::Printf(">TDmac::ReserveSetOfDes count=%d", aCount));
    __DMA_ASSERTD(aCount > 0);
    TInt r = KErrTooBig;
    Wait();
    if (iAvailDesCount - aCount >= 0)
        {
        iAvailDesCount -= aCount;
        r = KErrNone;
        }
    Signal();
    __DMA_INVARIANT();
    __KTRACE_OPT(KDMA, Kern::Printf("<TDmac::ReserveSetOfDes r=%d", r));
    return r;
    }


/**
Return the given number of preallocated descriptors to the free pool.
*/
void TDmac::ReleaseSetOfDes(TInt aCount)
    {
    __DMA_ASSERTD(aCount >= 0);
    Wait();
    iAvailDesCount += aCount;
    Signal();
    __DMA_INVARIANT();
    }
|


/**
Queue the channel's DFC and update the word used to communicate with it.

Called in interrupt context by the PSL.
*/
void TDmac::HandleIsr(TDmaChannel& aChannel, TBool aIsComplete)
    {
    //__KTRACE_OPT(KDMA, Kern::Printf("TDmac::HandleIsr channel=%d complete=%d", aChannelIdx, aIsComplete));

    // Queue the DFC if necessary. The possible scenarios are:
    // * no DFC queued --> need to queue DFC
    // * DFC queued (not running yet) --> just need to update iIsrDfc
    // * DFC running / iIsrDfc already reset --> need to requeue DFC
    // * DFC running / iIsrDfc not reset yet --> just need to update iIsrDfc
    // Set the error flag if necessary.
    TUint32 inc = aIsComplete ? 1u : TUint32(TDmaChannel::KErrorFlagMask) | 1u;
    TUint32 orig = __e32_atomic_tau_ord32(&aChannel.iIsrDfc, TUint32(TDmaChannel::KCancelFlagMask), 0, inc);

    // As the transfer should be suspended when an error occurs, we
    // should never get here with the error flag already set.
    __DMA_ASSERTD((orig & inc & (TUint32)TDmaChannel::KErrorFlagMask) == 0);

    if (orig == 0)
        aChannel.iDfc.Add();
    }
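
// Illustrative PSL interrupt handler (a sketch only; DMySocDmac, iChannel and
// KMyErrorBit are hypothetical names, not part of this framework):
//
//  void DMySocDmac::Isr(TAny* aPtr)
//      {
//      DMySocDmac& me = *static_cast<DMySocDmac*>(aPtr);
//      const TUint32 status = /* read and clear the hw interrupt status */;
//      TDmaChannel& channel = /* map status bits to the signalling channel */;
//      me.HandleIsr(channel, !(status & KMyErrorBit));  // queues the channel's DFC
//      }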
|


void TDmac::InitDes(const SDmaDesHdr& aHdr, TUint32 aSrc, TUint32 aDest, TInt aCount,
                    TUint aFlags, TUint32 aPslInfo, TUint32 aCookie)
    {
    if (iCaps & KCapsBitHwDes)
        InitHwDes(aHdr, aSrc, aDest, aCount, aFlags, aPslInfo, aCookie);
    else
        {
        SDmaPseudoDes& des = HdrToDes(aHdr);
        des.iSrc = aSrc;
        des.iDest = aDest;
        des.iCount = aCount;
        des.iFlags = aFlags;
        des.iPslInfo = aPslInfo;
        des.iCookie = aCookie;
        }
    }


void TDmac::InitHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrc*/, TUint32 /*aDest*/, TInt /*aCount*/,
                      TUint /*aFlags*/, TUint32 /*aPslInfo*/, TUint32 /*aCookie*/)
    {
    // concrete controller must override if KCapsBitHwDes set
    __DMA_CANT_HAPPEN();
    }


void TDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHdr*/)
    {
    // concrete controller must override if KCapsBitHwDes set
    __DMA_CANT_HAPPEN();
    }


void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aLastHdr*/,
                        const SDmaDesHdr& /*aNewHdr*/)
    {
    // concrete controller must override if KCapsBitHwDes set
    __DMA_CANT_HAPPEN();
    }


void TDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
    {
    // concrete controller must override if KCapsBitHwDes set
    __DMA_CANT_HAPPEN();
    }


TInt TDmac::FailNext(const TDmaChannel& /*aChannel*/)
    {
    return KErrNotSupported;
    }


TInt TDmac::MissNextInterrupts(const TDmaChannel& /*aChannel*/, TInt /*aInterruptCount*/)
    {
    return KErrNotSupported;
    }


TInt TDmac::Extension(TDmaChannel& /*aChannel*/, TInt /*aCmd*/, TAny* /*aArg*/)
    {
    // default implementation - NOP
    return KErrNotSupported;
    }


#ifdef _DEBUG

void TDmac::Invariant()
    {
    Wait();
    __DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);
    __DMA_ASSERTD(!iFreeHdr || IsValidHdr(iFreeHdr));
    for (TInt i = 0; i < iMaxDesCount; i++)
        __DMA_ASSERTD(iHdrPool[i].iNext == NULL || IsValidHdr(iHdrPool[i].iNext));
    Signal();
    }


TBool TDmac::IsValidHdr(const SDmaDesHdr* aHdr)
    {
    return (iHdrPool <= aHdr) && (aHdr < iHdrPool + iMaxDesCount);
    }

#endif
|

//////////////////////////////////////////////////////////////////////////////
// DDmaRequest


EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TCallback aCb, TAny* aCbArg, TInt aMaxTransferSize)
    : iChannel(aChannel),
      iCb(aCb),
      iCbArg(aCbArg),
      iMaxTransferSize(aMaxTransferSize)
    {
    // iDesCount = 0;
    // iFirstHdr = iLastHdr = NULL;
    // iQueued = EFalse;
    iChannel.iReqCount++;
    __DMA_INVARIANT();
    }


EXPORT_C DDmaRequest::~DDmaRequest()
    {
    __DMA_ASSERTD(!iQueued);
    __DMA_INVARIANT();
    FreeDesList();
    iChannel.iReqCount--;
    }


EXPORT_C TInt DDmaRequest::Fragment(TUint32 aSrc, TUint32 aDest, TInt aCount, TUint aFlags, TUint32 aPslInfo)
    {
    __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O "
                                    "src=0x%08X dest=0x%08X count=%d flags=0x%X psl=0x%08X",
                                    &Kern::CurrentThread(), aSrc, aDest, aCount, aFlags, aPslInfo));
    __DMA_ASSERTD(aCount > 0);
    __DMA_ASSERTD(!iQueued);

    const TUint alignMask = iChannel.MemAlignMask(aFlags, aPslInfo);
    const TBool memSrc = aFlags & KDmaMemSrc;
    const TBool memDest = aFlags & KDmaMemDest;

    // Memory buffers must satisfy the alignment constraint
    __DMA_ASSERTD(!memSrc || ((aSrc & alignMask) == 0));
    __DMA_ASSERTD(!memDest || ((aDest & alignMask) == 0));

    // Ask the PSL what the maximum size possible for this transfer is
    TInt maxTransferSize = iChannel.MaxTransferSize(aFlags, aPslInfo);
    if (!maxTransferSize)
        {
        __KTRACE_OPT(KPANIC, Kern::Printf("Error: maxTransferSize == 0"));
        return KErrArgument;
        }

    if (iMaxTransferSize)
        {
        // User has set a size cap
        __DMA_ASSERTA((iMaxTransferSize <= maxTransferSize) || (maxTransferSize == -1));
        maxTransferSize = iMaxTransferSize;
        }
    else
        {
        // User doesn't care about the maximum size
        if (maxTransferSize == -1)
            {
            // No maximum imposed by the controller
            maxTransferSize = aCount;
            }
        }

    const TInt maxAlignedSize = (maxTransferSize & ~alignMask);
    __DMA_ASSERTD(maxAlignedSize > 0);  // bug in the PSL if not true

    FreeDesList();

    TInt r = KErrNone;
    do
        {
        // Allocate a fragment
        r = ExpandDesList();
        if (r != KErrNone)
            {
            FreeDesList();
            break;
            }

        // Compute the fragment size
        TInt c = Min(maxTransferSize, aCount);
        if (memSrc && ((aFlags & KDmaPhysAddrSrc) == 0))
            c = MaxPhysSize(aSrc, c);
        if (memDest && ((aFlags & KDmaPhysAddrDest) == 0))
            c = MaxPhysSize(aDest, c);
        if ((memSrc || memDest) && (c < aCount) && (c > maxAlignedSize))
            {
            // This is not the last fragment of a transfer to/from memory.
            // We must round down the fragment size so the next one is
            // correctly aligned.
            c = maxAlignedSize;
            }

        // Initialise the fragment
        __KTRACE_OPT(KDMA, Kern::Printf("fragment: src=0x%08X dest=0x%08X count=%d", aSrc, aDest, c));
        iChannel.iController->InitDes(*iLastHdr, aSrc, aDest, c, aFlags, aPslInfo, iChannel.PslId());

        // Update for the next iteration
        aCount -= c;
        if (memSrc)
            aSrc += c;
        if (memDest)
            aDest += c;
        }
    while (aCount > 0);

    __DMA_INVARIANT();
    return r;
    }
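
// Illustrative client usage (a minimal sketch; the channel is assumed to be
// already open via TDmaChannel::Open() and XferDone is a client-supplied
// callback):
//
//  void XferDone(DDmaRequest::TResult aResult, TAny* aArg);
//
//  TInt StartXfer(TDmaChannel& aChannel, TUint32 aSrc, TUint32 aDest, TInt aLen)
//      {
//      DDmaRequest* req = new DDmaRequest(aChannel, XferDone, NULL);
//      if (!req)
//          return KErrNoMemory;
//      TInt r = req->Fragment(aSrc, aDest, aLen, KDmaMemSrc | KDmaMemDest, 0);
//      if (r == KErrNone)
//          req->Queue();  // XferDone() runs from the channel's DFC on completion
//      return r;
//      }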
|


EXPORT_C void DDmaRequest::Queue()
    {
    __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread()));
    __DMA_ASSERTD(iDesCount > 0);  // Not configured? Call Fragment() first!
    __DMA_ASSERTD(!iQueued);

    // Append the request to the queue and link the new descriptor list to the existing one.
    iChannel.Wait();
    if (!(iChannel.iIsrDfc & (TUint32)TDmaChannel::KCancelFlagMask))
        {
        iQueued = ETrue;
        iChannel.iReqQ.Add(&iLink);
        *iChannel.iNullPtr = iFirstHdr;
        iChannel.iNullPtr = &(iLastHdr->iNext);
        iChannel.DoQueue(*this);
        }
    iChannel.Signal();

    __DMA_INVARIANT();
    }


EXPORT_C TInt DDmaRequest::ExpandDesList(TInt aCount)
    {
    __DMA_ASSERTD(!iQueued);
    __DMA_ASSERTD(aCount > 0);

    if (aCount > iChannel.iAvailDesCount)
        return KErrTooBig;

    iChannel.iAvailDesCount -= aCount;
    iDesCount += aCount;

    TDmac& c = *(iChannel.iController);
    c.Wait();

    if (iFirstHdr == NULL)
        {
        // handle an empty list specially to simplify the following loop
        iFirstHdr = iLastHdr = c.iFreeHdr;
        c.iFreeHdr = c.iFreeHdr->iNext;
        --aCount;
        }
    else
        iLastHdr->iNext = c.iFreeHdr;

    // Remove as many descriptors and headers from the free pool as necessary
    // and ensure hardware descriptors are chained together.
    while (aCount-- > 0)
        {
        __DMA_ASSERTD(c.iFreeHdr != NULL);
        if (c.iCaps & TDmac::KCapsBitHwDes)
            c.ChainHwDes(*iLastHdr, *(c.iFreeHdr));
        iLastHdr = c.iFreeHdr;
        c.iFreeHdr = c.iFreeHdr->iNext;
        }

    c.Signal();

    iLastHdr->iNext = NULL;

    __DMA_INVARIANT();
    return KErrNone;
    }
|


EXPORT_C void DDmaRequest::FreeDesList()
    {
    __DMA_ASSERTD(!iQueued);
    if (iDesCount > 0)
        {
        iChannel.iAvailDesCount += iDesCount;
        TDmac& c = *(iChannel.iController);
        c.Wait();
        iLastHdr->iNext = c.iFreeHdr;
        c.iFreeHdr = iFirstHdr;
        c.Signal();
        iFirstHdr = iLastHdr = NULL;
        iDesCount = 0;
        }
    }


#ifdef _DEBUG

void DDmaRequest::Invariant()
    {
    iChannel.Wait();
    __DMA_ASSERTD(iChannel.IsOpened());
    __DMA_ASSERTD(0 <= iMaxTransferSize);
    __DMA_ASSERTD(0 <= iDesCount && iDesCount <= iChannel.iMaxDesCount);
    if (iDesCount == 0)
        {
        __DMA_ASSERTD(!iQueued);
        __DMA_ASSERTD(!iFirstHdr && !iLastHdr);
        }
    else
        {
        __DMA_ASSERTD(iChannel.iController->IsValidHdr(iFirstHdr));
        __DMA_ASSERTD(iChannel.iController->IsValidHdr(iLastHdr));
        }
    iChannel.Signal();
    }

#endif
|


//////////////////////////////////////////////////////////////////////////////
// TDmaChannel


EXPORT_C TInt TDmaChannel::StaticExtension(TInt aCmd, TAny* aArg)
    {
    return DmaChannelMgr::StaticExtension(aCmd, aArg);
    }


TDmaChannel::TDmaChannel()
    : iNullPtr(&iCurHdr),
      iDfc(Dfc, NULL, 0)
    {
    // iController = NULL;
    // iPslId = 0;
    // iCurHdr = NULL;
    // iMaxDesCount = iAvailDesCount = 0;
    // iReqCount = 0;
    __DMA_INVARIANT();
    }


EXPORT_C TInt TDmaChannel::Open(const SCreateInfo& aInfo, TDmaChannel*& aChannel)
    {
    __KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Open thread %O", &Kern::CurrentThread()));
    __DMA_ASSERTD(aInfo.iDfcQ != NULL);
    __DMA_ASSERTD(aInfo.iDfcPriority < KNumDfcPriorities);
    __DMA_ASSERTD(aInfo.iDesCount >= 1);

    aChannel = NULL;

    DmaChannelMgr::Wait();
    TDmaChannel* pC = DmaChannelMgr::Open(aInfo.iCookie);
    DmaChannelMgr::Signal();
    if (!pC)
        return KErrInUse;

    TInt r = pC->iController->ReserveSetOfDes(aInfo.iDesCount);
    if (r != KErrNone)
        {
        pC->Close();
        return r;
        }
    pC->iAvailDesCount = pC->iMaxDesCount = aInfo.iDesCount;

    new (&pC->iDfc) TDfc(&Dfc, pC, aInfo.iDfcQ, aInfo.iDfcPriority);

    aChannel = pC;

#ifdef _DEBUG
    pC->Invariant();
#endif
    __KTRACE_OPT(KDMA, Kern::Printf("opened channel %d", pC->iPslId));
    return KErrNone;
    }
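
// Illustrative sketch (KMyChannelId and myDfcQ are assumptions, typically
// supplied by the PSL and the client driver respectively):
//
//  TDmaChannel::SCreateInfo info;
//  info.iCookie = KMyChannelId;                // PSL-specific channel identifier
//  info.iDesCount = 8;                         // descriptors to reserve
//  info.iDfcPriority = 3;
//  info.iDfcQ = myDfcQ;                        // DFC queue for client callbacks
//  TDmaChannel* channel = NULL;
//  TInt r = TDmaChannel::Open(info, channel);  // KErrInUse if already open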
|


EXPORT_C void TDmaChannel::Close()
    {
    __KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Close %d", iPslId));
    __DMA_ASSERTD(IsOpened());
    __DMA_ASSERTD(IsQueueEmpty());
    __DMA_ASSERTD(iReqCount == 0);

    // Descriptor leak? Bug in the request code.
    __DMA_ASSERTD(iAvailDesCount == iMaxDesCount);

    iController->ReleaseSetOfDes(iMaxDesCount);
    iAvailDesCount = iMaxDesCount = 0;

    DmaChannelMgr::Wait();
    DmaChannelMgr::Close(this);
    iController = NULL;
    DmaChannelMgr::Signal();

    __DMA_INVARIANT();
    }


EXPORT_C void TDmaChannel::CancelAll()
    {
    __KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::CancelAll thread %O channel %d",
                                    &Kern::CurrentThread(), iPslId));
    __DMA_ASSERTD(IsOpened());

    NThread* nt = NKern::CurrentThread();
    TBool wait = FALSE;
    TDmaCancelInfo c;
    TDmaCancelInfo* waiters = 0;
    NKern::ThreadEnterCS();
    Wait();
    NThreadBase* dfcnt = iDfc.Thread();
    __e32_atomic_store_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
    // ISRs after this point will not post a DFC; however, a DFC may already
    // be queued or running or both.
    if (!IsQueueEmpty())
        {
        // There is a transfer in progress. It may complete before the DMAC
        // has stopped, but the resulting ISR will not post a DFC.
        // No ISR should happen after this function returns.
        iController->StopTransfer(*this);

        ResetStateMachine();

        // Clean up the request queue.
        SDblQueLink* pL;
        while ((pL = iReqQ.GetFirst()) != NULL)
            {
            DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
            pR->OnDeque();
            }
        }
    if (!dfcnt || dfcnt == nt)
        {
        // No DFC queue or the DFC runs in this thread, so just cancel it and we're finished.
        iDfc.Cancel();

        // If other calls to CancelAll() are waiting for the DFC, release them here.
        waiters = iCancelInfo;
        iCancelInfo = 0;

        // Reset the ISR count.
        __e32_atomic_store_rel32(&iIsrDfc, 0);
        }
    else
        {
        // The DFC runs in another thread. Make sure it's queued and then wait for it to run.
        if (iCancelInfo)
            c.InsertBefore(iCancelInfo);
        else
            iCancelInfo = &c;
        wait = TRUE;
        iDfc.Enque();
        }
    Signal();
    if (waiters)
        waiters->Signal();
    if (wait)
        NKern::FSWait(&c.iSem);
    NKern::ThreadLeaveCS();
    __DMA_INVARIANT();
    }
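
// Note: CancelAll() must be called from thread context - it enters a critical
// section and may block on a fast semaphore waiting for the channel's DFC. It
// is safe to call concurrently from several threads; the TDmaCancelInfo list
// manipulated above co-ordinates the waiters.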
|


/**
DFC callback function (static member).
*/
void TDmaChannel::Dfc(TAny* aArg)
    {
    ((TDmaChannel*)aArg)->DoDfc();
    }


void TDmaChannel::DoDfc()
    {
    Wait();

    // Atomically fetch and reset the number of DFCs queued by the ISR and
    // the error flag. Leave the cancel flag alone for now.
    const TUint32 w = __e32_atomic_and_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
    TUint32 count = w & KDfcCountMask;
    const TBool error = w & (TUint32)KErrorFlagMask;
    TBool stop = w & (TUint32)KCancelFlagMask;
    __DMA_ASSERTD(count > 0 || stop);

    while (count && !stop)
        {
        --count;

        // If an error occurred it must have been reported on the last
        // interrupt, since transfers are suspended after an error.
        DDmaRequest::TResult res = (count == 0 && error) ? DDmaRequest::EError : DDmaRequest::EOk;
        __DMA_ASSERTD(!iReqQ.IsEmpty());
        DDmaRequest* pCompletedReq = NULL;
        DDmaRequest* pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);
        DDmaRequest::TCallback cb = 0;
        TAny* arg = 0;

        if (res == DDmaRequest::EOk)
            {
            // Update the state machine, current fragment, completed fragment
            // and tell the DMAC to transfer the next fragment if necessary.
            SDmaDesHdr* pCompletedHdr = NULL;
            DoDfc(*pCurReq, pCompletedHdr);

            // If we have just completed the last fragment of the current
            // request, switch to the next request (if any).
            if (pCompletedHdr == pCurReq->iLastHdr)
                {
                pCompletedReq = pCurReq;
                pCurReq->iLink.Deque();
                if (iReqQ.IsEmpty())
                    iNullPtr = &iCurHdr;
                pCompletedReq->OnDeque();
                }
            }
        else if (res == DDmaRequest::EError)
            pCompletedReq = pCurReq;
        else
            __DMA_CANT_HAPPEN();
        if (pCompletedReq)
            {
            cb = pCompletedReq->iCb;
            arg = pCompletedReq->iCbArg;
            Signal();
            __KTRACE_OPT(KDMA, Kern::Printf("notifying DMA client result=%d", res));
            if (cb)  // guard against a NULL client callback, as in the drain path below
                (*cb)(res, arg);
            Wait();
            }
        if (pCompletedReq || Flash())
            stop = __e32_atomic_load_acq32(&iIsrDfc) & (TUint32)KCancelFlagMask;
        }

    // Some interrupts may be missed (double-buffer and scatter-gather
    // controllers only) if two or more transfers complete while interrupts
    // are disabled in the CPU. If this happens, the framework will go out of
    // sync and leave some orphaned requests in the queue.
    //
    // To ensure correctness we handle this case here by checking that the
    // request queue is empty when all transfers have completed and, if not,
    // cleaning up and notifying the client of the completion of the orphaned
    // requests.
    //
    // Note that if some interrupts are missed and the controller raises an
    // error while transferring a subsequent fragment, the error will be
    // reported on a fragment which was successfully completed. There is no
    // easy solution to this problem, but this is okay as the only possible
    // action following a failure is to flush the whole queue.
    if (stop)
        {
        TDmaCancelInfo* waiters = iCancelInfo;
        iCancelInfo = 0;

        // Make sure the DFC doesn't run again until a new request completes.
        iDfc.Cancel();

        // Reset the ISR count - new requests can now be processed.
        __e32_atomic_store_rel32(&iIsrDfc, 0);

        Signal();

        // Release the threads doing CancelAll().
        waiters->Signal();
        }
    else if (!error && !iDfc.Queued() && !iReqQ.IsEmpty() && iController->IsIdle(*this))
        {
        __KTRACE_OPT(KDMA, Kern::Printf("Missed interrupt(s) - draining request queue"));
        ResetStateMachine();

        // Move the orphaned requests to a temporary queue so the channel
        // queue can accept new requests.
        SDblQue q;
        q.MoveFrom(&iReqQ);

        SDblQueLink* pL;
        while ((pL = q.GetFirst()) != NULL)
            {
            DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
            __KTRACE_OPT(KDMA, Kern::Printf("Removing request from queue and notifying client"));
            pR->OnDeque();
            DDmaRequest::TCallback cb = pR->iCb;
            TAny* arg = pR->iCbArg;
            if (cb)
                {
                Signal();
                (*cb)(DDmaRequest::EOk, arg);
                Wait();
                }
            }
        Signal();
        }
    else
        Signal();

    __DMA_INVARIANT();
    }
|


/** Reset the state machine only; the request queue is unchanged. */
void TDmaChannel::ResetStateMachine()
    {
    DoCancelAll();
    iCurHdr = NULL;
    iNullPtr = &iCurHdr;
    }


/** Unlink the last item of an LLI chain from the next chain.
Default implementation does nothing. This is overridden by scatter-gather channels. */
void TDmaChannel::DoUnlink(SDmaDesHdr& /*aHdr*/)
    {
    }

#ifdef _DEBUG

void TDmaChannel::Invariant()
    {
    Wait();

    __DMA_ASSERTD(iReqCount >= 0);
    // iNullPtr should always point to the NULL pointer ending the fragment queue
    __DMA_ASSERTD(*iNullPtr == NULL);

    __DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);

    __DMA_ASSERTD(iCurHdr == NULL || iController->IsValidHdr(iCurHdr));

    if (IsOpened())
        {
        __DMA_ASSERTD((iCurHdr && !IsQueueEmpty()) || (!iCurHdr && IsQueueEmpty()));
        if (iCurHdr == NULL)
            __DMA_ASSERTD(iNullPtr == &iCurHdr);
        }
    else
        {
        __DMA_ASSERTD(iCurHdr == NULL);
        __DMA_ASSERTD(iNullPtr == &iCurHdr);
        __DMA_ASSERTD(IsQueueEmpty());
        }

    Signal();
    }

#endif
|
//////////////////////////////////////////////////////////////////////////////
// TDmaSbChannel

void TDmaSbChannel::DoQueue(DDmaRequest& /*aReq*/)
    {
    if (!iTransferring)
        {
        iController->Transfer(*this, *iCurHdr);
        iTransferring = ETrue;
        }
    }


void TDmaSbChannel::DoCancelAll()
    {
    __DMA_ASSERTD(iTransferring);
    iTransferring = EFalse;
    }


void TDmaSgChannel::DoUnlink(SDmaDesHdr& aHdr)
    {
    iController->UnlinkHwDes(*this, aHdr);
    }


void TDmaSbChannel::DoDfc(DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
    {
    __DMA_ASSERTD(iTransferring);
    aCompletedHdr = iCurHdr;
    iCurHdr = iCurHdr->iNext;
    if (iCurHdr != NULL)
        iController->Transfer(*this, *iCurHdr);
    else
        iTransferring = EFalse;
    }
|


//////////////////////////////////////////////////////////////////////////////
// TDmaDbChannel

void TDmaDbChannel::DoQueue(DDmaRequest& aReq)
    {
    switch (iState)
        {
        case EIdle:
            iController->Transfer(*this, *iCurHdr);
            if (iCurHdr->iNext)
                {
                iController->Transfer(*this, *(iCurHdr->iNext));
                iState = ETransferring;
                }
            else
                iState = ETransferringLast;
            break;
        case ETransferring:
            // nothing to do
            break;
        case ETransferringLast:
            iController->Transfer(*this, *(aReq.iFirstHdr));
            iState = ETransferring;
            break;
        default:
            __DMA_CANT_HAPPEN();
        }
    }
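
// The double-buffer state machine keeps at most two fragments loaded in the
// controller at any time:
//   EIdle             - nothing loaded; queuing loads one fragment, or two if
//                       a second one is already available
//   ETransferring     - both hardware buffers are busy; nothing to do on queue
//   ETransferringLast - only one buffer is busy, so a newly queued request's
//                       first fragment is loaded immediately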
|

void TDmaDbChannel::DoCancelAll()
    {
    iState = EIdle;
    }


void TDmaDbChannel::DoDfc(DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
    {
    aCompletedHdr = iCurHdr;
    iCurHdr = iCurHdr->iNext;
    switch (iState)
        {
        case ETransferringLast:
            iState = EIdle;
            break;
        case ETransferring:
            if (iCurHdr->iNext == NULL)
                iState = ETransferringLast;
            else
                iController->Transfer(*this, *(iCurHdr->iNext));
            break;
        default:
            __DMA_CANT_HAPPEN();
        }
    }
|


//////////////////////////////////////////////////////////////////////////////
// TDmaSgChannel

void TDmaSgChannel::DoQueue(DDmaRequest& aReq)
    {
    if (iTransferring)
        {
        __DMA_ASSERTD(!aReq.iLink.Alone());
        DDmaRequest* pReqPrev = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
        iController->AppendHwDes(*this, *(pReqPrev->iLastHdr), *(aReq.iFirstHdr));
        }
    else
        {
        iController->Transfer(*this, *(aReq.iFirstHdr));
        iTransferring = ETrue;
        }
    }


void TDmaSgChannel::DoCancelAll()
    {
    __DMA_ASSERTD(iTransferring);
    iTransferring = EFalse;
    }


void TDmaSgChannel::DoDfc(DDmaRequest& aCurReq, SDmaDesHdr*& aCompletedHdr)
    {
    __DMA_ASSERTD(iTransferring);
    aCompletedHdr = aCurReq.iLastHdr;
    iCurHdr = aCompletedHdr->iNext;
    iTransferring = (iCurHdr != NULL);
    }