|
1 /* |
|
2 * Copyright (c) 2008-2010 Nokia Corporation and/or its subsidiary(-ies). |
|
3 * All rights reserved. |
|
4 * This component and the accompanying materials are made available |
|
5 * under the terms of "Eclipse Public License v1.0" |
|
6 * which accompanies this distribution, and is available |
|
7 * at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
8 * |
|
9 * Initial Contributors: |
|
10 * Nokia Corporation - initial contribution. |
|
11 * |
|
12 * Contributors: |
|
13 * |
|
14 * Description: |
|
15 * naviengine_assp\dmapsl.cpp |
|
16 * DMA Platform Specific Layer (PSL) for Navi Engine. |
|
17 * |
|
18 */ |
|
19 |
|
20 |
|
21 |
|
22 #include <kernel/kern_priv.h> |
|
23 #include <assp.h> |
|
24 #include <naviengine_priv.h> |
|
25 #include <navienginedma.h> |
|
26 |
|
27 #include <dma.h> |
|
28 #include <dma_hai.h> |
|
29 |
|
30 |
|
31 // This macro was used to print debug info in the DMAC64 isr. |
|
32 // It may be useful to reenable it as it appears to provoke an SMP |
|
33 // race condition, which needs fixing |
|
34 //#define _DEBUG_PRINT_ISR |
|
35 |
|
36 // Debug support |
|
37 static const char KDmaPanicCat[] = "DMA PSL"; |
|
38 static const TInt KDesCount = 1024; // DMA descriptor count - sufficient to serve all channels at the time. |
|
39 |
|
40 /* Maps logical DMA channels into physical ones */ |
|
41 static const TDMAChannelLocator KDMAChannelLocator[EDmaChannelCount]= |
|
42 { |
|
43 // controller, group, subchannel DCHS (exc. SEL) TransferShiftSize |
|
44 {EDMACtrl32 ,2 ,2 ,KHvDMACHC_SDR, 1}, //EDMAChannelSD0, |
|
45 {EDMACtrl32 ,2 ,3 ,KHvDMACHC_SDW, 1}, //EDMAChannelSD1, |
|
46 |
|
47 {EDMACtrl32 ,3 ,0 ,KHvDMACHC_I2SR, 1}, //EDMAChannelI2S0RX, |
|
48 {EDMACtrl32 ,3 ,1 ,KHvDMACHC_I2SW, 1}, //EDMAChannelI2S0TX, |
|
49 {EDMACtrl32 ,3 ,2 ,KHvDMACHC_I2SR, 1}, //EDMAChannelI2S1RX, |
|
50 {EDMACtrl32 ,3 ,3 ,KHvDMACHC_I2SW, 1}, //EDMAChannelI2S1TX, |
|
51 {EDMACtrl32 ,3 ,4 ,KHvDMACHC_I2SR, 1}, //EDMAChannelI2S2RX, |
|
52 {EDMACtrl32 ,3 ,5 ,KHvDMACHC_I2SW, 1}, //EDMAChannelI2S2TX, |
|
53 {EDMACtrl32 ,3 ,6 ,KHvDMACHC_I2SR, 1}, //EDMAChannelI2S3RX, |
|
54 {EDMACtrl32 ,3 ,7 ,KHvDMACHC_I2SW, 1}, //EDMAChannelI2S3TX, |
|
55 |
|
56 {EDMACtrl32 ,0 ,2 ,KHvDMACHC_SW, 2}, //EDMAChannelUART0RX, |
|
57 {EDMACtrl32 ,0 ,3 ,KHvDMACHC_SW, 2}, //EDMAChannelUART0TX, |
|
58 {EDMACtrl32 ,0 ,4 ,KHvDMACHC_SW, 2}, //EDMAChannelUART1RX, |
|
59 {EDMACtrl32 ,0 ,5 ,KHvDMACHC_SW, 2}, //EDMAChannelUART1TX, |
|
60 {EDMACtrl32 ,0 ,6 ,KHvDMACHC_SW, 2}, //EDMAChannelUART2RX, |
|
61 {EDMACtrl32 ,0 ,7 ,KHvDMACHC_SW, 2}, //EDMAChannelUART2TX, |
|
62 |
|
63 {EDMACtrl32 ,0 ,0 ,KHvDMACHC_SW, 2}, //EDmaMemToMem0, |
|
64 {EDMACtrl32 ,0 ,1 ,KHvDMACHC_SW, 2}, //EDmaMemToMem1, |
|
65 {EDMACtrl32 ,2 ,4 ,KHvDMACHC_SW, 2}, //EDmaMemToMem2, |
|
66 {EDMACtrl32 ,2 ,5 ,KHvDMACHC_SW, 2}, //EDmaMemToMem3, |
|
67 |
|
68 {EDmaCtrl64 ,NULL ,NULL ,NULL, NULL}, //EDma64MemToMem0, |
|
69 {EDmaCtrl64 ,NULL ,NULL ,NULL, NULL}, //EDma64MemToMem1, |
|
70 {EDmaCtrl64 ,NULL ,NULL ,NULL, NULL}, //EDma64MemToMem2, |
|
71 {EDmaCtrl64 ,NULL ,NULL ,NULL, NULL}, //EDma64MemToMem3, |
|
72 }; |
|
73 |
|
74 /* Maps physical EDMACtrl32 channels into logical ones */ |
|
75 static const int DMAC32_HWChannelsLocator[KDmaHWCtrl32Count][KDmaCtrl32HWSubChannelCount] = |
|
76 { |
|
77 {EDmaMemToMem0,EDmaMemToMem1,EDMAChannelUART0RX,EDMAChannelUART0TX, |
|
78 EDMAChannelUART1RX,EDMAChannelUART1TX,EDMAChannelUART2RX,EDMAChannelUART2TX}, |
|
79 {-1,-1,-1,-1,-1,-1,-1,-1}, |
|
80 {-1,-1,EDMAChannelSD0,EDMAChannelSD1,EDmaMemToMem2,EDmaMemToMem3,-1,-1}, |
|
81 { EDMAChannelI2S0RX,EDMAChannelI2S0TX,EDMAChannelI2S1RX,EDMAChannelI2S1TX, |
|
82 EDMAChannelI2S2RX,EDMAChannelI2S2TX,EDMAChannelI2S3RX,EDMAChannelI2S3TX}, |
|
83 {-1,-1,-1,-1,-1,-1,-1,-1}, |
|
84 }; |
|
85 |
|
class TDmaDesc
//
// Hardware DMA descriptor
//
// NOTE: the DMA32 controller has no linked-descriptor support (see
// IsHwDesAligned below); these descriptors are chained by software,
// so only natural word alignment is required.
//
	{
public:
	TPhysAddr iSrcAddr;  // Physical source address
	TPhysAddr iDestAddr; // Physical destination address
	TUint iCount; // Transfer counter in bytes
	};
|
96 |
|
97 |
|
//
// Test Support
//
//The list of S/W channels to be tested by t_dma
TUint32 TestNEChannels[] = { EDmaMemToMem0, EDmaMemToMem1, EDmaMemToMem2, EDmaMemToMem3};

//Sg channels on the 64 bit controller (scatter-gather capable)
TUint32 TestNESgChannels[] = { EDma64MemToMem0, EDma64MemToMem1, EDma64MemToMem2, EDma64MemToMem3};
// Number of entries in TestNESgChannels
const TInt TestNESgChannelsSize = sizeof(TestNESgChannels)/sizeof(TestNESgChannels[0]);
|
107 |
|
/**
Information about the DMA drivers and available
channel cookies for the test harness
*/
TDmaTestInfo TestInfo =
	{
	4 * KMaxDMAUnitTransferLen, //a word is a unit of transfer for mem-to-mem DMA
	3, // Word alignment applies for S/W (mem-to-mem) transfer
	0, // No need for cookie.
	4, // The number of S/W DMA channels to test
	TestNEChannels,
	0, // No single-buffer channels
	NULL,
	TestNESgChannelsSize, // Number of scatter-gather channels
	TestNESgChannels,
	};
|
124 |
|
/** Returns the test-harness channel information (V1 interface). */
EXPORT_C const TDmaTestInfo& DmaTestInfo()
	{
	return TestInfo;
	}
|
129 |
|
/**
Information about the DMA drivers and available
channel cookies for the test harness (V2)
*/
TDmaV2TestInfo TestInfov2 =
	{
	4 * KMaxDMAUnitTransferLen, //a word is a unit of transfer for mem-to-mem DMA
	3, // Word alignment applies for S/W (mem-to-mem) transfer
	0, // No need for cookie.
	4, // The number of S/W DMA channels to test
	{EDmaMemToMem0, EDmaMemToMem1, EDmaMemToMem2, EDmaMemToMem3},
	0, // No single-buffer channels
	{NULL,},
	TestNESgChannelsSize, // Number of scatter-gather channels
	{EDma64MemToMem0, EDma64MemToMem1, EDma64MemToMem2, EDma64MemToMem3}
	};
|
146 |
|
/** Returns the test-harness channel information (V2 interface). */
EXPORT_C const TDmaV2TestInfo& DmaTestInfoV2()
	{
	return TestInfov2;
	}
|
151 |
|
152 // |
|
153 // Helper Functions |
|
154 // |
|
155 |
|
156 inline TBool IsHwDesAligned(const TAny* aDes) |
|
157 // |
|
158 // We do no need H/W descriptors to be aligned as Navi Engine DMA32 cotroller doesn't |
|
159 // support linked descriptors. Instead, they are linked by S/W. Therefore, the ordinary |
|
160 // word alignement applies (which is enforced by compiler). |
|
161 // |
|
162 { |
|
163 return ((TLinAddr)aDes & 0x3) == 0; |
|
164 } |
|
165 |
|
// Channel class.
// For double buffering, TDmaDbChannel::DoQueue must be overridden.
// NE DMA has two sets of registers (base & work) - which is not supported by the original
// PIL double-buffer channel class, hence the DoQueue override below.
#if defined(NE1_DMA_DOUBLE_BUFFER)
class TNE1DmaChannel : public TDmaDbChannel
#else
class TNE1DmaChannel : public TDmaSbChannel
#endif
	{
public:
	TNE1DmaChannel();
	void ProcessIrq();      // Handle a normal (end/completion) interrupt
	void ProcessErrorIrq(); // Handle an error interrupt
	void StopTransfer();    // Halt any transfer in progress on this channel
	void Close();
	inline TBool IsIdle() const;

private:
#if defined(NE1_DMA_DOUBLE_BUFFER)
	virtual void DoQueue(const DDmaRequest& aReq);
#endif
	virtual void QueuedRequestCountChanged();

	inline void ProcessTC(TBool aClearStatus);
	inline void ProcessEnd(TBool aClearStatus);
	inline void ClearStatus(TUint32 aBitmask);
	inline void HandleIsr(TBool aIsComplete);

public:
	TInt iHWCtrlBase;// Base address of H/W registers for this channel.
	TInt iSubChannel;// Subchannel number (0-7) within H/W controller.
	TInt iDMACHCReg; // The content of configuration (CHC) register for this channel.
	TInt iTransferDataShift;//log2 of basic unit of transfer. See TDMAChannelLocator::iTransferShiftSize

	// The following members are public so that they can be
	// modified by the TNaviEngineDmac class

	/**
	This flag is set when the base register set is filled

	It allows the ISR to detect the case where a base to work
	register set changeover has happened (END interrupt) but has been masked by
	the completion of the work register set
	*/
	TBool iBaseValidSet;

	/**
	This counter is incremented each time a LaunchTransfer is started
	and decremented when the ISR handles the transfer complete (TC
	bit). Ie. it does not keep count of the number of times the base
	register set is filled. This allows missed TC interrupts to be
	detected.
	*/
	TInt iTcIrqCount;

	/**
	This spinlock is used to protect both the iBaseValidSet and iTcIrqCount
	variables. It synchronises access between threads and the ISR.

	For the ISR the setting of iBaseValidSet must appear to be atomic with
	the launch of the transfer.

	For iTcIrqCount the spinlock makes the transfer launch and subsequent
	increase of the count appear atomic to the ISR. Otherwise it could
	observe a completed transfer before the count was incremented or
	vice-versa.
	*/
	TSpinLock iIsrLock;
	};
|
235 |
|
236 // |
|
237 // Derived Controller Class |
|
238 // |
|
239 |
|
240 using namespace Dma64; |
|
241 |
|
// PSL controller object for the 32-bit NaviEngine DMAC.
// Implements the TDmac pure/overridable virtuals from the PIL and owns
// one TNE1DmaChannel per logical channel.
class TNaviEngineDmac : public TDmac
	{
public:
	TNaviEngineDmac();
	TInt Create(); // Second-phase construction: binds/enables IRQs, resets H/W

	friend void TNE1DmaChannel::HandleIsr(TBool); // Allow channel HandleIsr to call TDmac::HandleIsr
private:
	// from TDmac (PIL pure virtual)
	virtual void StopTransfer(const TDmaChannel& aChannel);
	virtual TBool IsIdle(const TDmaChannel& aChannel);
	virtual TUint MaxTransferLength(TDmaChannel& aChannel, TUint aSrcFlags, TUint aDstFlags, TUint32 aPslInfo);
	virtual TUint AddressAlignMask(TDmaChannel& aChannel, TUint aSrcFlags, TUint aDstFlags, TUint32 aPslInfo);
	// from TDmac (PIL virtual)
	virtual void Transfer(const TDmaChannel& aChannel, const SDmaDesHdr& aHdr);
	virtual TInt InitHwDes(const SDmaDesHdr& aHdr, const TDmaTransferArgs& aTransferArgs);
	virtual TInt UpdateHwDes(const SDmaDesHdr& aHdr, TUint32 aSrcAddr, TUint32 aDstAddr,
							 TUint aTransferCount, TUint32 aPslRequestInfo);
	virtual void ChainHwDes(const SDmaDesHdr& aHdr, const SDmaDesHdr& aNextHdr);
	virtual void AppendHwDes(const TDmaChannel& aChannel, const SDmaDesHdr& aLastHdr,
							 const SDmaDesHdr& aNewHdr);
	virtual void UnlinkHwDes(const TDmaChannel& aChannel, SDmaDesHdr& aHdr);
	// other
	// Common ISR body shared by the per-group End ISRs below.
	static void DMAC32_Isr(TAny* aThis, TInt aController, TInt aTcsMask, TInt aCompleted);
	static void DMAC32_0_End_Isr(TAny* aThis);
	static void DMAC32_0_Err_Isr(TAny* aThis);
	static void DMAC32_2_End_Isr(TAny* aThis);
	static void DMAC32_2_Err_Isr(TAny* aThis);
	static void DMAC32_3_End_Isr(TAny* aThis);
	static void DMAC32_3_Err_Isr(TAny* aThis);
	static void InitHWChannel (ENaviEngineDmaController aNEController, TInt aGroup, TInt aSubChannel);
	static void InitAllHWChannels ();
	inline TDmaDesc* HdrToHwDes(const SDmaDesHdr& aHdr);
private:
	static const SCreateInfo KInfo;
public:

	// Helpers for programming the channel's "Work Set"/"Base Set" registers.
	void PopulateWorkSet(TNE1DmaChannel& aChannel, const SDmaDesHdr& aHdr);
	void LaunchTransfer(TNE1DmaChannel& aChannel, TBool aBaseSetValid);
#if defined(NE1_DMA_DOUBLE_BUFFER)
	void PopulateBaseSet(TNE1DmaChannel& aChannel, const SDmaDesHdr& aHdr);
	void ContinueTransfer(TNE1DmaChannel& aChannel, TBool aBaseSetValid);
#endif

	TNE1DmaChannel iChannels[EDmaChannelCount];
	static const SDmacCaps KCaps;
	};
|
289 |
|
// The single instance of the 32-bit DMAC PSL controller.
static TNaviEngineDmac Controller;
|
291 |
|
// The following values report to the PIL what the PSL has been implemented
// to support, not necessarily what the hardware actually supports.
const SDmacCaps TNaviEngineDmac::KCaps =
	{0,                 // TInt iChannelPriorities;
	 EFalse,            // TBool iChannelPauseAndResume;
	 ETrue,             // TBool iAddrAlignedToElementSize;
	 EFalse,            // TBool i1DIndexAddressing;
	 EFalse,            // TBool i2DIndexAddressing;
	 KDmaSyncAuto,      // TUint iSynchronizationTypes;
	 KDmaBurstSizeAny,  // TUint iBurstTransactions;
	 EFalse,            // TBool iDescriptorInterrupt;
	 EFalse,            // TBool iFrameInterrupt;
	 EFalse,            // TBool iLinkedListPausedInterrupt;
	 EFalse,            // TBool iEndiannessConversion;
	 KDmaGraphicsOpNone,// TUint iGraphicsOps;
	 EFalse,            // TBool iRepeatingTransfers;
	 EFalse,            // TBool iChannelLinking;
	 ETrue,             // TBool iHwDescriptors; // DMAC does not really use Hw descriptors
	 EFalse,            // TBool iSrcDstAsymmetry;
	 EFalse,            // TBool iAsymHwDescriptors;
	 EFalse,            // TBool iBalancedAsymSegments;
	 EFalse,            // TBool iAsymCompletionInterrupt;
	 EFalse,            // TBool iAsymDescriptorInterrupt;
	 EFalse,            // TBool iAsymFrameInterrupt;
	 {0, 0, 0, 0, 0}    // TUint32 iReserved[5];
	};
|
318 |
|
319 |
|
// Creation parameters passed to the TDmac base class: the 32-bit DMAC
// uses software-linked "H/W" descriptors allocated from an uncached chunk.
const TDmac::SCreateInfo TNaviEngineDmac::KInfo =
	{
	ETrue,                                 // iCapsHwDes
	KDesCount,                             // iDesCount
	sizeof(TDmaDesc),                      // iDesSize
	EMapAttrSupRw | EMapAttrFullyBlocking  // iDesChunkAttribs
	};
|
327 |
|
328 |
|
329 ////////////////////////////////////////////////////////////////////////////// |
|
330 // AXI 64bit DMAC - (Scatter Gather |
|
331 ////////////////////////////////////////////////////////////////////////////// |
|
332 |
|
333 class TNaviEngineDmac64Sg; |
|
334 |
|
// One channel of the 64-bit AXI DMAC, used in scatter-gather mode.
// The inline accessors below read/modify the channel's memory-mapped
// register block at iBaseAddr.
class TNeSgChannel : public TDmaSgChannel
	{
	friend class TNaviEngineDmac64Sg;
public:
	TNeSgChannel();
	TNeSgChannel(TInt aPslId);

	// Base address of this channel's register block.
	inline TUint BaseAddr() {return iBaseAddr;}

#ifdef _DEBUG_PRINT_ISR
	void Print();
#endif
	// Suspend the channel by setting its suspend control bit.
	inline void Pause()
		{
		using namespace Channel;
		AsspRegister::Modify32(iBaseAddr + Ctrl::KHoBase, NULL, Ctrl::KHtSetSuspend);
		}

	// Resume a previously suspended channel.
	inline void Resume()
		{
		using namespace Channel;
		AsspRegister::Modify32(iBaseAddr + Ctrl::KHoBase, NULL, Ctrl::KHtClrSuspend);
		}

	// Mask (disable) the channel's END and COMP interrupt sources.
	inline void MaskInterrupt()
		{
		using namespace Channel;
		AsspRegister::Modify32(iBaseAddr + Cfg::KHoBase, NULL, Cfg::KHtEndMask|Cfg::KHtCompMask);
		}

	// Source address of the transfer currently loaded in the H/W register set.
	inline TUint32 CurrSrcAddr() const
		{
		using namespace Channel;
		return AsspRegister::Read32(iBaseAddr + RegSet::KHoBases[RegSet::ECurrent] + RegSet::KHoSrcAddr);
		}

	// Destination address of the current transfer.
	inline TUint32 CurrDstAddr() const
		{
		using namespace Channel;
		return AsspRegister::Read32(iBaseAddr + RegSet::KHoBases[RegSet::ECurrent] + RegSet::KHoDstAddr);
		}

	// Byte count register of the current transfer.
	inline TUint32 CurrByteCount() const
		{
		using namespace Channel;
		return AsspRegister::Read32(iBaseAddr + RegSet::KHoBases[RegSet::ECurrent] + RegSet::KHoTranByte);
		}

	// Raw channel status register.
	inline TUint32 Status() const
		{
		return AsspRegister::Read32(iBaseAddr + Channel::Status::KHoBase);
		}

	// Raw channel configuration register.
	inline TUint32 Config() const
		{
		return AsspRegister::Read32(iBaseAddr + Channel::Cfg::KHoBase);
		}

	// Next-link descriptor address of the H/W scatter-gather chain.
	inline TUint32 NextLink() const
		{
		return AsspRegister::Read32(iBaseAddr + Channel::KHoNxtLnkAddr);
		}

	// Current-link descriptor address of the H/W scatter-gather chain.
	inline TUint32 CurrLink() const
		{
		return AsspRegister::Read32(iBaseAddr + Channel::KHoCurrtLnkAddr);
		}

	virtual void QueuedRequestCountChanged();

	// Idle means neither enabled nor currently loading a descriptor.
	inline TBool IsIdle() const
		{
		const TUint channelStatus = Status();
		using namespace Channel::Status;
		const TBool isIdle = (channelStatus & (KHtEnabled | KHtDescLoad)) == 0;
		return isIdle;
		}

	void Transfer(TDma64Desc* aHwDes);
	void Close();

private:
	TUint32 iBaseAddr; // Base address of the channel's register block

	// NOTE(review): presumably tracks launched-but-uncompleted transfers,
	// protected by iLock against the ISR - confirm against Transfer()/ISR code.
	TInt iTransferCount;
	TSpinLock iLock;
	};
|
422 |
|
/**
Represents the 64 bit controller on the AXI bus - in scatter gather
mode. The controller supports both scatter gather and double buffered
mode but the framework dictates that separate logical DMACs are
required for each mode of operation.
*/
class TNaviEngineDmac64Sg : public TDmac
	{
public:
	TNaviEngineDmac64Sg();
	TInt Create(); // Second-phase construction

private:
	// from TDmac (PIL pure virtual)
	virtual void StopTransfer(const TDmaChannel& aChannel);
	virtual TBool IsIdle(const TDmaChannel& aChannel);
	virtual TUint MaxTransferLength(TDmaChannel& aChannel, TUint aSrcFlags, TUint aDstFlags, TUint32 aPslInfo);
	virtual TUint AddressAlignMask(TDmaChannel& aChannel, TUint aSrcFlags, TUint aDstFlags, TUint32 aPslInfo);
	// from TDmac (PIL virtual)
	virtual void Transfer(const TDmaChannel& aChannel, const SDmaDesHdr& aHdr);
	virtual TInt InitHwDes(const SDmaDesHdr& aHdr, const TDmaTransferArgs& aTransferArgs);
	virtual TInt UpdateHwDes(const SDmaDesHdr& aHdr, TUint32 aSrcAddr, TUint32 aDstAddr,
							 TUint aTransferCount, TUint32 aPslRequestInfo);
	virtual void ChainHwDes(const SDmaDesHdr& aHdr, const SDmaDesHdr& aNextHdr);
	virtual void AppendHwDes(const TDmaChannel& aChannel, const SDmaDesHdr& aLastHdr,
							 const SDmaDesHdr& aNewHdr);
	virtual void UnlinkHwDes(const TDmaChannel& aChannel, SDmaDesHdr& aHdr);
	// other
	static void IsrEnd(TAny* aThis); // Completion interrupt handler
	static void IsrErr(TAny* aThis); // Error interrupt handler

	inline TDma64Desc* HdrToHwDes(const SDmaDesHdr& aHdr);
	// Link aHwDes to aNextHwDes in the H/W descriptor chain.
	static void JoinHwDes(TDma64Desc& aHwDes, const TDma64Desc& aNextHwDes);

private:
	static const SCreateInfo KInfo;

public:
	TNeSgChannel iChannels[Dma64::KChannelCount];
	static const SDmacCaps KCaps;
	};
|
464 |
|
// The single instance of the 64-bit (AXI, scatter-gather) DMAC PSL controller.
static TNaviEngineDmac64Sg Controller64;
|
466 |
|
467 |
|
// The following values report to the PIL what the PSL has been implemented
// to support, not necessarily what the hardware actually supports.
const SDmacCaps TNaviEngineDmac64Sg::KCaps =
	{0,                 // TInt iChannelPriorities;
	 EFalse,            // TBool iChannelPauseAndResume;
	 EFalse,            // TBool iAddrAlignedToElementSize;
	 EFalse,            // TBool i1DIndexAddressing;
	 EFalse,            // TBool i2DIndexAddressing;
	 KDmaSyncAuto,      // TUint iSynchronizationTypes;
	 KDmaBurstSizeAny,  // TUint iBurstTransactions;
	 EFalse,            // TBool iDescriptorInterrupt;
	 EFalse,            // TBool iFrameInterrupt;
	 EFalse,            // TBool iLinkedListPausedInterrupt;
	 EFalse,            // TBool iEndiannessConversion;
	 KDmaGraphicsOpNone,// TUint iGraphicsOps;
	 EFalse,            // TBool iRepeatingTransfers;
	 EFalse,            // TBool iChannelLinking;
	 ETrue,             // TBool iHwDescriptors;
	 EFalse,            // TBool iSrcDstAsymmetry;
	 EFalse,            // TBool iAsymHwDescriptors;
	 EFalse,            // TBool iBalancedAsymSegments;
	 EFalse,            // TBool iAsymCompletionInterrupt;
	 EFalse,            // TBool iAsymDescriptorInterrupt;
	 EFalse,            // TBool iAsymFrameInterrupt;
	 {0, 0, 0, 0, 0}    // TUint32 iReserved[5];
	};
|
494 |
|
495 |
|
// Creation parameters passed to the TDmac base class: the 64-bit DMAC uses
// genuine H/W scatter-gather descriptors allocated from an uncached chunk.
const TDmac::SCreateInfo TNaviEngineDmac64Sg::KInfo =
	{
	ETrue,                                 // iCapsHwDes
	KDesCount,                             // iDesCount
	sizeof(TDma64Desc),                    // iDesSize
	EMapAttrSupRw | EMapAttrFullyBlocking  // iDesChunkAttribs
	};
|
503 |
|
504 |
|
TNaviEngineDmac::TNaviEngineDmac()
//
// Constructor. Real initialisation happens in Create().
//
	: TDmac(KInfo)
	{
	}
|
512 |
|
TInt TNaviEngineDmac::Create()
//
// Second phase construction: initialises the base class, resolves each
// channel's register addresses from KDMAChannelLocator, resets the H/W
// channels and binds/enables the group End/Err interrupts.
// Returns KErrNone on success or the TDmac::Create() error code.
//
	{
	TInt r = TDmac::Create(KInfo); // Base class Create()
	if (r == KErrNone)
		{
		// Read KDMAChannelLocator constants and populate the values in channel objects.
		for (TInt i=0; i < EDma32ChannelCount; ++i)
			{
			TUint ctrlBase = 0;
			switch (KDMAChannelLocator[i].iDMACtrl)
				{
				case EDmaCtrlExBus: ctrlBase = KDMACExBusBase; break;
				case EDMACtrl32: ctrlBase = KDMAC32Base; break;
				default: __DMA_CANT_HAPPEN();
				}
			// Register base = controller base + group offset + subchannel offset.
			iChannels[i].iHWCtrlBase = ctrlBase + KDMAChannelLocator[i].iGroup * KDMAGroupOffset +
				KDMAChannelLocator[i].iSubChannel * KDMAChannelOffset;
			iChannels[i].iSubChannel = KDMAChannelLocator[i].iSubChannel;
			// The CHC value also carries the subchannel number (SEL field).
			iChannels[i].iDMACHCReg = KDMAChannelLocator[i].iDMACHCReg | iChannels[i].iSubChannel;
			iChannels[i].iTransferDataShift = KDMAChannelLocator[i].iTransferShiftSize;
			// NOTE(review): consumes one free descriptor header per channel -
			// confirm this reservation is intentional.
			iFreeHdr = iFreeHdr->iNext;
			}

		//Bind DMA interrupt for channels we support
		TInt irqh0 = Interrupt::Bind(KIntDMAC32_0_End, DMAC32_0_End_Isr, this); __DMA_ASSERTA(irqh0>=0);
		TInt irqh1 = Interrupt::Bind(KIntDMAC32_0_Err, DMAC32_0_Err_Isr, this); __DMA_ASSERTA(irqh1>=0);
		TInt irqh2 = Interrupt::Bind(KIntDMAC32_2_End, DMAC32_2_End_Isr, this); __DMA_ASSERTA(irqh2>=0);
		TInt irqh3 = Interrupt::Bind(KIntDMAC32_2_Err, DMAC32_2_Err_Isr, this); __DMA_ASSERTA(irqh3>=0);
		TInt irqh4 = Interrupt::Bind(KIntDMAC32_3_End, DMAC32_3_End_Isr, this); __DMA_ASSERTA(irqh4>=0);
		TInt irqh5 = Interrupt::Bind(KIntDMAC32_3_Err, DMAC32_3_Err_Isr, this); __DMA_ASSERTA(irqh5>=0);


		// Make sure every H/W channel is disabled before interrupts are enabled.
		InitAllHWChannels();

		r = Interrupt::Enable(irqh0); __DMA_ASSERTA(r==KErrNone);
		r = Interrupt::Enable(irqh1); __DMA_ASSERTA(r==KErrNone);
		r = Interrupt::Enable(irqh2); __DMA_ASSERTA(r==KErrNone);
		r = Interrupt::Enable(irqh3); __DMA_ASSERTA(r==KErrNone);
		r = Interrupt::Enable(irqh4); __DMA_ASSERTA(r==KErrNone);
		r = Interrupt::Enable(irqh5); __DMA_ASSERTA(r==KErrNone);
		}
	return r;
	}
|
559 |
|
560 // Initialises all H/W channels. This will make sure they are off on soft restart. |
|
561 void TNaviEngineDmac::InitAllHWChannels() |
|
562 { |
|
563 int i,j; |
|
564 for (i=0;i<KDmaHWCtrl32Count;i++) |
|
565 { |
|
566 for (j=0;j<KDmaCtrl32HWSubChannelCount; j++) InitHWChannel(EDMACtrl32, i, j); |
|
567 AsspRegister::Write32(KDMAC32Base+i*KDMAGroupOffset+KHoDMACONT, 0); |
|
568 } |
|
569 } |
|
570 |
|
// Initialises a single H/W channel: computes its register base address and
// disables it.
void TNaviEngineDmac::InitHWChannel (ENaviEngineDmaController aNEController, TInt aGroup, TInt aSubChannel)
	{
	TUint neCtrlBase = 0;
	switch(aNEController)
		{
		case EDMACtrl32: neCtrlBase = KDMAC32Base ; break;
		default: __DMA_CANT_HAPPEN();
		}
	neCtrlBase += aGroup*KDMAGroupOffset + aSubChannel*KDMAChannelOffset;
	// Writing EN_EN (the write-enable for the EN bit) with EN itself clear
	// clears the channel-enable bit - cf. LaunchTransfer, which sets
	// KHtDMACHS_EN|KHtDMACHS_EN_EN to enable.
	AsspRegister::Write32(neCtrlBase+KHoDMACHS, KHtDMACHS_EN_EN); //disable channel
	}
|
583 |
|
#if defined(NE1_DMA_DOUBLE_BUFFER)

#ifdef _DEBUG
// These counters record which double-buffering corner cases have actually
// been exercised. A thorough test run shouldn't leave any of them at zero.
TInt InterruptCounter_DMA32 = 0; // Interrupt counter
TInt Transfer_IdleOnStart = 0; // DMA channel is idle on the start of Transfer.
TInt Transfer_NotIdleOnStart = 0; // DMA channel is not idle on the start of Transfer.
TInt Transfer_MatchWorkSetTrue = 0; // Descriptor matches "work set" registers
TInt Transfer_MatchWorkSetFalse = 0;// Descriptor doesn't match "work set" descriptor.
#endif
|
void TNaviEngineDmac::Transfer(const TDmaChannel& aChannel, const SDmaDesHdr& aHdr)
//
// Initiates a (previously constructed) request on a specific channel.
// Double-buffered variant: the fragment goes into the "Work Set" when the
// channel is idle, otherwise it is staged in the "Base Set" - with recovery
// for the race where the Base Set is programmed too late.
//
	{
	TDmaChannel& mutableChannel = const_cast<TDmaChannel&>(aChannel);
	TNE1DmaChannel& channel = static_cast<TNE1DmaChannel&>(mutableChannel);

	const TBool isIsr = (NKern::CurrentContext() == NKern::EInterrupt);
	TInt irq = 0;
	if(!isIsr) // If we are in ISR context, assume that the lock is already held
		{
		irq = __SPIN_LOCK_IRQSAVE(channel.iIsrLock);
		}

	// The fragment descriptor (src/dest address, size) should be placed into either "Work Set" or "Base Set"
	// depending on the actual state of the H/W
	if (IsIdle(channel))
		{
		// The channel is idle, for the (most likely) reason that both "Work Set" and "Base Set" transfers are
		// completed since the last time we run this function.
#ifdef _DEBUG
		Transfer_IdleOnStart++;
#endif
		PopulateWorkSet(channel, aHdr);  // Populate "Work Set"
		LaunchTransfer(channel, EFalse); // Start the transfer, base set is invalid
		}
	else
		{
		// "Work Set" transfer is still going on. It seems we will manage to place
		// the next fragment in time for continuous traffic flow.
#ifdef _DEBUG
		Transfer_NotIdleOnStart++;
#endif
		PopulateBaseSet(channel, aHdr);  // Populate "Base Set"
		ContinueTransfer(channel, ETrue);// Indicate Base Set is valid (bvalid = ETrue)

		// We should expect here that the "work set" traffic is still in progress.
		// Once it is completed, "Base Set" content is copied into "Work Set" and the traffic will go on.
		// However, there is a corner case where we configure "Base Set" too late.
		// Therefore, check if transfer is still active.
		if (IsIdle(channel))
			{
			// There is no DMA traffic. There could be two reasons for that. Either,
			// 1. The transfer we have just configured in "Base Set" has already completed, or
			// 2. We configured base set too late, after "Work Set" transfer has already finished.

			// Check BVALID bit
			// if it's now clear, then it was set in time, if it's
			// still set it was set too late
			const TUint32 dchs = AsspRegister::Read32(channel.iHWCtrlBase+KHoDMACHS);
			const TBool bvalidSet = dchs & KHtDMACHS_BVALID;

			if (!bvalidSet)
				{
				DMA_PSL_CHAN_TRACE_STATIC(channel, "Base set transferred already");
#ifdef _DEBUG
				Transfer_MatchWorkSetTrue++;
#endif
				}
			else
				{
				DMA_PSL_CHAN_TRACE_STATIC(channel, "Too late for base set");

				// BVALID bit was set after "Work Set" transfer completed, and DMA H/W didn't
				// copy the content of "Base Set" into "Work Set". We have to re-launch the transfer.
				// This time we have to configure "Work Set"
#ifdef _DEBUG
				Transfer_MatchWorkSetFalse++;
#endif
				PopulateWorkSet(channel, aHdr);  // Populate "Work Set".
				LaunchTransfer(channel, EFalse); // Start the transfer, "Base Set" is invalid.
				}
			}
		}
	if(!isIsr)
		{
		__SPIN_UNLOCK_IRQRESTORE(channel.iIsrLock, irq);
		}
	}
|
676 #else |
|
void TNaviEngineDmac::Transfer(const TDmaChannel& aChannel, const SDmaDesHdr& aHdr)
//
// Initiates a (previously constructed) request on a specific channel.
// Single-buffered variant: the channel must be idle; the fragment is
// always programmed into the "Work Set".
//
	{
	TDmaChannel& mutableChannel = const_cast<TDmaChannel&>(aChannel);
	TNE1DmaChannel& channel = static_cast<TNE1DmaChannel&>(mutableChannel);

	DMA_PSL_CHAN_TRACE_STATIC1(channel, "TNaviEngineDmac::Transfer des=0x%08X", HdrToHwDes(aHdr));

	const TBool isIsr = (NKern::CurrentContext() == NKern::EInterrupt);
	TInt irq = 0;
	if(!isIsr) // If we are in ISR context, assume that the lock is already held
		{
		irq = __SPIN_LOCK_IRQSAVE(channel.iIsrLock);
		}

	// The fragment descriptor (src/dest address, size) should be placed into either "Work Set" or "Base Set"
	// depending on the actual state of the H/W
	__NK_ASSERT_ALWAYS(IsIdle(channel));
	PopulateWorkSet(channel, aHdr);  // Populate "Work Set"
	LaunchTransfer(channel, EFalse); // Start the transfer, base set is invalid
	if(!isIsr)
		{
		__SPIN_UNLOCK_IRQRESTORE(channel.iIsrLock, irq);
		}
	}
|
704 #endif |
|
705 |
|
706 |
|
707 void TNaviEngineDmac::StopTransfer(const TDmaChannel& aChannel) |
|
708 // |
|
709 // Stops a running channel. |
|
710 // |
|
711 { |
|
712 TDmaChannel& mutableChannel = const_cast<TDmaChannel&>(aChannel); |
|
713 TNE1DmaChannel& channel = static_cast<TNE1DmaChannel&>(mutableChannel); |
|
714 |
|
715 __KTRACE_OPT(KDMA, Kern::Printf("TNaviEngineDmac::StopTransfer channel=%d", channel.PslId())); |
|
716 |
|
717 channel.StopTransfer(); |
|
718 |
|
719 __KTRACE_OPT(KDMA, Kern::Printf("<TNaviEngineDmac::StopTransfer channel=%d", channel.PslId())); |
|
720 } |
|
721 |
|
722 TBool TNaviEngineDmac::IsIdle(const TDmaChannel& aChannel) |
|
723 // |
|
724 // Returns the state of a given channel. |
|
725 // |
|
726 { |
|
727 TDmaChannel& mutableChannel = const_cast<TDmaChannel&>(aChannel); |
|
728 TNE1DmaChannel& channel = static_cast<TNE1DmaChannel&>(mutableChannel); |
|
729 |
|
730 const TBool idle = channel.IsIdle(); |
|
731 __KTRACE_OPT(KDMA, Kern::Printf(">Dmac::IsIdle channel=%d, idle=%d", channel.PslId(), idle)); |
|
732 return idle; |
|
733 } |
|
734 |
|
// Places the descriptor into "Work Set": programs the channel configuration,
// source/destination addresses and transfer counter registers.
void TNaviEngineDmac::PopulateWorkSet(TNE1DmaChannel& aChannel, const SDmaDesHdr& aHdr)
	{
	TDmaDesc* pD = HdrToHwDes(aHdr);
	__KTRACE_OPT(KDMA, Kern::Printf("TNaviEngineDmac::PopulateWorkSet channel=%d des=0x%08X",
									aChannel.PslId(), pD));

	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMACHC, aChannel.iDMACHCReg); //configure channel
	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMASAW, pD->iSrcAddr); //source addr
	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMADAW, pD->iDestAddr); //dest addr
	// The H/W counter holds (number of transfer units - 1), hence the shift and decrement.
	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMATCW, (pD->iCount>>aChannel.iTransferDataShift)-1); //transfer counter
	}
|
747 |
|
// Starts the transfer.
// @pre The channel is idle.
// @arg aBaseSetValid if true, BVALID bit should be set.
// @pre iIsrLock must be held
void TNaviEngineDmac::LaunchTransfer(TNE1DmaChannel& aChannel, TBool aBaseSetValid)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TNaviEngineDmac::LaunchTransfer channel=%d", aChannel.PslId()));
	TInt val = KHtDMACHS_EN|KHtDMACHS_EN_EN;
	// S/W-triggered (mem-to-mem) channels also need the software trigger (STG)
	// bit. iDMACHCReg == (bare CHC value | subchannel), so XOR-ing the
	// subchannel back out recovers the bare value for the comparison.
	if (TUint(aChannel.iDMACHCReg ^ aChannel.iSubChannel) == (TUint)KHvDMACHC_SW)
		val |=KHtDMACHS_STG;
	if (aBaseSetValid)
		val|=KHtDMACHS_BVALID;

	// Record the base-set state before touching the H/W so the ISR sees a
	// consistent picture (caller holds iIsrLock).
	aChannel.iBaseValidSet = aBaseSetValid;

	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMACHS, val);

	aChannel.iTcIrqCount++;
	DMA_PSL_CHAN_TRACE_STATIC1(aChannel, "inc iTcIrqCount to %d", aChannel.iTcIrqCount);
	}
|
768 |
|
#if defined(NE1_DMA_DOUBLE_BUFFER)
// Places the descriptor into "Base Set": programs the staging copies of the
// source/destination address and transfer counter registers. The H/W copies
// them into the "Work Set" when the current transfer completes.
void TNaviEngineDmac::PopulateBaseSet(TNE1DmaChannel& aChannel, const SDmaDesHdr& aHdr)
	{
	TDmaDesc* pD = HdrToHwDes(aHdr);
	__KTRACE_OPT(KDMA, Kern::Printf(">TNaviEngineDmac::PopulateBaseSet channel=%d des=0x%08X", aChannel.PslId(), pD));

	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMASAB, pD->iSrcAddr); // Source addr
	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMADAB, pD->iDestAddr); // Dest addr
	// The H/W counter holds (number of transfer units - 1), hence the shift and decrement.
	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMATCB, (pD->iCount>>aChannel.iTransferDataShift)-1); // Transfer counter
	}
|
780 |
|
// Marks the staged "Base Set" as valid (and/or issues a software trigger)
// while a transfer is already running.
// @pre DMA transfer is in progress.
// @arg aBaseSetValid if true, BVALID bit should be set.
// @pre iIsrLock must be held
void TNaviEngineDmac::ContinueTransfer(TNE1DmaChannel& aChannel, TBool aBaseSetValid)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TNaviEngineDmac::ContinueTransfer channel=%d", aChannel.PslId()));
	TInt val = 0;
	// Same bare-CHC comparison as in LaunchTransfer: S/W channels need STG.
	if (TUint(aChannel.iDMACHCReg ^ aChannel.iSubChannel) == (TUint)KHvDMACHC_SW)
		val |=KHtDMACHS_STG; // Set software trigger
	if (aBaseSetValid)
		{
		__NK_ASSERT_DEBUG(!aChannel.iBaseValidSet);
		aChannel.iBaseValidSet = ETrue;
		val|=KHtDMACHS_BVALID;
		}

	// Only touch the H/W register if there is actually something to set.
	if (val)
		{
		AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMACHS, val);
		}
	}
|
802 |
|
// As in TDmaDbChannel, except for EIdle state as we have place the 1st and the 2nd
// fragment into different registers.
void TNE1DmaChannel::DoQueue(const DDmaRequest& aReq)
	{
	TNaviEngineDmac* controller = (TNaviEngineDmac*)iController;

	switch (iState)
		{
	case EIdle:
		{
		// First fragment goes straight into the work set.
		controller->PopulateWorkSet(*this, *iCurHdr);
		// iIsrLock protects iBaseValidSet/iTcIrqCount against the ISR.
		const TInt irq = __SPIN_LOCK_IRQSAVE(iIsrLock);
		if (iCurHdr->iNext)
			{
			// Second fragment is queued in the base set so the DMAC
			// can chain to it without software intervention.
			controller->PopulateBaseSet(*this, *(iCurHdr->iNext));
			controller->LaunchTransfer(*this, ETrue); //BaseSetValid=True
			iState = ETransferring;
			}
		else
			{
			controller->LaunchTransfer(*this, EFalse); //BaseSetValid=False
			iState = ETransferringLast;
			}
		__SPIN_UNLOCK_IRQRESTORE(iIsrLock, irq);
		break;
		}
	case ETransferring:
		// nothing to do
		break;
	case ETransferringLast:
		// The last queued fragment is still in flight; queue the new
		// request's first fragment behind it.
		iController->Transfer(*this, *(aReq.iFirstHdr));
		iState = ETransferring;
		break;
	default:
		__DMA_CANT_HAPPEN();
		}
	}
|
840 #endif |
|
841 |
|
// Initialise bookkeeping: no base-set descriptor pending, no TC
// interrupts outstanding. iIsrLock guards state shared with the ISR.
TNE1DmaChannel::TNE1DmaChannel()
	:iBaseValidSet(EFalse), iTcIrqCount(0), iIsrLock(TSpinLock::EOrderGenericIrqHigh0)
	{}
|
845 |
|
/**
Handles normal interrupts as well as recovering from missed interrupts.
It must therefore be called during an ISR, for every valid, open channel
on a DMAC .ie not just channels which have status bits set.
*/
void TNE1DmaChannel::ProcessIrq()
	{
	// The spinlock protects access to the iBaseValidSet flag
	// This is needed because it is possible for TNaviEngineDmac::Transfer to
	// attempt to populate the base set, set iBaseValidSet, but then
	// realize it was too late, and have to unset it.
	const TInt irq = __SPIN_LOCK_IRQSAVE(iIsrLock);

	// check that channel is open
	if(iController == NULL)
		{
		__SPIN_UNLOCK_IRQRESTORE(iIsrLock, irq);
		return;
		}

	// iTcIrqCount is the number of TC interrupts still expected; it can
	// only legally be 0, 1 or 2 (work set + base set transfers).
	const TInt irqCount = iTcIrqCount;
	__NK_ASSERT_ALWAYS(irqCount >= 0);
	__NK_ASSERT_ALWAYS(irqCount < 3);

	TUint32 dchs = AsspRegister::Read32(iHWCtrlBase+KHoDMACHS);

	// Detect if we have missed 1 TC interrupt.
	// This can happen when there is one transfer in progress,
	// and the channel attempts to populate the base reg set, but
	// is too late and launches a new transfer. The second transfer
	// may then complete during the ISR of the first, or the ISR may
	// not run untill after the second has already completed.
	if((irqCount > 0) && IsIdle())
		{
		// Reread status now that we have observed channel as idle.
		// If a transfer completed between the first read and now, we
		// can handle that as a normal interrupt instead of as a
		// missed interrupt. This is not essential, just neater
		dchs = AsspRegister::Read32(iHWCtrlBase+KHoDMACHS);
		if(irqCount == 1) // There may or may not be a missed IRQ
			{
			if((dchs & KHtDMACHS_TC) == 0)
				{
				DMA_PSL_CHAN_TRACE1("Channel had missed TC IRQ irqs=%d", irqCount);
				ProcessTC(EFalse);
				}
			}
		else if(irqCount == 2) // There is 1 missed and 1 normal IRQ
			{
			DMA_PSL_CHAN_TRACE1("Channel had missed TC IRQ irqs=%d", irqCount);
			ProcessTC(EFalse);

			// Ensure that remaining IRQ will be dealt with in next block
			__NK_ASSERT_ALWAYS((dchs & KHtDMACHS_TC));
			}
		else
			{
			// It should not be possible for there to be more than 2
			// outstanding transfers launched
			FAULT();
			}
		}

	// Deal with normal interrupts
	if (dchs&KHtDMACHS_TC)
		{
		// ISR should not be able to observe the BVALID bit itself
		// since TNaviEngineDmac::Transfer should hold iIsrLock whilst
		// it decides if it was set in time
		__NK_ASSERT_DEBUG(!(dchs & KHtDMACHS_BVALID));

		// Here we find out if a base-set-copy (END) interrupt has
		// been missed. If a TC comes shortly after an END IRQ then
		// it would be impossible to tell by looking at the status
		// register alone
		if(iBaseValidSet)
			{
			DMA_PSL_CHAN_TRACE("END irq missed ");
			ProcessEnd(EFalse);
			}
		ProcessTC(ETrue);
		}
	else if (dchs&KHtDMACHS_END)
		{
		ProcessEnd(ETrue);
		}

	__SPIN_UNLOCK_IRQRESTORE(iIsrLock, irq);
	}
|
935 |
|
// Handles an error interrupt on this channel: resets the channel and
// completes every outstanding transfer back to the PIL as failed.
void TNE1DmaChannel::ProcessErrorIrq()
	{
	const TInt irq = __SPIN_LOCK_IRQSAVE(iIsrLock);

	// check that channel is open
	if(iController == NULL)
		{
		__SPIN_UNLOCK_IRQRESTORE(iIsrLock, irq);
		return;
		}

	// reset channel
	ClearStatus(KHtDMACHS_FCLR);
	// Every transfer that was in flight (work set, plus base set if it
	// was marked valid) must be reported as being in error.
	TInt badIrqCount = iTcIrqCount;
	iTcIrqCount = 0;

	if(iBaseValidSet)
		{
		iBaseValidSet = EFalse;
		badIrqCount++;
		}

	// complete all outstanding requests as being in error
	for(TInt i=0; i < badIrqCount; i++)
		{
		HandleIsr(EFalse);
		}

	__SPIN_UNLOCK_IRQRESTORE(iIsrLock, irq);
	}
|
966 |
|
967 /** |
|
968 Handle a transfer complete (work set transfer complete and base set was |
|
969 empty) on this channel. |
|
970 |
|
971 @param aClearStatus - Status bits should be cleared |
|
972 */ |
|
973 void TNE1DmaChannel::ProcessTC(TBool aClearStatus) |
|
974 { |
|
975 // iTcIrqCount may be zero if StopTransfer were called |
|
976 // between the transfer being started and the ISR running |
|
977 if(iTcIrqCount>0) |
|
978 { |
|
979 DMA_PSL_CHAN_TRACE1("dec iTcIrqCount to %d", iTcIrqCount); |
|
980 iTcIrqCount--; |
|
981 DMA_PSL_CHAN_TRACE("TC"); |
|
982 HandleIsr(ETrue); |
|
983 } |
|
984 |
|
985 __NK_ASSERT_DEBUG(iTcIrqCount >= 0); |
|
986 |
|
987 if(aClearStatus) |
|
988 ClearStatus(KHtDMACHS_TC|KHtDMACHS_END); //Traffic completed BVALID=OFF |
|
989 } |
|
990 |
|
/**
Handle a END (transfer complete, base set loaded in to work set) on this channel.

@param aClearStatus - Status bit should be cleared
*/
void TNE1DmaChannel::ProcessEnd(TBool aClearStatus)
	{
	// Only meaningful if a base-set descriptor was pending; the DMAC has
	// now copied it into the work set, so one request has completed.
	if(iBaseValidSet)
		{
		DMA_PSL_CHAN_TRACE("END");
		iBaseValidSet = EFalse;
		HandleIsr(ETrue);
		}

	if(aClearStatus)
		ClearStatus(KHtDMACHS_END); //Traffic completed BVALID=ON
	}
|
1008 |
|
1009 /** |
|
1010 @param aBitmask The bits to be cleared in this channel's status register |
|
1011 */ |
|
1012 void TNE1DmaChannel::ClearStatus(TUint32 aBitmask) |
|
1013 { |
|
1014 if (TUint((this->iDMACHCReg) ^ (this->iSubChannel)) == (TUint)KHvDMACHC_SW) |
|
1015 aBitmask |= KHtDMACHS_STG; //Add STG for S/W channel |
|
1016 |
|
1017 AsspRegister::Write32(iHWCtrlBase+KHoDMACHS, aBitmask); //End-of-Int |
|
1018 } |
|
1019 |
|
1020 /** |
|
1021 Call HandleIsr for this channel |
|
1022 */ |
|
1023 void TNE1DmaChannel::HandleIsr(TBool aIsComplete) |
|
1024 { |
|
1025 // iController must be casted so that the private method |
|
1026 // TDmac::HandleIsr can be called since this method is |
|
1027 // a friend of TNaviEngineDmac, but not TDmac. |
|
1028 static_cast<TNaviEngineDmac*>(iController)->HandleIsr(*this, EDmaCallbackRequestCompletion, aIsComplete); |
|
1029 } |
|
1030 |
|
/**
Stop transfer for this channel
*/
void TNE1DmaChannel::StopTransfer()
	{
	const TInt irq = __SPIN_LOCK_IRQSAVE(iIsrLock);
	// At this point, device driver should have cancelled DMA request.

	// The procedure for clearing the EN bit to 0 via CPU access during DMA transfer (EN bit = 1)
	TUint32 dmaCHS = AsspRegister::Read32(iHWCtrlBase+KHoDMACHS);

	// Read the DCHSn register to be cleared at the relevant channel and confirm that
	// both the RQST and ACT bits are cleared to 0. If either or both of them are 1,
	// perform polling until their values become 0..
	// OR unless KHtDMACHS_EN is already cleared by the HW at the end of the transfer -
	// while we're polling...
	// NOTE(review): this busy-waits with iIsrLock held - presumably
	// bounded by the in-flight transfer unit; confirm against the TRM.
	while( (dmaCHS & KHtDMACHS_EN) &&
		   dmaCHS & (KHtDMACHS_RQST | KHtDMACHS_ACT) )
		{
		dmaCHS = AsspRegister::Read32(iHWCtrlBase+KHoDMACHS);
		}

	// enable writing to EN bit..
	dmaCHS |= KHtDMACHS_EN_EN;
	AsspRegister::Write32(iHWCtrlBase+KHoDMACHS, dmaCHS);

	// clear the EN bit to 0
	// and set the FCLR bit of the DCHSn register to 1.
	dmaCHS &= (~KHtDMACHS_EN);
	dmaCHS |= KHtDMACHS_FCLR;
	AsspRegister::Write32(iHWCtrlBase+KHoDMACHS, dmaCHS);

	// check that channel is idle, and status bits have been cleared
	__NK_ASSERT_ALWAYS(IsIdle());
	dmaCHS = AsspRegister::Read32(iHWCtrlBase+KHoDMACHS);
	__NK_ASSERT_ALWAYS((dmaCHS & (KHtDMACHS_TC | KHtDMACHS_END)) == 0);
	__NK_ASSERT_ALWAYS(iTcIrqCount >=0);

	// given the above checks, clear the iTcIrqCount and iBaseValidSet so
	// that the ISR won't mistakenly think there are missed interrupts
	iTcIrqCount = 0;
	iBaseValidSet = EFalse;

	__SPIN_UNLOCK_IRQRESTORE(iIsrLock, irq);
	}
|
1076 |
|
/**
@pre Channel has been stopped
*/
void TNE1DmaChannel::Close()
	{
	// The lock prevents a channel being closed
	// during an ISR.
	const TInt irq = __SPIN_LOCK_IRQSAVE(iIsrLock);
	DMA_PSL_CHAN_TRACE("Close");

	// Check that the channel was Idle and Stopped
	__NK_ASSERT_ALWAYS(IsIdle());
	__NK_ASSERT_ALWAYS(iTcIrqCount == 0);
	__NK_ASSERT_ALWAYS(!iBaseValidSet);


	// Here we clear iController in advance of the PIL
	// If we did not do this, then when we release the lock, the ISR
	// could observe it as non-null, proceed, but then have the PIL
	// clear it mid-isr
	iController = NULL;

	__SPIN_UNLOCK_IRQRESTORE(iIsrLock, irq);
	}
|
1101 |
|
1102 TBool TNE1DmaChannel::IsIdle() const |
|
1103 { |
|
1104 TUint status = AsspRegister::Read32(iHWCtrlBase+KHoDMACHS); |
|
1105 return !(status & KHtDMACHS_EN); |
|
1106 } |
|
1107 |
|
1108 |
|
1109 void TNE1DmaChannel::QueuedRequestCountChanged() |
|
1110 { |
|
1111 const TInt qreqs = __e32_atomic_load_acq32(&iQueuedRequests); |
|
1112 DMA_PSL_CHAN_TRACE1("TNE1DmaChannel::QueuedRequestCountChanged() %d", qreqs); |
|
1113 __DMA_ASSERTA(qreqs >= 0); |
|
1114 } |
|
1115 |
|
1116 |
|
1117 TUint TNaviEngineDmac::MaxTransferLength(TDmaChannel& aChannel, TUint /*aSrcFlags*/, TUint /*aDstFlags*/, |
|
1118 TUint32 /*aPslInfo*/) |
|
1119 // |
|
1120 // Returns the maximum transfer size for a given transfer. |
|
1121 // |
|
1122 { |
|
1123 TNE1DmaChannel& channel = (TNE1DmaChannel&)aChannel; |
|
1124 return (1u<<channel.iTransferDataShift) * KMaxDMAUnitTransferLen; |
|
1125 } |
|
1126 |
|
1127 |
|
1128 TUint TNaviEngineDmac::AddressAlignMask(TDmaChannel& aChannel, TUint /*aSrcFlags*/, TUint /*aDstFlags*/, |
|
1129 TUint32 /*aPslInfo*/) |
|
1130 // |
|
1131 // Returns the memory buffer alignment restrictions mask for a given transfer. |
|
1132 // |
|
1133 { |
|
1134 TNE1DmaChannel& channel = (TNE1DmaChannel&)aChannel; |
|
1135 return (1<<channel.iTransferDataShift) - 1; |
|
1136 } |
|
1137 |
|
1138 |
|
TInt TNaviEngineDmac::InitHwDes(const SDmaDesHdr& aHdr, const TDmaTransferArgs& aTransferArgs)
//
// Sets up (from a passed in request) the descriptor with that fragment's
// transfer parameters.
//
	{
	TDmaDesc* pD = HdrToHwDes(aHdr);

	__KTRACE_OPT(KDMA, Kern::Printf("TNaviEngineDmac::InitHwDes 0x%08X", pD));

	// Unaligned descriptor? Bug in generic layer!
	__DMA_ASSERTD(IsHwDesAligned(pD));

	const TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
	const TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
	// Translate to physical addresses unless the client has already
	// supplied physical ones (KDmaPhysAddr flag set).
	pD->iSrcAddr = (src.iFlags & KDmaPhysAddr) ? src.iAddr : Epoc::LinearToPhysical(src.iAddr);
	__DMA_ASSERTD(pD->iSrcAddr != KPhysAddrInvalid);
	pD->iDestAddr = (dst.iFlags & KDmaPhysAddr) ? dst.iAddr : Epoc::LinearToPhysical(dst.iAddr);
	__DMA_ASSERTD(pD->iDestAddr != KPhysAddrInvalid);
	pD->iCount = aTransferArgs.iTransferCount;

	__KTRACE_OPT(KDMA, Kern::Printf(" src=0x%08X dest=0x%08X count=%d",
									pD->iSrcAddr, pD->iDestAddr, pD->iCount));
	return KErrNone;
	}
|
1164 |
|
1165 |
|
TInt TNaviEngineDmac::UpdateHwDes(const SDmaDesHdr& aHdr, TUint32 aSrcAddr, TUint32 aDstAddr,
								  TUint aTransferCount, TUint32 aPslRequestInfo)
//
// Updates (from the passed in arguments) fields of the descriptor. This
// function is called by the PIL in ISR context.
//
	{
	TDmaDesc* pD = HdrToHwDes(aHdr);

	__KTRACE_OPT(KDMA, Kern::Printf("TNaviEngineDmac::UpdateHwDes 0x%08X", pD));

	// Unaligned descriptor? Bug in generic layer!
	__DMA_ASSERTD(IsHwDesAligned(pD));

	// Only the fields with non-default arguments are updated; the
	// others keep their previously programmed values.
	// Addresses passed into this function are always physical ones.
	if (aSrcAddr != KPhysAddrInvalid)
		{
		__KTRACE_OPT(KDMA, Kern::Printf(" Changing src addr, old: 0x%08X new: 0x%08X",
										pD->iSrcAddr, aSrcAddr));
		pD->iSrcAddr = aSrcAddr;
		}
	if (aDstAddr != KPhysAddrInvalid)
		{
		__KTRACE_OPT(KDMA, Kern::Printf(" Changing dst addr, old: 0x%08X new: 0x%08X",
										pD->iDestAddr, aDstAddr));
		pD->iDestAddr = aDstAddr;
		}
	if (aTransferCount != 0)
		{
		__KTRACE_OPT(KDMA, Kern::Printf(" Changing xfer count, old: %d new: %d",
										pD->iCount, aTransferCount));
		pD->iCount = aTransferCount;
		}
	if (aPslRequestInfo != 0)
		{
		// This PSL has no per-request info to apply.
		__KTRACE_OPT(KDMA, Kern::Printf(" aPslRequestInfo specified (0x%08X) but ignored",
										aPslRequestInfo));
		}

	__KTRACE_OPT(KDMA, Kern::Printf(" src=0x%08X dst=0x%08X count=%d",
									pD->iSrcAddr, pD->iDestAddr, pD->iCount));

	return KErrNone;
	}
|
1210 |
|
1211 |
|
void TNaviEngineDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHd*/)
//
// Chains hardware descriptors together.
// DMAC32 doesn't support linked descriptors, therefore there is nothing we have to do here.
//
	{
	// Intentionally empty - see header comment above.
	}
|
1219 |
|
1220 |
|
void TNaviEngineDmac::AppendHwDes(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aLastHdr*/,
								  const SDmaDesHdr& /*aNewHdr*/)
//
// Appends a descriptor to the chain while the channel is running.
// DMAC32 doesn't support linked descriptors, therefore there is nothing we have to do here.
//
	{
	// Intentionally empty - see header comment above.
	}
|
1229 |
|
1230 |
|
void TNaviEngineDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
//
// Unlink the last item in the h/w descriptor chain from a subsequent chain that it was possibly linked to.
// DMAC32 doesn't support linked descriptors, therefore there is nothing we have to do here.
//
	{
	// Intentionally empty - see header comment above.
	}
|
1238 |
|
1239 |
|
1240 void TNaviEngineDmac::DMAC32_Isr(TAny* aThis, TInt aController, TInt aDmaStat, TInt aCompleted) |
|
1241 // |
|
1242 // Generic part for all DMA32 interrupts. |
|
1243 // Reads the interrupt identification and calls back into the base class |
|
1244 // interrupt service handler with the channel identifier and an indication whether the |
|
1245 // transfer completed correctly or with an error. |
|
1246 // |
|
1247 { |
|
1248 DMA_PSL_TRACE("Begin ISR"); |
|
1249 |
|
1250 TNaviEngineDmac& me = *static_cast<TNaviEngineDmac*>(aThis); |
|
1251 int i; |
|
1252 |
|
1253 if (aCompleted) // Transfer-completed interrupt has occured |
|
1254 { |
|
1255 // Go through the all eight subchannels to check which event has occured. |
|
1256 for (i=0;i<KDmaCtrl32HWSubChannelCount;i++) |
|
1257 { |
|
1258 TInt channel = DMAC32_HWChannelsLocator[aController][i]; |
|
1259 #ifdef _DEBUG |
|
1260 if (channel >= EDma32ChannelCount) __DMA_CANT_HAPPEN(); |
|
1261 #endif |
|
1262 |
|
1263 // Skip unused physical channels |
|
1264 // .ie those with no corresponding entry |
|
1265 // in KDMAChannelLocator |
|
1266 if(channel == -1) |
|
1267 continue; |
|
1268 |
|
1269 TNE1DmaChannel& ne1Chan = me.iChannels[channel]; |
|
1270 |
|
1271 ne1Chan.ProcessIrq(); |
|
1272 } |
|
1273 } |
|
1274 else // Error interrupt has occured. aDmaStat is not valid. Should read H/W registers. |
|
1275 { |
|
1276 // Go through the all eight subchannels to check which event has occured. |
|
1277 for (i=0;i<KDmaCtrl32HWSubChannelCount;i++) |
|
1278 { |
|
1279 TInt dchs= AsspRegister::Read32(KDMAC32Base+aController*KDMAGroupOffset+i*KDMAChannelOffset+KHoDMACHS); |
|
1280 |
|
1281 if (dchs&KHtDMACHS_ERR) |
|
1282 { |
|
1283 TInt channel = DMAC32_HWChannelsLocator[aController][i]; |
|
1284 #ifdef _DEBUG |
|
1285 if (channel >= EDma32ChannelCount) __DMA_CANT_HAPPEN(); |
|
1286 #endif |
|
1287 |
|
1288 TNE1DmaChannel& ne1Chan = me.iChannels[channel]; |
|
1289 ne1Chan.ProcessErrorIrq(); |
|
1290 } |
|
1291 } |
|
1292 } |
|
1293 #if defined(NE1_DMA_DOUBLE_BUFFER) |
|
1294 #ifdef _DEBUG |
|
1295 InterruptCounter_DMA32++; |
|
1296 #endif |
|
1297 #endif |
|
1298 } |
|
1299 |
|
1300 |
|
1301 void TNaviEngineDmac::DMAC32_0_End_Isr(TAny* aThis) |
|
1302 { |
|
1303 TInt stat = (TInt)AsspRegister::Read32(KDMAC32Base+0*KDMAGroupOffset+KHoDMASTAT); |
|
1304 DMAC32_Isr(aThis, 0, stat, 1); |
|
1305 } |
|
1306 void TNaviEngineDmac::DMAC32_2_End_Isr(TAny* aThis) |
|
1307 { |
|
1308 TInt stat = (TInt)AsspRegister::Read32(KDMAC32Base+2*KDMAGroupOffset+KHoDMASTAT); |
|
1309 DMAC32_Isr(aThis, 2, stat, 1); |
|
1310 } |
|
1311 void TNaviEngineDmac::DMAC32_3_End_Isr(TAny* aThis) |
|
1312 { |
|
1313 TInt stat = (TInt)AsspRegister::Read32(KDMAC32Base+3*KDMAGroupOffset+KHoDMASTAT); |
|
1314 DMAC32_Isr(aThis, 3, stat, 1); |
|
1315 } |
|
1316 |
|
1317 void TNaviEngineDmac::DMAC32_0_Err_Isr(TAny* aThis) |
|
1318 { |
|
1319 DMAC32_Isr(aThis, 0, 0, 0); |
|
1320 } |
|
1321 void TNaviEngineDmac::DMAC32_2_Err_Isr(TAny* aThis) |
|
1322 { |
|
1323 DMAC32_Isr(aThis, 2, 0, 0); |
|
1324 } |
|
1325 void TNaviEngineDmac::DMAC32_3_Err_Isr(TAny* aThis) |
|
1326 { |
|
1327 DMAC32_Isr(aThis, 3, 0, 0); |
|
1328 } |
|
1329 |
|
1330 inline TDmaDesc* TNaviEngineDmac::HdrToHwDes(const SDmaDesHdr& aHdr) |
|
1331 // |
|
1332 // Changes return type of base class call. |
|
1333 // |
|
1334 { |
|
1335 return static_cast<TDmaDesc*>(TDmac::HdrToHwDes(aHdr)); |
|
1336 } |
|
1337 |
|
1338 // |
|
1339 // Channel Opening/Closing (Channel Allocator) |
|
1340 // |
|
1341 TDmaChannel* DmaChannelMgr::Open(TUint32 aOpenId, TBool aDynChannel, TUint aPriority) |
|
1342 { |
|
1343 __KTRACE_OPT(KDMA, Kern::Printf("DmaChannelMgr::Open Id=%d DynChannel=%d Priority=%d", |
|
1344 aOpenId, aDynChannel, aPriority)); |
|
1345 |
|
1346 __DMA_ASSERTA(aOpenId < static_cast<TUint32>(EDmaChannelCount)); |
|
1347 |
|
1348 if (aDynChannel) |
|
1349 { |
|
1350 __KTRACE_OPT(KPANIC, Kern::Printf("DmaChannelMgr::Open failed as dynamic channel allocation is not supported")); |
|
1351 return NULL; |
|
1352 } |
|
1353 |
|
1354 const ENaviEngineDmaController controllerId = KDMAChannelLocator[aOpenId].iDMACtrl; |
|
1355 |
|
1356 TUint32 pslId = NULL; |
|
1357 TDmaChannel* pC = NULL; |
|
1358 TDmac* dmac = NULL; |
|
1359 const SDmacCaps* caps = NULL; |
|
1360 |
|
1361 switch (controllerId) |
|
1362 { |
|
1363 case EDmaCtrlExBus: |
|
1364 // fall through, EDmaCtrlExBus and EDMACtrl32 contollers work the same way |
|
1365 case EDMACtrl32: |
|
1366 pslId = aOpenId; |
|
1367 pC = Controller.iChannels + pslId; |
|
1368 dmac = &Controller; |
|
1369 caps = &TNaviEngineDmac::KCaps; |
|
1370 break; |
|
1371 case EDmaCtrl64: |
|
1372 pslId = aOpenId - EDma64MemToMem0; |
|
1373 pC = Controller64.iChannels + pslId; |
|
1374 dmac = &Controller64; |
|
1375 caps = &TNaviEngineDmac64Sg::KCaps; |
|
1376 break; |
|
1377 default: |
|
1378 __DMA_CANT_HAPPEN(); |
|
1379 } |
|
1380 |
|
1381 if (pC->IsOpened()) |
|
1382 { |
|
1383 pC = NULL; |
|
1384 } |
|
1385 else |
|
1386 { |
|
1387 pC->iController = dmac; |
|
1388 pC->iDmacCaps = caps; |
|
1389 pC->iPslId = pslId; |
|
1390 // Note: Dynamic channel allocation not currently supported by PIL |
|
1391 pC->iDynChannel = EFalse; |
|
1392 // Note: Channel priority setting not currently supported by PIL |
|
1393 pC->iPriority = aPriority; |
|
1394 } |
|
1395 |
|
1396 return pC; |
|
1397 } |
|
1398 |
|
1399 void DmaChannelMgr::Close(TDmaChannel* aChannel) |
|
1400 { |
|
1401 if(aChannel->iController == &Controller) |
|
1402 { |
|
1403 // Check if this is a TNE1DmaChannel |
|
1404 TNE1DmaChannel* channel = static_cast<TNE1DmaChannel*>(aChannel); |
|
1405 channel->Close(); |
|
1406 } |
|
1407 else if(aChannel->iController == &Controller64) |
|
1408 { |
|
1409 TNeSgChannel* channel = static_cast<TNeSgChannel*>(aChannel); |
|
1410 channel->Close(); |
|
1411 } |
|
1412 else |
|
1413 { |
|
1414 FAULT(); |
|
1415 } |
|
1416 } |
|
1417 |
|
1418 TInt DmaChannelMgr::StaticExtension(TInt /* aCmd */, TAny* /* aArg */) |
|
1419 { |
|
1420 return KErrNotSupported; |
|
1421 } |
|
1422 |
|
1423 TNeSgChannel::TNeSgChannel() |
|
1424 :TDmaSgChannel(), iBaseAddr(NULL), iTransferCount(0), iLock(TSpinLock::EOrderGenericIrqHigh0) |
|
1425 { |
|
1426 FUNC_LOG; |
|
1427 } |
|
1428 |
|
// Construct the channel identified by aPslId, computing the base
// address of its register bank.
TNeSgChannel::TNeSgChannel(TInt aPslId)
	:TDmaSgChannel(), iTransferCount(0), iLock(TSpinLock::EOrderGenericIrqHigh0)
	{
	FUNC_LOG;
	iPslId = aPslId;
	// Per-channel register bank base address.
	iBaseAddr = Channel::KHoBases[iPslId] + KHwDMAC64Base;
	}
|
1436 |
|
// Launches the descriptor chain starting at aHwDes on this channel.
// May be called from thread context (takes iLock) or from ISR context
// (lock assumed already held).
void TNeSgChannel::Transfer(TDma64Desc* aHwDes)
	{
	__DMA_ASSERTD(aHwDes);
#ifdef _DEBUG_PRINT
	Print();
	aHwDes->Print();
#endif
	// The DMAC is given the physical address of the descriptor.
	const TPhysAddr descAddr = iController->HwDesLinToPhys(aHwDes);

	__NK_ASSERT_DEBUG(IsIdle());
	// We shouldn't be clobbering the "next link" register.
	// When a descriptor chain has completed the
	// register should be empty.
	__NK_ASSERT_DEBUG(NextLink() == NULL);

	const TBool isIsr = (NKern::CurrentContext() == NKern::EInterrupt);
	TInt irq = 0;
	if(!isIsr) // If we are in ISR context, assume that the lock is already held
		{
		irq = __SPIN_LOCK_IRQSAVE(iLock);
		}
	DMA_PSL_CHAN_TRACE1("Transfer iTransferCount %d", iTransferCount);
	iTransferCount++;

	// Point the channel at the descriptor, select link mode, and enable.
	AsspRegister::Write32(iBaseAddr + Channel::KHoNxtLnkAddr, descAddr);
	AsspRegister::Modify32(iBaseAddr + Channel::Cfg::KHoBase, 0, Channel::Cfg::KHtLinkMode);
	AsspRegister::Modify32(iBaseAddr + Channel::Ctrl::KHoBase, 0, Channel::Ctrl::KHtSetEnable);

#ifdef _DEBUG_PRINT
	{
	TUint32 status = NULL;
	do
		{
		status = Status();
		__NK_ASSERT_ALWAYS((status & Channel::Status::KHtDescErr) == 0);
		} while (status & Channel::Status::KHtDescLoad); // are we still loading desc?
	}
#endif

	// Kick off the transfer with the software trigger.
	AsspRegister::Modify32(iBaseAddr + Channel::Ctrl::KHoBase, 0, Channel::Ctrl::KHtSwTrigger);
	if(!isIsr)
		{
		__SPIN_UNLOCK_IRQRESTORE(iLock, irq);
		}

#ifdef _DEBUG_PRINT
	Print();
#endif
	}
|
1486 |
|
/**
Close this channel.
@pre The channel has been stopped and is idle.
*/
void TNeSgChannel::Close()
	{
	// The lock prevents a channel being closed
	// during an ISR.
	const TInt irq = __SPIN_LOCK_IRQSAVE(iLock);
	DMA_PSL_CHAN_TRACE("Close");

	// Check that the channel was Idle and Stopped
	__NK_ASSERT_ALWAYS(IsIdle());
	__NK_ASSERT_ALWAYS(iTransferCount == 0);

	// Here we clear iController in advance of the PIL
	// If we did not do this, then when we release the lock, the ISR
	// could observe it as non-null, proceed, but then have the PIL
	// clear it mid-isr
	iController = NULL;

	__SPIN_UNLOCK_IRQRESTORE(iLock, irq);
	}
|
1506 |
|
#ifdef _DEBUG_PRINT_ISR
// Dump this channel's register state (debug ISR-print builds only).
void TNeSgChannel::Print()
	{
	FUNC_LOG;
	PRINT(CurrSrcAddr());
	PRINT(CurrDstAddr());
	PRINT(CurrByteCount());
	PRINT(Status());
	PRINT(Config());
	PRINT(CurrLink());
	PRINT(NextLink());

	Kern::Printf("");
	}
#endif
|
1522 |
|
1523 void TNeSgChannel::QueuedRequestCountChanged() |
|
1524 { |
|
1525 const TInt qreqs = __e32_atomic_load_acq32(&iQueuedRequests); |
|
1526 DMA_PSL_CHAN_TRACE1("TNE1DmaChannel::QueuedRequestCountChanged() %d", qreqs); |
|
1527 __DMA_ASSERTA(qreqs >= 0); |
|
1528 } |
|
1529 |
|
1530 |
|
TNaviEngineDmac64Sg::TNaviEngineDmac64Sg()
//
// Constructor.
//
	: TDmac(KInfo)
	{
	FUNC_LOG;
	for(TInt i = 0; i < Dma64::KChannelCount; i++)
		{
		// Placement-construct each channel in situ with its PSL id;
		// cannot use assignment since internal references
		// eg. iNullPtr = &iCurHdr will become invalid
		new (&iChannels[i]) TNeSgChannel(i);
		}
	}
|
1545 |
|
1546 |
|
TInt TNaviEngineDmac64Sg::Create()
//
// Second phase construction.
//
	{
	FUNC_LOG;
	TInt r = TDmac::Create(KInfo); // Base class Create()
	if (r == KErrNone)
		{
		// Bind and enable the completion and error interrupts for the
		// 64-bit DMAC; failure is treated as fatal (asserted).
		r = Interrupt::Bind(KIntDMAC64_End, IsrEnd, this);
		__DMA_ASSERTA(r >= KErrNone);

		r = Interrupt::Enable(KIntDMAC64_End);
		__DMA_ASSERTA(r >= KErrNone);

		r = Interrupt::Bind(KIntDMAC64_Err, IsrErr, this);
		__DMA_ASSERTA(r >= KErrNone);

		r = Interrupt::Enable(KIntDMAC64_Err);
		__DMA_ASSERTA(r >= KErrNone);
		}
	return r;
	}
|
1570 |
|
1571 |
|
1572 void TNaviEngineDmac64Sg::Transfer(const TDmaChannel& aChannel, const SDmaDesHdr& aHdr) |
|
1573 // |
|
1574 // Initiates a (previously constructed) request on a specific channel. |
|
1575 // |
|
1576 { |
|
1577 TDma64Desc* hwDes = HdrToHwDes(aHdr); |
|
1578 |
|
1579 TDmaChannel& mutableChannel = const_cast<TDmaChannel&>(aChannel); |
|
1580 TNeSgChannel& channel = static_cast<TNeSgChannel&>(mutableChannel); |
|
1581 channel.Transfer(hwDes); |
|
1582 } |
|
1583 |
|
// Note for if Pause and Resume is ever made externally
// accessible for this driver. This function has potentially
// undesirable behaviour if it is called while the channel is
// paused. Although any remaining transfers in the linked list
// will be abandoned it will allow the current transfer to
// complete. It would need to be modified to prevent this.
void TNaviEngineDmac64Sg::StopTransfer(const TDmaChannel& aChannel)
//
// Stops a running channel.
//
	{
	const TInt id = aChannel.PslId();

	DMA_PSL_CHAN_TRACE_STATIC(aChannel, "StopTransfer");

#ifdef _DEBUG_PRINT
	iChannels[id].Print();
#endif

	//This implements the ForcedEnd procdure on page 2-10-82 of NaviEngine TRM
	const TUint32 channelBase = iChannels[id].BaseAddr();
	volatile TUint32 channelStatus = NULL;


	AsspRegister::Write32(channelBase + Channel::Interval::KHo, 0);
	FOREVER
		{
		FOREVER
			{
			// Request suspension, then check whether a descriptor
			// load or write-back is still in flight.
			AsspRegister::Modify32(channelBase + Channel::Ctrl::KHoBase, NULL, Channel::Ctrl::KHtSetSuspend);
			channelStatus = AsspRegister::Read32(channelBase + Channel::Status::KHoBase);

			if((channelStatus & (Channel::Status::KHtDescWb|Channel::Status::KHtDescLoad)) == 0)
				{
				//we can leave loop if there is no descriptor load or
				//writeback in progress
				break;
				}
			AsspRegister::Modify32(channelBase + Channel::Ctrl::KHoBase, NULL, Channel::Ctrl::KHtClrSuspend);
			}

		//Switch back to single transfer + register mode,
		//mask completion + end interrupts.
		AsspRegister::Modify32(channelBase + Channel::Cfg::KHoBase,
							   Channel::Cfg::KHtTransMode | Channel::Cfg::KHtLinkMode,
							   Channel::Cfg::KHtEndMask | Channel::Cfg::KHtCompMask);

		AsspRegister::Modify32(channelBase + Channel::Ctrl::KHoBase, NULL, Channel::Ctrl::KHtClrSuspend);
		AsspRegister::Modify32(channelBase + Channel::Ctrl::KHoBase, NULL, Channel::Ctrl::KHtSetSuspend);

		channelStatus = AsspRegister::Read32(channelBase + Channel::Status::KHoBase);
		AsspRegister::Modify32(channelBase + Channel::Ctrl::KHoBase, NULL, Channel::Ctrl::KHtClrSuspend);

		if((channelStatus & (Channel::Status::KHtDescWb|Channel::Status::KHtDescLoad)) == 0)
			{
			break;
			}
		}

	//WARNING: This step is not mentioned in the forced stop
	//procedure.
	//We have now broken out of linked mode, and just want to finish
	//transfer of the current desc, so switch back to block transfer
	//mode and set the software trigger so we can complete.
	AsspRegister::Modify32(channelBase + Channel::Cfg::KHoBase, 0, Channel::Cfg::KHtTransMode);
	AsspRegister::Modify32(channelBase + Channel::Ctrl::KHoBase, 0, Channel::Ctrl::KHtSwTrigger);

	// Poll until the current transfer drains, or the channel disables
	// itself.
	FOREVER
		{
		if(
			((channelStatus & (Channel::Status::KHtAct | Channel::Status::KHtRqst)) == 0) ||
			((channelStatus & Channel::Status::KHtEnabled) == 0)
			)
			{
			break;
			}
		channelStatus = AsspRegister::Read32(channelBase + Channel::Status::KHoBase);
		}


	AsspRegister::Modify32(channelBase + Channel::Ctrl::KHoBase, NULL, Channel::Ctrl::KHtSwReset);

#ifdef _DEBUG
	//we are deliberatly breaking a descriptor chain
	//so clear Next Link Address
	AsspRegister::Write32(channelBase + Channel::KHoNxtLnkAddr, NULL);
#endif

	TDmaChannel& mutableChannel = const_cast<TDmaChannel&>(aChannel);
	TNeSgChannel& channel = static_cast<TNeSgChannel&>(mutableChannel);

	// No transfers remain outstanding on this channel.
	const TInt irq = __SPIN_LOCK_IRQSAVE(channel.iLock);
	channel.iTransferCount = 0;
	__SPIN_UNLOCK_IRQRESTORE(channel.iLock, irq);
	}
|
1679 |
|
1680 TBool TNaviEngineDmac64Sg::IsIdle(const TDmaChannel& aChannel) |
|
1681 // |
|
1682 // Returns the state of a given channel. |
|
1683 // |
|
1684 { |
|
1685 const TNeSgChannel& channel = static_cast<const TNeSgChannel&>(aChannel); |
|
1686 |
|
1687 __KTRACE_OPT(KDMA, Kern::Printf("TNaviEngineDmac64Sg::IsIdle channel=%d, chanStatus=0x%08x", |
|
1688 channel.PslId(), channel.Status())); |
|
1689 |
|
1690 return channel.IsIdle(); |
|
1691 } |
|
1692 |
|
1693 |
|
1694 TUint TNaviEngineDmac64Sg::MaxTransferLength(TDmaChannel& /*aChannel*/, TUint /*aSrcFlags*/, |
|
1695 TUint /*aDstFlags*/, TUint32 /*aPslInfo*/) |
|
1696 // |
|
1697 // Returns the maximum transfer size for a given transfer. |
|
1698 // |
|
1699 { |
|
1700 return KDma64MaxTransferBytes; |
|
1701 } |
|
1702 |
|
1703 |
|
1704 TUint TNaviEngineDmac64Sg::AddressAlignMask(TDmaChannel& /*aChannel*/, TUint /*aSrcFlags*/, |
|
1705 TUint /*aDstFlags*/, TUint32 /*aPslInfo*/) |
|
1706 // |
|
1707 // Returns the memory buffer alignment restrictions mask for a given transfer. |
|
1708 // |
|
1709 { |
|
1710 // The 64 bit DMAC does not impose any alignment restriction on |
|
1711 // src and dst buffers, for any channel |
|
1712 return 0x0; |
|
1713 } |
|
1714 |
|
1715 |
|
1716 TInt TNaviEngineDmac64Sg::InitHwDes(const SDmaDesHdr& aHdr, const TDmaTransferArgs& aTransferArgs) |
|
1717 // |
|
1718 // Sets up (from a passed in request) the descriptor with that fragment's |
|
1719 // transfer parameters. |
|
1720 // |
|
1721 { |
|
1722 TDma64Desc* pD = HdrToHwDes(aHdr); |
|
1723 |
|
1724 __KTRACE_OPT(KDMA, Kern::Printf("TNaviEngineDmac64Sg::InitHwDes 0x%08X", pD)); |
|
1725 |
|
1726 // Unaligned descriptor? Bug in generic layer! |
|
1727 __DMA_ASSERTD(IsHwDesAligned(pD)); |
|
1728 pD->Clear(); |
|
1729 |
|
1730 // The DMAC supports write back (ie. marking a descriptor as dirty after |
|
1731 // it's been transferred) |
|
1732 // We disable it here, because the test code (and other clients?) assume |
|
1733 // they may simply requeue a transfer after it has run |
|
1734 pD->iHeader = HwDesHeader::KHtLe | HwDesHeader::KHtLv | HwDesHeader::KHtWbd; |
|
1735 |
|
1736 const TDmaTransferConfig& src = aTransferArgs.iSrcConfig; |
|
1737 const TDmaTransferConfig& dst = aTransferArgs.iDstConfig; |
|
1738 pD->iSrcAddr = (src.iFlags & KDmaPhysAddr) ? src.iAddr : Epoc::LinearToPhysical(src.iAddr); |
|
1739 __DMA_ASSERTD(pD->iSrcAddr != KPhysAddrInvalid); |
|
1740 pD->iDestAddr = (dst.iFlags & KDmaPhysAddr) ? dst.iAddr : Epoc::LinearToPhysical(dst.iAddr); |
|
1741 __DMA_ASSERTD(pD->iDestAddr != KPhysAddrInvalid); |
|
1742 |
|
1743 pD->iTransactionByte = aTransferArgs.iTransferCount; |
|
1744 |
|
1745 __KTRACE_OPT(KDMA, Kern::Printf(" src=0x%08X dest=0x%08X count=%d", |
|
1746 pD->iSrcAddr, pD->iDestAddr, pD->iTransactionByte)); |
|
1747 pD->iConfig = 0; |
|
1748 |
|
1749 // Since this controller only supports memory to memory transfers |
|
1750 // we enable block transfer for every descriptor |
|
1751 pD->iConfig |= Channel::Cfg::KHtTransMode; |
|
1752 |
|
1753 pD->SetSourceDataSize(TDma64Desc::E512Bit); |
|
1754 pD->SetDestDataSize(TDma64Desc::E512Bit); |
|
1755 |
|
1756 return KErrNone; |
|
1757 } |
|
1758 |
|
1759 |
|
1760 TInt TNaviEngineDmac64Sg::UpdateHwDes(const SDmaDesHdr& aHdr, TUint32 aSrcAddr, TUint32 aDstAddr, |
|
1761 TUint aTransferCount, TUint32 aPslRequestInfo) |
|
1762 // |
|
1763 // Updates (from the passed in arguments) fields of the descriptor. This |
|
1764 // function is called by the PIL in ISR context. |
|
1765 // |
|
1766 { |
|
1767 TDma64Desc* pD = HdrToHwDes(aHdr); |
|
1768 |
|
1769 __KTRACE_OPT(KDMA, Kern::Printf("TNaviEngineDmac64Sg::UpdateHwDes 0x%08X", pD)); |
|
1770 |
|
1771 // Unaligned descriptor? Bug in generic layer! |
|
1772 __DMA_ASSERTD(IsHwDesAligned(pD)); |
|
1773 |
|
1774 // Addresses passed into this function are always physical ones. |
|
1775 if (aSrcAddr != KPhysAddrInvalid) |
|
1776 { |
|
1777 __KTRACE_OPT(KDMA, Kern::Printf(" Changing src addr, old: 0x%08X new: 0x%08X", |
|
1778 pD->iSrcAddr, aSrcAddr)); |
|
1779 pD->iSrcAddr = aSrcAddr; |
|
1780 } |
|
1781 if (aDstAddr != KPhysAddrInvalid) |
|
1782 { |
|
1783 __KTRACE_OPT(KDMA, Kern::Printf(" Changing dst addr, old: 0x%08X new: 0x%08X", |
|
1784 pD->iDestAddr, aDstAddr)); |
|
1785 pD->iDestAddr = aDstAddr; |
|
1786 } |
|
1787 if (aTransferCount != 0) |
|
1788 { |
|
1789 __KTRACE_OPT(KDMA, Kern::Printf(" Changing xfer count, old: %d new: %d", |
|
1790 pD->iTransactionByte, aTransferCount)); |
|
1791 pD->iTransactionByte = aTransferCount; |
|
1792 } |
|
1793 if (aPslRequestInfo != 0) |
|
1794 { |
|
1795 __KTRACE_OPT(KDMA, Kern::Printf(" aPslRequestInfo specified (0x%08X) but ignored", |
|
1796 aPslRequestInfo)); |
|
1797 } |
|
1798 |
|
1799 __KTRACE_OPT(KDMA, Kern::Printf(" src=0x%08X dest=0x%08X count=%d", |
|
1800 pD->iSrcAddr, pD->iDestAddr, pD->iTransactionByte)); |
|
1801 |
|
1802 return KErrNone; |
|
1803 } |
|
1804 |
|
1805 /** |
|
1806 Like ChainHwDes, but does not suppress interrupts in the descriptor |
|
1807 being appended to. |
|
1808 */ |
|
1809 void TNaviEngineDmac64Sg::JoinHwDes(TDma64Desc& aHwDes, const TDma64Desc& aNextHwDes) |
|
1810 { |
|
1811 // Unaligned descriptor? Bug in generic layer! |
|
1812 __DMA_ASSERTD(IsHwDesAligned(&aHwDes) && IsHwDesAligned(&aNextHwDes)); |
|
1813 |
|
1814 aHwDes.iHeader &= ~(HwDesHeader::KHtLe); |
|
1815 __NK_ASSERT_DEBUG(aHwDes.iNextLink == NULL); |
|
1816 aHwDes.iNextLink = Epoc::LinearToPhysical(reinterpret_cast<TLinAddr>(&aNextHwDes)); |
|
1817 __DMA_ASSERTD(aHwDes.iNextLink != KPhysAddrInvalid); |
|
1818 } |
|
1819 |
|
1820 void TNaviEngineDmac64Sg::ChainHwDes(const SDmaDesHdr& aHdr, const SDmaDesHdr& aNextHdr) |
|
1821 // |
|
1822 // Chains hardware descriptors together by setting the next pointer of the original descriptor |
|
1823 // to the physical address of the descriptor to be chained. |
|
1824 // |
|
1825 { |
|
1826 TDma64Desc* pD = HdrToHwDes(aHdr); |
|
1827 TDma64Desc* pN = HdrToHwDes(aNextHdr); |
|
1828 |
|
1829 __KTRACE_OPT(KDMA, Kern::Printf("TNaviEngineDmac64Sg::ChainHwDes des=0x%08X next des=0x%08X", pD, pN)); |
|
1830 |
|
1831 JoinHwDes(*pD, *pN); |
|
1832 |
|
1833 //only the last link in the chain should produce an interrupt or |
|
1834 //set the "transfer complete" bit |
|
1835 pD->iConfig |= (Channel::Cfg::KHtEndMask|Channel::Cfg::KHtCompMask); |
|
1836 } |
|
1837 |
|
1838 |
|
void TNaviEngineDmac64Sg::AppendHwDes(const TDmaChannel& aChannel, const SDmaDesHdr& aLastHdr,
                                      const SDmaDesHdr& aNewHdr)
//
// Appends a descriptor to the chain while the channel is running.
//
// The channel is paused so that the hardware's next-link state can be
// inspected without racing against the DMAC advancing through the
// chain. Two outcomes are possible: either the DMAC has not yet
// consumed the last descriptor's link (simple append), or it is on /
// past the final descriptor, in which case a fresh Transfer() must be
// started for the new descriptor.
//
	{
	const TUint32 i = static_cast<TUint8>(aChannel.PslId());

	TDma64Desc* pL = HdrToHwDes(aLastHdr);
	TDma64Desc* pN = HdrToHwDes(aNewHdr);

	__KTRACE_OPT(KDMA, Kern::Printf("TNaviEngineDmac64Sg::AppendHwDes channel=%d last des=0x%08X new des=0x%08X",
	                                i, pL, pN));

	TNeSgChannel& channel(iChannels[i]);

	// Freeze the channel while we look at its next-link state below.
	channel.Pause();

#ifdef _DEBUG_PRINT
	__KTRACE_OPT(KDMA, Kern::Printf("Last descriptor:"));
	pL->Print();
	__KTRACE_OPT(KDMA, Kern::Printf("Next descriptor:"));
	pN->Print();
	iChannels[i].Print();
#endif
	// Check we are really appending on to the end of a chain
	__NK_ASSERT_DEBUG(pL->iNextLink == 0);

	if(channel.NextLink())
		{
		// Simple case, the dmac is still working through the descriptor
		// chain - just add new link to the end.
		JoinHwDes(*pL, *pN);

		// iTransferCount is shared with the ISR, so it is only ever
		// updated under the channel spinlock.
		const TInt irq = __SPIN_LOCK_IRQSAVE(channel.iLock);
		channel.iTransferCount++;
		__SPIN_UNLOCK_IRQRESTORE(channel.iLock, irq);

		DMA_PSL_CHAN_TRACE_STATIC1(aChannel, "requests chained iTransferCount %d", channel.iTransferCount);
		channel.Resume();
		}
	else
		{
		// If the next link register is not set, then the controller is
		// either on last descriptor or has completed (though PIL not yet aware)
		channel.Resume();

		// Wait till the channel is idle so that we don't disrupt the
		// transfer of the final descriptor, if it's still running.
		// NOTE(review): this is a busy-wait with no timeout - presumably
		// acceptable because only the tail of one descriptor remains.
		while(!channel.IsIdle())
			{
			}
		// Start a fresh transfer for the newly appended descriptor.
		Transfer(aChannel, aNewHdr);
		}

#ifdef _DEBUG_PRINT
	__KTRACE_OPT(KDMA, Kern::Printf("Last descriptor:"));
	pL->Print();
	iChannels[i].Print();
#endif
	__KTRACE_OPT(KDMA, Kern::Printf("<TNaviEngineDmac64Sg::AppendHwDes"));
	}
|
1901 |
|
1902 |
|
1903 void TNaviEngineDmac64Sg::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& aHdr) |
|
1904 // |
|
1905 // Unlink the last item in the h/w descriptor chain from a subsequent chain that it was |
|
1906 // possibly linked to. |
|
1907 // |
|
1908 { |
|
1909 __KTRACE_OPT(KDMA, Kern::Printf("TNaviEngineDmac64Sg::UnlinkHwDes")); |
|
1910 TDma64Desc* pD = HdrToHwDes(aHdr); |
|
1911 |
|
1912 //Descriptor is now an end link |
|
1913 pD->iHeader |= HwDesHeader::KHtLe; |
|
1914 |
|
1915 //Allow this descriptor to raise completion interrupts again. |
|
1916 pD->iConfig &= ~(Channel::Cfg::KHtEndMask|Channel::Cfg::KHtCompMask); |
|
1917 |
|
1918 pD->iNextLink = 0; |
|
1919 } |
|
1920 |
|
1921 |
|
void TNaviEngineDmac64Sg::IsrEnd(TAny* aThis)
//
// This ISR reads the interrupt identification and calls back into the base class
// interrupt service handler with the channel identifier and an indication whether the
// transfer completed correctly or with an error.
//
// The controller-wide TC and Err registers are read once; each channel
// is then examined under its own spinlock (the lock also guards
// iTransferCount, which StopTransfer and AppendHwDes update).
//
	{
	FUNC_LOG;

	TNaviEngineDmac64Sg& me = *static_cast<TNaviEngineDmac64Sg*>(aThis);

	// Snapshot the per-channel completion and error state. Bit i of
	// each register corresponds to channel i (see the mask below).
	const TUint32 channelTcStates = AsspRegister::Read32(KHwDMAC64Base + Cmn::KHoTc);
	const TUint32 channelErrStates = AsspRegister::Read32(KHwDMAC64Base + Cmn::KHoErr);

	TNeSgChannel* channel = NULL;
	for (TInt i=0; i<Dma64::KChannelCount; i++)
		{
		const TUint32 mask = (1<<i);
		const TBool transferComplete = (channelTcStates & mask);
		const TBool error = (channelErrStates & mask);


		channel = me.iChannels + i;
		const TInt irq = __SPIN_LOCK_IRQSAVE(channel->iLock);

		if(channel->iController == NULL)
			{
			// skip closed channel
			__SPIN_UNLOCK_IRQRESTORE(channel->iLock, irq);
			continue;
			}

		// If error then it could have been raised by any of the
		// previous requests - they must all be completed with
		// an error status
		if(error)
			{
			// Reset the channel and iTransferCount before
			// calling HandleIsr since each one could lead
			// to a new transfer
			TUint32 clearMask = Channel::Ctrl::KHtSwReset;
			AsspRegister::Modify32(channel->BaseAddr() + Channel::Ctrl::KHoBase, 0, clearMask);

			// Complete every outstanding request with an error status.
			const TInt badTransfers = channel->iTransferCount;
			channel->iTransferCount = 0;
			for(TInt j=0; j < badTransfers; j++)
				{
				HandleIsr(*channel, EDmaCallbackRequestCompletion, EFalse);
				}
			__SPIN_UNLOCK_IRQRESTORE(channel->iLock, irq);
			continue;
			}

		// Recovery path: the channel has gone idle while transfers are
		// still outstanding, so some completion interrupts were missed
		// (presumably coalesced/lost - see the SMP race note at the top
		// of the file). Report them to the PIL now.
		if((channel->iTransferCount > 0) && channel->IsIdle())
			{
			TInt missedCount = channel->iTransferCount;
			if(transferComplete)
				{
				// If a transfer has completed normally
				// then allow that to be handled in the next block
				missedCount--;
				}
			DMA_PSL_CHAN_TRACE_STATIC1((*channel), "clearing %d missed irqs", missedCount);
			for(TInt j=0; j < missedCount; j++)
				{
				HandleIsr(*channel, EDmaCallbackRequestCompletion, ETrue);
				}
			channel->iTransferCount -= missedCount;
			}

		if (transferComplete)
			{
#ifdef _DEBUG_PRINT_ISR
			channel->Print();
#endif
				{
				// Acknowledge the interrupt: clear the TC and END flags
				// and issue the software trigger, in one register write.
				using namespace Channel::Ctrl;
				TUint32 setMask = KHtClrTc | KHtClrEnd | KHtSwTrigger;
				AsspRegister::Modify32(channel->BaseAddr() + KHoBase, 0, setMask);
				}

			// Only signal the PIL if iTransferCount is not 0.
			// StopTransfer could have reset the count.
			if(channel->iTransferCount > 0)
				{
				HandleIsr(*channel, EDmaCallbackRequestCompletion, ETrue);
				channel->iTransferCount--;
				}

			}
		__SPIN_UNLOCK_IRQRESTORE(channel->iLock, irq);
		}
	}
|
2015 |
|
2016 |
|
2017 void TNaviEngineDmac64Sg::IsrErr(TAny* aThis) |
|
2018 // |
|
2019 // This ISR reads the interrupt identification and calls back into the base class |
|
2020 // interrupt service handler with the channel identifier and an indication whether the |
|
2021 // transfer completed correctly or with an error. |
|
2022 // |
|
2023 { |
|
2024 FUNC_LOG; |
|
2025 IsrEnd(aThis); // IsrEnd will report the error |
|
2026 } |
|
2027 |
|
2028 inline TDma64Desc* TNaviEngineDmac64Sg::HdrToHwDes(const SDmaDesHdr& aHdr) |
|
2029 // |
|
2030 // Changes return type of base class call. |
|
2031 // |
|
2032 { |
|
2033 return static_cast<TDma64Desc*>(TDmac::HdrToHwDes(aHdr)); |
|
2034 } |
|
2035 // |
|
2036 // DLL Exported Function |
|
2037 // |
|
2038 |
|
2039 DECLARE_STANDARD_EXTENSION() |
|
2040 // |
|
2041 // Creates and initializes a new DMA controller object on the kernel heap. |
|
2042 // |
|
2043 { |
|
2044 __KTRACE_OPT2(KBOOT, KDMA, Kern::Printf("Starting DMA Extension")); |
|
2045 |
|
2046 TInt r = DmaChannelMgr::Initialise(); |
|
2047 if(r!=KErrNone) |
|
2048 { |
|
2049 return r; |
|
2050 } |
|
2051 |
|
2052 r = Controller64.Create(); |
|
2053 if(r!=KErrNone) |
|
2054 { |
|
2055 return r; |
|
2056 } |
|
2057 |
|
2058 return Controller.Create(); |
|
2059 } |