|
1 /* |
|
2 * Copyright (c) 2008-2010 Nokia Corporation and/or its subsidiary(-ies). |
|
3 * All rights reserved. |
|
4 * This component and the accompanying materials are made available |
|
5 * under the terms of "Eclipse Public License v1.0" |
|
6 * which accompanies this distribution, and is available |
|
7 * at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
8 * |
|
9 * Initial Contributors: |
|
10 * Nokia Corporation - initial contribution. |
|
11 * |
|
12 * Contributors: |
|
13 * |
|
14 * Description: |
|
15 * naviengine_assp\dmapsl.cpp |
|
16 * DMA Platform Specific Layer (PSL) for Navi Engine. |
|
17 * |
|
18 */ |
|
19 |
|
20 |
|
21 |
|
22 |
|
23 #include <kernel/kern_priv.h> |
|
24 #include <assp.h> |
|
25 #include <naviengine_priv.h> |
|
26 #include <navienginedma.h> |
|
27 #include <dma.h> |
|
28 |
|
29 // Debug support |
|
// Category string used when this PSL raises a kernel panic/fault.
static const char KDmaPanicCat[] = "DMA PSL";
static const TInt KDesCount = 1024; // DMA descriptor count - sufficient to serve all channels at the time.
|
32 |
|
/* Maps logical DMA channels into physical ones.
   Indexed by the logical channel id (EDmaChannelSD0 .. EDmaMemToMem3); each row gives
   the controller, group and subchannel the logical channel is wired to, the channel
   configuration (DCHC) value excluding the SEL field, and log2 of the transfer unit
   size used to convert a byte count into a hardware transfer count. */
static const TDMAChannelLocator KDMAChannelLocator[EDmaChannelCount]=
	{
	// controller, group, subchannel  DCHS (exc. SEL)  TransferShiftSize
	{EDMACtrl32 ,2 ,2 ,KHvDMACHC_SDR,  1}, //EDMAChannelSD0,
	{EDMACtrl32 ,2 ,3 ,KHvDMACHC_SDW,  1}, //EDMAChannelSD1,

	{EDMACtrl32 ,3 ,0 ,KHvDMACHC_I2SR, 1}, //EDMAChannelI2S0RX,
	{EDMACtrl32 ,3 ,1 ,KHvDMACHC_I2SW, 1}, //EDMAChannelI2S0TX,
	{EDMACtrl32 ,3 ,2 ,KHvDMACHC_I2SR, 1}, //EDMAChannelI2S1RX,
	{EDMACtrl32 ,3 ,3 ,KHvDMACHC_I2SW, 1}, //EDMAChannelI2S1TX,
	{EDMACtrl32 ,3 ,4 ,KHvDMACHC_I2SR, 1}, //EDMAChannelI2S2RX,
	{EDMACtrl32 ,3 ,5 ,KHvDMACHC_I2SW, 1}, //EDMAChannelI2S2TX,
	{EDMACtrl32 ,3 ,6 ,KHvDMACHC_I2SR, 1}, //EDMAChannelI2S3RX,
	{EDMACtrl32 ,3 ,7 ,KHvDMACHC_I2SW, 1}, //EDMAChannelI2S3TX,

	{EDMACtrl32 ,0 ,2 ,KHvDMACHC_SW,   2}, //EDMAChannelUART0RX,
	{EDMACtrl32 ,0 ,3 ,KHvDMACHC_SW,   2}, //EDMAChannelUART0TX,
	{EDMACtrl32 ,0 ,4 ,KHvDMACHC_SW,   2}, //EDMAChannelUART1RX,
	{EDMACtrl32 ,0 ,5 ,KHvDMACHC_SW,   2}, //EDMAChannelUART1TX,
	{EDMACtrl32 ,0 ,6 ,KHvDMACHC_SW,   2}, //EDMAChannelUART2RX,
	{EDMACtrl32 ,0 ,7 ,KHvDMACHC_SW,   2}, //EDMAChannelUART2TX,

	{EDMACtrl32 ,0 ,0 ,KHvDMACHC_SW,   2}, //EDmaMemToMem0,
	{EDMACtrl32 ,0 ,1 ,KHvDMACHC_SW,   2}, //EDmaMemToMem1,
	{EDMACtrl32 ,2 ,4 ,KHvDMACHC_SW,   2}, //EDmaMemToMem2,
	{EDMACtrl32 ,2 ,5 ,KHvDMACHC_SW,   2}, //EDmaMemToMem3,
	};
|
61 |
|
/* Maps physical EDMACtrl32 channels into logical ones.
   Inverse of KDMAChannelLocator for the DMAC32 controller: indexed by
   [group][subchannel], yields the logical channel id, or -1 where the physical
   subchannel is not used by this PSL. */
static const int DMAC32_HWChannelsLocator[KDmaHWCtrl32Count][KDmaCtrl32HWSubChannelCount] =
	{
	{EDmaMemToMem0,EDmaMemToMem1,EDMAChannelUART0RX,EDMAChannelUART0TX,
	 EDMAChannelUART1RX,EDMAChannelUART1TX,EDMAChannelUART2RX,EDMAChannelUART2TX},
	{-1,-1,-1,-1,-1,-1,-1,-1},
	{-1,-1,EDMAChannelSD0,EDMAChannelSD1,EDmaMemToMem2,EDmaMemToMem3,-1,-1},
	{EDMAChannelI2S0RX,EDMAChannelI2S0TX,EDMAChannelI2S1RX,EDMAChannelI2S1TX,
	 EDMAChannelI2S2RX,EDMAChannelI2S2TX,EDMAChannelI2S3RX,EDMAChannelI2S3TX},
	{-1,-1,-1,-1,-1,-1,-1,-1},
	};
|
73 |
|
class TDmaDesc
//
// Hardware DMA descriptor.
// NOTE(review): the NE1 DMAC32 does not fetch linked descriptors itself (see
// IsHwDesAligned below); this struct is read by the PSL, which programs the
// src/dest/count values into the channel registers one fragment at a time.
//
	{
public:
	TPhysAddr iSrcAddr;   // physical source address
	TPhysAddr iDestAddr;  // physical destination address
	TInt iCount;          // Transfer counter in bytes
	};
|
84 |
|
85 |
|
//
// Test Support
//
// The list of S/W (memory-to-memory) channels to be exercised by the t_dma
// test harness.
TUint32 TestNEChannels[] = { EDmaMemToMem0, EDmaMemToMem1, EDmaMemToMem2, EDmaMemToMem3};

// Parameters handed to the generic DMA test framework.
TDmaTestInfo TestInfo =
	{
	4*KMaxDMAUnitTransferLen, //a word is a unit of transfer for mem-to-mem DMA
	3,              // Word alignment applies for S/W (mem-to-mem) transfer
	0,              // No need for cookie.
	4,              // The number of S/W DMA channels to test
	TestNEChannels,
	0,
	NULL,
	0,
	NULL
	};
|
103 |
|
// Exported accessor used by the DMA test harness to obtain this PSL's
// test configuration (see TestInfo above).
EXPORT_C const TDmaTestInfo& DmaTestInfo()
	{
	return TestInfo;
	}
|
108 |
|
109 // |
|
110 // Helper Functions |
|
111 // |
|
112 |
|
inline TBool IsHwDesAligned(TAny* aDes)
//
// We do not need H/W descriptors to be specially aligned, as the Navi Engine DMA32
// controller doesn't support linked descriptors. Instead, they are linked by S/W.
// Therefore, the ordinary word alignment applies (which is enforced by the compiler).
//
// @param aDes Address of a hardware descriptor.
// @return ETrue if the address is 4-byte aligned.
//
	{
	return ((TLinAddr)aDes & 0x3) == 0;
	}
|
122 |
|
// Channel class.
// For double buffering, we cannot use the provided TDmaDbChannel class because
// NE DMA has two sets of registers (base & work) - which is not supported by
// TDmaDbChannel.
#if defined(NE1_DMA_DOUBLE_BUFFER)
class TNE1DmaChannel : public TDmaChannel
#else
class TNE1DmaChannel : public TDmaSbChannel
#endif
	{
public:
	TNE1DmaChannel();
	// Normal end-of-transfer ISR processing; must be called for every open
	// channel on the controller, not only those with status bits set (see
	// definition for the missed-interrupt recovery it performs).
	void ProcessIrq();
	// Error ISR processing: resets the channel and fails outstanding requests.
	void ProcessErrorIrq();
	// Stops any in-progress transfer following the documented DCHS procedure.
	void StopTransfer();
	// Final clean-up; requires the channel to be stopped and idle.
	void Close();
	inline TBool IsIdle() const;

private:
#if defined(NE1_DMA_DOUBLE_BUFFER)
	// Double-buffer scheduling hooks (PIL virtuals), analogous to TDmaDbChannel.
	virtual void DoQueue(DDmaRequest& aReq);
	virtual void DoCancelAll();
	virtual void DoDfc(DDmaRequest& aCurReq, SDmaDesHdr*& aCompletedHdr);
private:
	enum { EIdle = 0, ETransferring, ETransferringLast } iState;
#endif
	virtual void QueuedRequestCountChanged();

	inline void ProcessTC(TBool aClearStatus);
	inline void ProcessEnd(TBool aClearStatus);
	inline void ClearStatus(TUint32 aBitmask);
	inline void HandleIsr(TBool aIsComplete);

public:
	TInt iHWCtrlBase;        // Base address of H/W registers for this channel.
	TInt iSubChannel;        // Subchannel number (0-7) within H/W controller.
	TInt iDMACHCReg;         // The content of configuration (CHC) register for this channel.
	TInt iTransferDataShift; // log2 of basic unit of transfer. See TDMAChannelLocator::iTransferShiftSize

	// The following members are public so that they can be
	// modified by the TNaviEngineDmac class.

	/**
	This flag is set when the base register set is filled.

	It allows the ISR to detect the case where a base to work
	register set changeover has happened (END interrupt) but has been masked by
	the completion of the work register set.
	*/
	TBool iBaseValidSet;

	/**
	This counter is incremented each time a LaunchTransfer is started
	and decremented when the ISR handles the transfer complete (TC
	bit). I.e. it does not keep count of the number of times the base
	register set is filled. This allows missed TC interrupts to be
	detected.
	*/
	TInt iTcIrqCount;

	/**
	This spinlock is used to protect both the iBaseValidSet and iTcIrqCount
	variables. It synchronises access between threads and the ISR.

	For the ISR the setting of iBaseValidSet must appear to be atomic with
	the launch of the transfer.

	For iTcIrqCount the spinlock makes the transfer launch and subsequent
	increase of the count appear atomic to the ISR. Otherwise it could
	observe a completed transfer before the count was incremented or
	vice-versa.
	*/
	TSpinLock iIsrLock;
	};
|
196 |
|
//
// Derived Controller Class
//
// PSL implementation of the TDmac interface for the NaviEngine DMAC32
// controller. Owns one TNE1DmaChannel per logical channel.
//
class TNaviEngineDmac : public TDmac
	{
public:
	TNaviEngineDmac();
	// Second-phase construction: reserves descriptors, configures the channel
	// objects from KDMAChannelLocator, binds and enables the DMAC interrupts.
	TInt Create();

	friend void TNE1DmaChannel::HandleIsr(TBool); // Allow channel HandleIsr to call TDmac::HandleIsr
private:
	// from TDmac (PIL pure virtual)
	virtual void Transfer(const TDmaChannel& aChannel, const SDmaDesHdr& aHdr);
	virtual void StopTransfer(const TDmaChannel& aChannel);
	virtual TBool IsIdle(const TDmaChannel& aChannel);
	virtual TInt MaxTransferSize(TDmaChannel& aChannel, TUint aFlags, TUint32 aPslInfo);
	virtual TUint MemAlignMask(TDmaChannel& aChannel, TUint aFlags, TUint32 aPslInfo);
	// from TDmac (PIL virtual)
	virtual void InitHwDes(const SDmaDesHdr& aHdr, TUint32 aSrc, TUint32 aDest, TInt aCount,
						   TUint aFlags, TUint32 aPslInfo, TUint32 aCookie);
	virtual void ChainHwDes(const SDmaDesHdr& aHdr, const SDmaDesHdr& aNextHdr);
	virtual void AppendHwDes(const TDmaChannel& aChannel, const SDmaDesHdr& aLastHdr,
							 const SDmaDesHdr& aNewHdr);
	virtual void UnlinkHwDes(const TDmaChannel& aChannel, SDmaDesHdr& aHdr);
	// other
	// Shared ISR body and the per-controller END/ERR interrupt trampolines.
	static void DMAC32_Isr(TAny* aThis, TInt aController, TInt aTcsMask, TInt aCompleted);
	static void DMAC32_0_End_Isr(TAny* aThis);
	static void DMAC32_0_Err_Isr(TAny* aThis);
	static void DMAC32_2_End_Isr(TAny* aThis);
	static void DMAC32_2_Err_Isr(TAny* aThis);
	static void DMAC32_3_End_Isr(TAny* aThis);
	static void DMAC32_3_Err_Isr(TAny* aThis);
	// Puts a single H/W channel into a known (disabled) state.
	static void InitHWChannel (ENaviEngineDmaController aNEController, TInt aGroup, TInt aSubChannel);
	static void InitAllHWChannels ();
	inline TDmaDesc* HdrToHwDes(const SDmaDesHdr& aHdr);
private:
	static const SCreateInfo KInfo;
public:

	// Programs the "Work Set" registers from a descriptor.
	void PopulateWorkSet(TNE1DmaChannel& aChannel, const SDmaDesHdr& aHdr);
	// Starts a transfer on an idle channel.
	void LaunchTransfer(TNE1DmaChannel& aChannel, TBool aBaseSetValid);
#if defined(NE1_DMA_DOUBLE_BUFFER)
	// Programs the "Base Set" registers from a descriptor.
	void PopulateBaseSet(TNE1DmaChannel& aChannel, const SDmaDesHdr& aHdr);
	// Marks the base set valid (and/or retriggers) while a transfer is running.
	void ContinueTransfer(TNE1DmaChannel& aChannel, TBool aBaseSetValid);
#endif

	TNE1DmaChannel iChannels[EDmaChannelCount];
	};
|
245 |
|
// The single controller instance for this PSL.
static TNaviEngineDmac Controller;

// Static configuration passed to the TDmac base class.
const TDmac::SCreateInfo TNaviEngineDmac::KInfo =
	{
	EDmaChannelCount,       // number of channels
	KDesCount,              // number of hardware descriptors to allocate
	TDmac::KCapsBitHwDes,   // this controller uses hardware descriptors
	sizeof(TDmaDesc),
	EMapAttrSupRw | EMapAttrFullyBlocking // descriptor memory mapping attributes
	                                      // NOTE(review): original author flagged
	                                      // these with "??" - confirm against the
	                                      // platform's cache policy for DMA memory.
	};
|
256 |
|
TNaviEngineDmac::TNaviEngineDmac()
//
// Constructor. Only passes the static configuration to the base class;
// all real initialisation happens in Create().
//
	: TDmac(KInfo)
	{}
|
263 |
|
TInt TNaviEngineDmac::Create()
//
// Second phase construction.
//
// Sequence: base-class Create (allocates descriptor pool), reserve one
// descriptor set per channel, derive each channel's register addresses and
// configuration from KDMAChannelLocator, then bind and enable the END/ERR
// interrupts for the three DMAC32 groups this PSL uses (0, 2 and 3).
//
// @return KErrNone on success, otherwise an error from TDmac::Create().
//
	{
	TInt r = TDmac::Create(KInfo); // Base class Create()
	if (r == KErrNone)
		{
		__DMA_ASSERTA(ReserveSetOfDes(EDmaChannelCount) == KErrNone);

		// Read KDMAChannelLocator constants and populate the values in channel objects.

		for (TInt i=0; i < EDmaChannelCount; ++i)
			{
			TUint ctrlBase = 0;
			switch (KDMAChannelLocator[i].iDMACtrl)
				{
				case EDmaCtrlExBus: ctrlBase = KDMACExBusBase; break;
				case EDMACtrl32:    ctrlBase = KDMAC32Base; break;
				default: __DMA_CANT_HAPPEN();
				}
			// Register block address = controller base + group offset + subchannel offset.
			iChannels[i].iHWCtrlBase = ctrlBase + KDMAChannelLocator[i].iGroup * KDMAGroupOffset +
				KDMAChannelLocator[i].iSubChannel * KDMAChannelOffset;
			iChannels[i].iSubChannel = KDMAChannelLocator[i].iSubChannel;
			// CHC value = table value (excl. SEL field) | subchannel number.
			iChannels[i].iDMACHCReg = KDMAChannelLocator[i].iDMACHCReg | iChannels[i].iSubChannel;
			iChannels[i].iTransferDataShift = KDMAChannelLocator[i].iTransferShiftSize;
			// Consume one free descriptor header per channel (reserved above).
			iFreeHdr = iFreeHdr->iNext;
			}

		// Bind DMA interrupts for the channels we support. Bind returns a
		// handle (>=0) on success.
		TInt irqh0 = Interrupt::Bind(KIntDMAC32_0_End, DMAC32_0_End_Isr, this); __DMA_ASSERTA(irqh0>=0);
		TInt irqh1 = Interrupt::Bind(KIntDMAC32_0_Err, DMAC32_0_Err_Isr, this); __DMA_ASSERTA(irqh1>=0);
		TInt irqh2 = Interrupt::Bind(KIntDMAC32_2_End, DMAC32_2_End_Isr, this); __DMA_ASSERTA(irqh2>=0);
		TInt irqh3 = Interrupt::Bind(KIntDMAC32_2_Err, DMAC32_2_Err_Isr, this); __DMA_ASSERTA(irqh3>=0);
		TInt irqh4 = Interrupt::Bind(KIntDMAC32_3_End, DMAC32_3_End_Isr, this); __DMA_ASSERTA(irqh4>=0);
		TInt irqh5 = Interrupt::Bind(KIntDMAC32_3_Err, DMAC32_3_Err_Isr, this); __DMA_ASSERTA(irqh5>=0);


		if (r == KErrNone)
			{
			// Force all H/W channels into a known disabled state before
			// enabling their interrupts (guards against soft-restart state).
			InitAllHWChannels();

			r = Interrupt::Enable(irqh0); __DMA_ASSERTA(r==KErrNone);
			r = Interrupt::Enable(irqh1); __DMA_ASSERTA(r==KErrNone);
			r = Interrupt::Enable(irqh2); __DMA_ASSERTA(r==KErrNone);
			r = Interrupt::Enable(irqh3); __DMA_ASSERTA(r==KErrNone);
			r = Interrupt::Enable(irqh4); __DMA_ASSERTA(r==KErrNone);
			r = Interrupt::Enable(irqh5); __DMA_ASSERTA(r==KErrNone);

			}
		}
	return r;
	}
|
317 |
|
318 // Initialises all H/W channels. This will make sure they are off on soft restart. |
|
319 void TNaviEngineDmac::InitAllHWChannels() |
|
320 { |
|
321 int i,j; |
|
322 for (i=0;i<KDmaHWCtrl32Count;i++) |
|
323 { |
|
324 for (j=0;j<KDmaCtrl32HWSubChannelCount; j++) InitHWChannel(EDMACtrl32, i, j); |
|
325 AsspRegister::Write32(KDMAC32Base+i*KDMAGroupOffset+KHoDMACONT, 0); |
|
326 } |
|
327 } |
|
328 |
|
// Initialises a single H/W channel.
//
// Writing KHtDMACHS_EN_EN alone to the DCHS register write-enables the EN bit
// while supplying EN=0, which disables the channel.
//
// @param aNEController Which DMA controller the channel belongs to (only
//                      EDMACtrl32 is valid here).
// @param aGroup        Channel group index within the controller.
// @param aSubChannel   Subchannel index (0-7) within the group.
void TNaviEngineDmac::InitHWChannel (ENaviEngineDmaController aNEController, TInt aGroup, TInt aSubChannel)
	{
	TUint neCtrlBase = 0;
	switch(aNEController)
		{
		case EDMACtrl32: neCtrlBase = KDMAC32Base ; break;
		default: __DMA_CANT_HAPPEN();
		}
	neCtrlBase += aGroup*KDMAGroupOffset + aSubChannel*KDMAChannelOffset;
	AsspRegister::Write32(neCtrlBase+KHoDMACHS, KHtDMACHS_EN_EN); //disable channel
	}
|
341 |
|
342 #if defined(NE1_DMA_DOUBLE_BUFFER) |
|
343 |
|
#ifdef _DEBUG
// These values indicate whether all corner cases are being exercised.
// A proper test run shouldn't leave any of these values at zero.
TInt InterruptCounter_DMA32 = 0;    // Interrupt counter
TInt Transfer_IdleOnStart = 0;      // DMA channel is idle on the start of Transfer.
TInt Transfer_NotIdleOnStart = 0;   // DMA channel is not idle on the start of Transfer.
TInt Transfer_MatchWorkSetTrue = 0; // Descriptor matches "work set" registers
TInt Transfer_MatchWorkSetFalse = 0;// Descriptor doesn't match "work set" descriptor.
#endif
|
353 |
|
void TNaviEngineDmac::Transfer(const TDmaChannel& aChannel, const SDmaDesHdr& aHdr)
//
// Initiates a (previously constructed) request on a specific channel.
// Double-buffered variant: chooses between the "Work Set" (channel idle) and
// the "Base Set" (transfer in progress), and recovers from the race where the
// base set was programmed too late to be picked up by the hardware.
//
// @param aChannel The channel to run the fragment on (actually a TNE1DmaChannel).
// @param aHdr     Descriptor header for the fragment to transfer.
//
	{
	TDmaChannel& mutableChannel = const_cast<TDmaChannel&>(aChannel);
	TNE1DmaChannel& channel = static_cast<TNE1DmaChannel&>(mutableChannel);

	// The lock makes the register-set selection and the update of
	// iBaseValidSet/iTcIrqCount atomic with respect to the ISR.
	const TInt irq = __SPIN_LOCK_IRQSAVE(channel.iIsrLock);
	// The fragment descriptor (src/dest address, size) should be placed into either "Work Set" or "Base Set"
	// depending on the actual state of the H/W.
	if (IsIdle(channel))
		{
		// The channel is idle, for the (most likely) reason that both "Work Set" and "Base Set" transfers are
		// completed since the last time we ran this function.
#ifdef _DEBUG
		Transfer_IdleOnStart++;
#endif
		PopulateWorkSet(channel, aHdr);  // Populate "Work Set"
		LaunchTransfer(channel, EFalse); // Start the transfer, base set is invalid
		}
	else
		{
		// "Work Set" transfer is still going on. It seems we will manage to place
		// the next fragment in time for continuous traffic flow.
#ifdef _DEBUG
		Transfer_NotIdleOnStart++;
#endif
		PopulateBaseSet(channel, aHdr);  // Populate "Base Set"
		ContinueTransfer(channel, ETrue);// Indicate Base Set is valid (bvalid = ETrue)

		// We should expect here that the "work set" traffic is still in progress.
		// Once it is completed, "Base Set" content is copied into "Work Set" and the traffic will go on.
		// However, there is a corner case where we configure "Base Set" too late.
		// Therefore, check if transfer is still active.
		if (IsIdle(channel))
			{
			// There is no DMA traffic. There could be two reasons for that. Either,
			// 1. The transfer we have just configured in "Base Set" has already completed, or
			// 2. We configured base set too late, after "Work Set" transfer had already finished.

			// Check BVALID bit:
			// if it's now clear, then it was set in time; if it's
			// still set it was set too late.
			const TUint32 dchs = AsspRegister::Read32(channel.iHWCtrlBase+KHoDMACHS);
			const TBool bvalidSet = dchs & KHtDMACHS_BVALID;

			if (!bvalidSet)
				{
				DMA_PSL_CHAN_TRACE_STATIC(channel, "Base set transferred already");
#ifdef _DEBUG
				Transfer_MatchWorkSetTrue++;
#endif
				}
			else
				{
				DMA_PSL_CHAN_TRACE_STATIC(channel, "Too late for base set");

				// BVALID bit was set after "Work Set" transfer completed, and DMA H/W didn't
				// copy the content of "Base Set" into "Work Set". We have to re-launch the transfer.
				// This time we have to configure "Work Set".
#ifdef _DEBUG
				Transfer_MatchWorkSetFalse++;
#endif
				PopulateWorkSet(channel, aHdr);  // Populate "Work Set".
				LaunchTransfer(channel, EFalse); // Start the transfer, "Base Set" is invalid.
				}
			}
		}
	__SPIN_UNLOCK_IRQRESTORE(channel.iIsrLock, irq);
	}
|
425 #else |
|
void TNaviEngineDmac::Transfer(const TDmaChannel& aChannel, const SDmaDesHdr& aHdr)
//
// Initiates a (previously constructed) request on a specific channel.
// Single-buffered variant: the channel must be idle; the fragment always goes
// into the "Work Set" registers.
//
// @param aChannel The channel to run the fragment on (actually a TNE1DmaChannel).
// @param aHdr     Descriptor header for the fragment to transfer.
//
	{
	TDmaChannel& mutableChannel = const_cast<TDmaChannel&>(aChannel);
	TNE1DmaChannel& channel = static_cast<TNE1DmaChannel&>(mutableChannel);

	DMA_PSL_CHAN_TRACE_STATIC1(channel, ">TNaviEngineDmac::Transfer des=0x%08X", HdrToHwDes(aHdr));

	// Lock out the ISR while programming and launching the transfer.
	const TInt irq = __SPIN_LOCK_IRQSAVE(channel.iIsrLock);
	// The fragment descriptor (src/dest address, size) should be placed into either "Work Set" or "Base Set"
	// depending on the actual state of the H/W.
	__NK_ASSERT_ALWAYS(IsIdle(channel));
	PopulateWorkSet(channel, aHdr);  // Populate "Work Set"
	LaunchTransfer(channel, EFalse); // Start the transfer, base set is invalid
	__SPIN_UNLOCK_IRQRESTORE(channel.iIsrLock, irq);
	}
|
444 #endif |
|
445 |
|
446 |
|
447 void TNaviEngineDmac::StopTransfer(const TDmaChannel& aChannel) |
|
448 // |
|
449 // Stops a running channel. |
|
450 // |
|
451 { |
|
452 TDmaChannel& mutableChannel = const_cast<TDmaChannel&>(aChannel); |
|
453 TNE1DmaChannel& channel = static_cast<TNE1DmaChannel&>(mutableChannel); |
|
454 |
|
455 __KTRACE_OPT(KDMA, Kern::Printf(">TNaviEngineDmac::StopTransfer channel=%d", channel.PslId())); |
|
456 |
|
457 channel.StopTransfer(); |
|
458 |
|
459 __KTRACE_OPT(KDMA, Kern::Printf("<TNaviEngineDmac::StopTransfer channel=%d", channel.PslId())); |
|
460 } |
|
461 |
|
462 TBool TNaviEngineDmac::IsIdle(const TDmaChannel& aChannel) |
|
463 { |
|
464 TDmaChannel& mutableChannel = const_cast<TDmaChannel&>(aChannel); |
|
465 TNE1DmaChannel& channel = static_cast<TNE1DmaChannel&>(mutableChannel); |
|
466 |
|
467 const TBool idle = channel.IsIdle(); |
|
468 __KTRACE_OPT(KDMA, Kern::Printf(">Dmac::IsIdle channel=%d, idle=%d", channel.PslId(), idle)); |
|
469 return idle; |
|
470 } |
|
471 |
|
// Places the descriptor into the "Work Set" registers.
//
// Programs the channel configuration (CHC), source address, destination
// address and transfer counter. The byte count is converted into a hardware
// transfer count by shifting by the channel's transfer-unit size; the
// hardware counter is (units - 1).
//
// @param aChannel The channel whose registers are written.
// @param aHdr     Descriptor header for the fragment.
void TNaviEngineDmac::PopulateWorkSet(TNE1DmaChannel& aChannel, const SDmaDesHdr& aHdr)
	{
	TDmaDesc* pD = HdrToHwDes(aHdr);
	__KTRACE_OPT(KDMA, Kern::Printf(">TNaviEngineDmac::PopulateWorkSet channel=%d des=0x%08X", aChannel.PslId(), pD));

	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMACHC, aChannel.iDMACHCReg); //configure channel
	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMASAW, pD->iSrcAddr);        //source addr
	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMADAW, pD->iDestAddr);       //dest addr
	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMATCW, (pD->iCount>>aChannel.iTransferDataShift)-1); //transfer counter
	}
|
483 |
|
// Starts the transfer.
// @pre The channel is idle.
// @arg aBaseSetValid if true, BVALID bit should be set.
// @pre iIsrLock must be held
void TNaviEngineDmac::LaunchTransfer(TNE1DmaChannel& aChannel, TBool aBaseSetValid)
	{
	__KTRACE_OPT(KDMA, Kern::Printf(">TNaviEngineDmac::LaunchTransfer channel=%d", aChannel.PslId()));
	// Enable the channel (EN) and write-enable the EN bit (EN_EN).
	TInt val = KHtDMACHS_EN|KHtDMACHS_EN_EN;
	// iDMACHCReg is the table CHC value OR'd with the subchannel number, so
	// XOR-ing the subchannel back out recovers the raw CHC value; S/W
	// (mem-to-mem) channels need a software trigger (STG) to start.
	if (TUint(aChannel.iDMACHCReg ^ aChannel.iSubChannel) == (TUint)KHvDMACHC_SW)
		val |=KHtDMACHS_STG;
	if (aBaseSetValid)
		val|=KHtDMACHS_BVALID;

	// Must be updated atomically (w.r.t. the ISR) with the register write
	// below - hence the iIsrLock precondition.
	aChannel.iBaseValidSet = aBaseSetValid;

	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMACHS, val);

	// One more TC interrupt is now expected; see TNE1DmaChannel::iTcIrqCount.
	aChannel.iTcIrqCount++;
	DMA_PSL_CHAN_TRACE_STATIC1(aChannel, "inc iTcIrqCount to %d", aChannel.iTcIrqCount);
	}
|
504 |
|
505 #if defined(NE1_DMA_DOUBLE_BUFFER) |
|
// Places the descriptor into the "Base Set" registers.
//
// Same register programming as PopulateWorkSet, but into the base (shadow)
// set; the hardware copies base -> work when the current work-set transfer
// completes and BVALID is set. Note the CHC register is not rewritten here.
//
// @param aChannel The channel whose base-set registers are written.
// @param aHdr     Descriptor header for the fragment.
void TNaviEngineDmac::PopulateBaseSet(TNE1DmaChannel& aChannel, const SDmaDesHdr& aHdr)
	{
	TDmaDesc* pD = HdrToHwDes(aHdr);
	__KTRACE_OPT(KDMA, Kern::Printf(">TNaviEngineDmac::PopulateBaseSet channel=%d des=0x%08X", aChannel.PslId(), pD));

	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMASAB, pD->iSrcAddr);  // Source addr
	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMADAB, pD->iDestAddr); // Dest addr
	AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMATCB, (pD->iCount>>aChannel.iTransferDataShift)-1); // Transfer counter
	}
|
516 |
|
// Marks the base register set valid (and/or issues a software trigger) while
// a transfer is already in progress, so the hardware will chain into the
// base-set fragment when the current work-set transfer completes.
// @pre DMA transfer is in progress.
// @arg aBaseSetValid if true, BVALID bit should be set.
// @pre iIsrLock must be held
void TNaviEngineDmac::ContinueTransfer(TNE1DmaChannel& aChannel, TBool aBaseSetValid)
	{
	__KTRACE_OPT(KDMA, Kern::Printf(">TNaviEngineDmac::ContinueTransfer channel=%d", aChannel.PslId()));
	TInt val = 0;
	// See LaunchTransfer: XOR-ing the subchannel out of iDMACHCReg recovers
	// the raw CHC value; S/W channels need the software trigger bit.
	if (TUint(aChannel.iDMACHCReg ^ aChannel.iSubChannel) == (TUint)KHvDMACHC_SW)
		val |=KHtDMACHS_STG; // Set software trigger
	if (aBaseSetValid)
		{
		// Must not already be marked valid - the previous base set would be lost.
		__NK_ASSERT_DEBUG(!aChannel.iBaseValidSet);
		aChannel.iBaseValidSet = ETrue;
		val|=KHtDMACHS_BVALID;
		}

	// Only touch the status register if there is actually something to set.
	if (val)
		{
		AsspRegister::Write32(aChannel.iHWCtrlBase+KHoDMACHS, val);
		}
	}
|
538 |
|
// As in TDmaDbChannel, except for the EIdle state, as we have to place the 1st
// and the 2nd fragment into different register sets (work set and base set).
//
// @param aReq The request being queued (PIL has already fragmented it).
void TNE1DmaChannel::DoQueue(DDmaRequest& aReq)
	{
	TNaviEngineDmac* controller = (TNaviEngineDmac*)iController;

	switch (iState)
		{
	case EIdle:
		{
		// First fragment goes into the work set...
		controller->PopulateWorkSet(*this, *iCurHdr);
		// ...and, under the ISR lock, the second (if any) into the base set,
		// so that launch + base-valid update are atomic w.r.t. the ISR.
		const TInt irq = __SPIN_LOCK_IRQSAVE(iIsrLock);
		if (iCurHdr->iNext)
			{
			controller->PopulateBaseSet(*this, *(iCurHdr->iNext));
			controller->LaunchTransfer(*this, ETrue); //BaseSetValid=True
			iState = ETransferring;
			}
		else
			{
			controller->LaunchTransfer(*this, EFalse); //BaseSetValid=False
			iState = ETransferringLast;
			}
		__SPIN_UNLOCK_IRQRESTORE(iIsrLock, irq);
		break;
		}
	case ETransferring:
		// nothing to do - both register sets are busy; DoDfc will feed
		// further fragments as transfers complete.
		break;
	case ETransferringLast:
		// Only the work set is active; queue the new request's first
		// fragment into the base set via Transfer().
		iController->Transfer(*this, *(aReq.iFirstHdr));
		iState = ETransferring;
		break;
	default:
		__DMA_CANT_HAPPEN();
		}
	}
|
576 |
|
// As in TDmaDbChannel: cancelling all requests simply resets the
// double-buffer state machine (the hardware is stopped elsewhere).
void TNE1DmaChannel::DoCancelAll()
	{
	iState = EIdle;
	}
|
582 |
|
// As in TDmaDbChannel: advances past the completed fragment and, while more
// fragments remain, feeds the next-but-one fragment to the controller (the
// next one is already in the hardware's base set).
//
// @param aCurReq       The request the completed fragment belongs to (unused).
// @param aCompletedHdr Out: the descriptor header that has just completed.
void TNE1DmaChannel::DoDfc(DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
	{
	aCompletedHdr = iCurHdr;
	iCurHdr = iCurHdr->iNext;
	switch (iState)
		{
	case ETransferringLast:
		// That was the final fragment - channel is now idle.
		iState = EIdle;
		break;
	case ETransferring:
		if (iCurHdr->iNext == NULL)
			// The fragment now in flight is the last one.
			iState = ETransferringLast;
		else
			// Queue the fragment after the one already in the base set.
			iController->Transfer(*this, *(iCurHdr->iNext));
		break;
	default:
		__DMA_CANT_HAPPEN();
		}
	}
|
603 #endif |
|
604 |
|
// Constructor: no transfer launched yet, no base set pending; the ISR lock
// is created at the high-IRQ generic ordering.
TNE1DmaChannel::TNE1DmaChannel()
	:iBaseValidSet(EFalse), iTcIrqCount(0), iIsrLock(TSpinLock::EOrderGenericIrqHigh0)
	{}
|
608 |
|
/**
Handles normal interrupts as well as recovering from missed interrupts.
It must therefore be called during an ISR, for every valid, open channel
on a DMAC, i.e. not just channels which have status bits set.
*/
void TNE1DmaChannel::ProcessIrq()
	{
	// The spinlock protects access to the iBaseValidSet flag.
	// This is needed because it is possible for TNaviEngineDmac::Transfer to
	// attempt to populate the base set, set iBaseValidSet, but then
	// realize it was too late, and have to unset it.
	const TInt irq = __SPIN_LOCK_IRQSAVE(iIsrLock);

	// check that channel is open
	if(iController == NULL)
		{
		__SPIN_UNLOCK_IRQRESTORE(iIsrLock, irq);
		return;
		}

	// At most 2 transfers can have been launched but not yet acknowledged
	// (one per register set).
	const TInt irqCount = iTcIrqCount;
	__NK_ASSERT_ALWAYS(irqCount >= 0);
	__NK_ASSERT_ALWAYS(irqCount < 3);

	TUint32 dchs = AsspRegister::Read32(iHWCtrlBase+KHoDMACHS);

	// Detect if we have missed 1 TC interrupt.
	// This can happen when there is one transfer in progress,
	// and the channel attempts to populate the base reg set, but
	// is too late and launches a new transfer. The second transfer
	// may then complete during the ISR of the first, or the ISR may
	// not run until after the second has already completed.
	if((irqCount > 0) && IsIdle())
		{
		// Reread status now that we have observed the channel as idle.
		// If a transfer completed between the first read and now, we
		// can handle that as a normal interrupt instead of as a
		// missed interrupt. This is not essential, just neater.
		dchs = AsspRegister::Read32(iHWCtrlBase+KHoDMACHS);
		if(irqCount == 1) // There may or may not be a missed IRQ
			{
			if((dchs & KHtDMACHS_TC) == 0)
				{
				DMA_PSL_CHAN_TRACE1("Channel had missed TC IRQ irqs=%d", irqCount);
				ProcessTC(EFalse); // status already clear - nothing to clear
				}
			}
		else if(irqCount == 2) // There is 1 missed and 1 normal IRQ
			{
			DMA_PSL_CHAN_TRACE1("Channel had missed TC IRQ irqs=%d", irqCount);
			ProcessTC(EFalse);

			// Ensure that the remaining IRQ will be dealt with in the next block
			__NK_ASSERT_ALWAYS((dchs & KHtDMACHS_TC));
			}
		else
			{
			// It should not be possible for there to be more than 2
			// outstanding transfers launched
			FAULT();
			}
		}

	// Deal with normal interrupts
	if (dchs&KHtDMACHS_TC)
		{
		// ISR should not be able to observe the BVALID bit itself
		// since TNaviEngineDmac::Transfer should hold iIsrLock whilst
		// it decides if it was set in time
		__NK_ASSERT_DEBUG(!(dchs & KHtDMACHS_BVALID));

		// Here we find out if a base-set-copy (END) interrupt has
		// been missed. If a TC comes shortly after an END IRQ then
		// it would be impossible to tell by looking at the status
		// register alone
		if(iBaseValidSet)
			{
			DMA_PSL_CHAN_TRACE("END irq missed ");
			ProcessEnd(EFalse);
			}
		ProcessTC(ETrue);
		}
	else if (dchs&KHtDMACHS_END)
		{
		ProcessEnd(ETrue);
		}

	__SPIN_UNLOCK_IRQRESTORE(iIsrLock, irq);
	}
|
698 |
|
// Handles a DMA error interrupt for this channel: forcibly clears the
// channel's status, then completes every outstanding transfer (including a
// pending base set) as failed via HandleIsr(EFalse).
void TNE1DmaChannel::ProcessErrorIrq()
	{
	const TInt irq = __SPIN_LOCK_IRQSAVE(iIsrLock);

	// check that channel is open
	if(iController == NULL)
		{
		__SPIN_UNLOCK_IRQRESTORE(iIsrLock, irq);
		return;
		}

	// reset channel
	ClearStatus(KHtDMACHS_FCLR);
	// Everything that was launched (and the pending base set, if any) must
	// now be completed in error; reset the bookkeeping accordingly.
	TInt badIrqCount = iTcIrqCount;
	iTcIrqCount = 0;

	if(iBaseValidSet)
		{
		iBaseValidSet = EFalse;
		badIrqCount++;
		}

	// complete all outstanding requests as being in error
	for(TInt i=0; i < badIrqCount; i++)
		{
		HandleIsr(EFalse);
		}

	__SPIN_UNLOCK_IRQRESTORE(iIsrLock, irq);
	}
|
/**
Handle a transfer complete (work set transfer complete and base set was
empty) on this channel.

@param aClearStatus If true, the TC and END status bits should be cleared.
@pre iIsrLock is held (caller is ProcessIrq/ProcessErrorIrq).
*/
void TNE1DmaChannel::ProcessTC(TBool aClearStatus)
	{
	// iTcIrqCount may be zero if StopTransfer was called
	// between the transfer being started and the ISR running
	if(iTcIrqCount>0)
		{
		DMA_PSL_CHAN_TRACE1("dec iTcIrqCount to %d", iTcIrqCount);
		iTcIrqCount--;
		DMA_PSL_CHAN_TRACE("TC");
		HandleIsr(ETrue); // complete the request successfully
		}

	__NK_ASSERT_DEBUG(iTcIrqCount >= 0);

	if(aClearStatus)
		ClearStatus(KHtDMACHS_TC|KHtDMACHS_END); //Traffic completed BVALID=OFF
	}
|
752 |
|
/**
Handle an END (transfer complete, base set loaded into work set) on this channel.

@param aClearStatus If true, the END status bit should be cleared.
@pre iIsrLock is held (caller is ProcessIrq).
*/
void TNE1DmaChannel::ProcessEnd(TBool aClearStatus)
	{
	// Only meaningful if a base set had actually been marked valid;
	// consuming it here completes the corresponding fragment.
	if(iBaseValidSet)
		{
		DMA_PSL_CHAN_TRACE("END");
		iBaseValidSet = EFalse;
		HandleIsr(ETrue);
		}

	if(aClearStatus)
		ClearStatus(KHtDMACHS_END); //Traffic completed BVALID=ON
	}
|
770 |
|
771 /** |
|
772 @param aBitmask The bits to be cleared in this channel's status register |
|
773 */ |
|
774 void TNE1DmaChannel::ClearStatus(TUint32 aBitmask) |
|
775 { |
|
776 if (TUint((this->iDMACHCReg) ^ (this->iSubChannel)) == (TUint)KHvDMACHC_SW) |
|
777 aBitmask |= KHtDMACHS_STG; //Add STG for S/W channel |
|
778 |
|
779 AsspRegister::Write32(iHWCtrlBase+KHoDMACHS, aBitmask); //End-of-Int |
|
780 } |
|
781 |
|
/**
Call HandleIsr for this channel.

@param aIsComplete ETrue if the transfer succeeded, EFalse on error.
*/
void TNE1DmaChannel::HandleIsr(TBool aIsComplete)
	{
	// iController must be cast so that the private method
	// TDmac::HandleIsr can be called, since this method is
	// a friend of TNaviEngineDmac, but not of TDmac.
	static_cast<TNaviEngineDmac*>(iController)->HandleIsr(*this, aIsComplete);
	}
|
792 |
|
/**
Stop transfer for this channel.

Implements the documented procedure for clearing the EN bit via CPU access
while a transfer may be in progress, then resets the PSL bookkeeping so the
ISR will not mistake the stop for missed interrupts.
*/
void TNE1DmaChannel::StopTransfer()
	{
	const TInt irq = __SPIN_LOCK_IRQSAVE(iIsrLock);
	// At this point, the device driver should have cancelled the DMA request.

	// The procedure for clearing the EN bit to 0 via CPU access during DMA transfer (EN bit = 1):
	TUint32 dmaCHS = AsspRegister::Read32(iHWCtrlBase+KHoDMACHS);

	// Read the DCHSn register to be cleared at the relevant channel and confirm that
	// both the RQST and ACT bits are cleared to 0. If either or both of them are 1,
	// perform polling until their values become 0,
	// OR until KHtDMACHS_EN is cleared by the H/W at the end of the transfer -
	// while we're polling...
	while( (dmaCHS & KHtDMACHS_EN) &&
	       dmaCHS & (KHtDMACHS_RQST | KHtDMACHS_ACT) )
		{
		dmaCHS = AsspRegister::Read32(iHWCtrlBase+KHoDMACHS);
		}

	// enable writing to EN bit..
	dmaCHS |= KHtDMACHS_EN_EN;
	AsspRegister::Write32(iHWCtrlBase+KHoDMACHS, dmaCHS);

	// clear the EN bit to 0
	// and set the FCLR bit of the DCHSn register to 1 (forced clear of status).
	dmaCHS &= (~KHtDMACHS_EN);
	dmaCHS |= KHtDMACHS_FCLR;
	AsspRegister::Write32(iHWCtrlBase+KHoDMACHS, dmaCHS);

	// check that channel is idle, and status bits have been cleared
	__NK_ASSERT_ALWAYS(IsIdle());
	dmaCHS = AsspRegister::Read32(iHWCtrlBase+KHoDMACHS);
	__NK_ASSERT_ALWAYS((dmaCHS & (KHtDMACHS_TC | KHtDMACHS_END)) == 0);
	__NK_ASSERT_ALWAYS(iTcIrqCount >=0);

	// given the above checks, clear the iTcIrqCount and iBaseValidSet so
	// that the ISR won't mistakenly think there are missed interrupts
	iTcIrqCount = 0;
	iBaseValidSet = EFalse;

	__SPIN_UNLOCK_IRQRESTORE(iIsrLock, irq);
	}
|
838 |
|
/**
Close this channel, detaching it from the controller.

@pre Channel has been stopped (StopTransfer() has run) and is idle.
*/
void TNE1DmaChannel::Close()
	{
	// The lock prevents a channel being closed
	// during an ISR for this channel.
	const TInt irq = __SPIN_LOCK_IRQSAVE(iIsrLock);
	DMA_PSL_CHAN_TRACE("Close");

	// Check that the channel was Idle and Stopped.
	__NK_ASSERT_ALWAYS(IsIdle());
	__NK_ASSERT_ALWAYS(iTcIrqCount == 0);
	__NK_ASSERT_ALWAYS(!iBaseValidSet);


	// Clear iController here, in advance of the PIL doing so.
	// If we did not, then once the lock is released the ISR
	// could observe it as non-NULL, proceed, but then have the PIL
	// clear it mid-ISR.
	iController = NULL;

	__SPIN_UNLOCK_IRQRESTORE(iIsrLock, irq);
	}
|
863 |
|
864 TBool TNE1DmaChannel::IsIdle() const |
|
865 { |
|
866 TUint status = AsspRegister::Read32(iHWCtrlBase+KHoDMACHS); |
|
867 return !(status & KHtDMACHS_EN); |
|
868 } |
|
869 |
|
870 |
|
871 void TNE1DmaChannel::QueuedRequestCountChanged() |
|
872 { |
|
873 const TInt qreqs = __e32_atomic_load_acq32(&iQueuedRequests); |
|
874 DMA_PSL_CHAN_TRACE1("TNE1DmaChannel::QueuedRequestCountChanged() %d", qreqs); |
|
875 __DMA_ASSERTA(qreqs >= 0); |
|
876 } |
|
877 |
|
878 |
|
879 TInt TNaviEngineDmac::MaxTransferSize(TDmaChannel& aChannel, TUint /*aFlags*/, TUint32 /*aPslInfo*/) |
|
880 // |
|
881 // Returns the maximum transfer size for a given transfer. |
|
882 // |
|
883 { |
|
884 TNE1DmaChannel& channel = (TNE1DmaChannel&)aChannel; |
|
885 return (1<<channel.iTransferDataShift) * KMaxDMAUnitTransferLen; |
|
886 } |
|
887 |
|
888 |
|
889 TUint TNaviEngineDmac::MemAlignMask(TDmaChannel& aChannel, TUint /*aFlags*/, TUint32 /*aPslInfo*/) |
|
890 // |
|
891 // Returns the memory buffer alignment restrictions mask for a given transfer. |
|
892 // |
|
893 { |
|
894 TNE1DmaChannel& channel = (TNE1DmaChannel&)aChannel; |
|
895 return (1<<channel.iTransferDataShift) - 1; |
|
896 } |
|
897 |
|
898 |
|
899 void TNaviEngineDmac::InitHwDes(const SDmaDesHdr& aHdr, TUint32 aSrc, TUint32 aDest, TInt aCount, |
|
900 TUint aFlags, TUint32 /*aPslInfo*/, TUint32 /*aCookie*/) |
|
901 // |
|
902 // Sets up (from a passed in request) the descriptor with that fragment's source and destination address, |
|
903 // the fragment size, and the (driver/DMA controller) specific transfer parameters (mem/peripheral, |
|
904 // burst size, transfer width). |
|
905 // |
|
906 { |
|
907 TDmaDesc* pD = HdrToHwDes(aHdr); |
|
908 |
|
909 __KTRACE_OPT(KDMA, Kern::Printf("TNaviEngineDmac::InitHwDes 0x%08X", pD)); |
|
910 |
|
911 // Unaligned descriptor? Bug in generic layer! |
|
912 __DMA_ASSERTD(IsHwDesAligned(pD)); |
|
913 |
|
914 pD->iSrcAddr = (aFlags & KDmaPhysAddrSrc) ? aSrc : Epoc::LinearToPhysical(aSrc); |
|
915 pD->iDestAddr = (aFlags & KDmaPhysAddrDest) ? aDest : Epoc::LinearToPhysical(aDest); |
|
916 pD->iCount = aCount; |
|
917 } |
|
918 |
|
void TNaviEngineDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHd*/)
//
// Chains hardware descriptors together.
// The DMAC32 controller does not support linked descriptors, so this is
// intentionally a no-op.
//
	{
	}
|
926 |
|
void TNaviEngineDmac::AppendHwDes(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aLastHdr*/,
								  const SDmaDesHdr& /*aNewHdr*/)
//
// Appends a descriptor to the chain while the channel is running.
// The DMAC32 controller does not support linked descriptors, so this is
// intentionally a no-op.
//
	{
	}
|
935 |
|
void TNaviEngineDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
//
// Unlinks the last item in the h/w descriptor chain from a subsequent chain
// that it was possibly linked to.
// The DMAC32 controller does not support linked descriptors, so this is
// intentionally a no-op.
//
	{
	}
|
943 |
|
void TNaviEngineDmac::DMAC32_Isr(TAny* aThis, TInt aController, TInt aDmaStat, TInt aCompleted)
//
// Generic part of all DMA32 interrupts. Scans the subchannels of one
// controller group and dispatches per-channel IRQ processing.
//
// @param aThis       The TNaviEngineDmac instance (bound as TAny* by the ISR).
// @param aController Index of the DMAC32 controller group being serviced.
// @param aDmaStat    Group status word; only meaningful when aCompleted != 0.
// @param aCompleted  Non-zero for a transfer-end interrupt; zero for an error
//                    interrupt (in which case the H/W registers are read
//                    directly, since aDmaStat is not valid).
//
	{
	DMA_PSL_TRACE("Begin ISR");

	TNaviEngineDmac& me = *static_cast<TNaviEngineDmac*>(aThis);
	int i;

	if (aCompleted) // Transfer-completed interrupt has occurred
		{
		// Go through all eight subchannels to check which event has occurred.
		for (i=0;i<KDmaCtrl32HWSubChannelCount;i++)
			{
			TInt channel = DMAC32_HWChannelsLocator[aController][i];
#ifdef _DEBUG
			// NOTE(review): this path bounds-checks against EDma32ChannelCount
			// while the error path below uses EDmaChannelCount - confirm which
			// enumerator is the intended bound for me.iChannels[].
			if (channel >= EDma32ChannelCount) __DMA_CANT_HAPPEN();
#endif

			// Skip unused physical channels,
			// i.e. those with no corresponding entry
			// in KDMAChannelLocator.
			if(channel == -1)
				continue;

			TNE1DmaChannel& ne1Chan = me.iChannels[channel];

			ne1Chan.ProcessIrq();
			}
		}
	else // Error interrupt has occurred. aDmaStat is not valid - read the H/W registers instead.
		{
		// Go through all eight subchannels to check which event has occurred.
		for (i=0;i<KDmaCtrl32HWSubChannelCount;i++)
			{
			TInt dchs= AsspRegister::Read32(KDMAC32Base+aController*KDMAGroupOffset+i*KDMAChannelOffset+KHoDMACHS);

			if (dchs&KHtDMACHS_ERR)
				{
				TInt channel = DMAC32_HWChannelsLocator[aController][i];
#ifdef _DEBUG
				if (channel >= EDmaChannelCount) __DMA_CANT_HAPPEN();
#endif
				TNE1DmaChannel& ne1Chan = me.iChannels[channel];
				ne1Chan.ProcessErrorIrq();
				}
			}
		}
#if defined(NE1_DMA_DOUBLE_BUFFER)
#ifdef _DEBUG
	// Debug-only count of serviced DMA32 interrupts.
	InterruptCounter_DMA32++;
#endif
#endif
	}
|
1002 |
|
1003 |
|
1004 void TNaviEngineDmac::DMAC32_0_End_Isr(TAny* aThis) |
|
1005 { |
|
1006 TInt stat = (TInt)AsspRegister::Read32(KDMAC32Base+0*KDMAGroupOffset+KHoDMASTAT); |
|
1007 DMAC32_Isr(aThis, 0, stat, 1); |
|
1008 } |
|
1009 void TNaviEngineDmac::DMAC32_2_End_Isr(TAny* aThis) |
|
1010 { |
|
1011 TInt stat = (TInt)AsspRegister::Read32(KDMAC32Base+2*KDMAGroupOffset+KHoDMASTAT); |
|
1012 DMAC32_Isr(aThis, 2, stat, 1); |
|
1013 } |
|
1014 void TNaviEngineDmac::DMAC32_3_End_Isr(TAny* aThis) |
|
1015 { |
|
1016 TInt stat = (TInt)AsspRegister::Read32(KDMAC32Base+3*KDMAGroupOffset+KHoDMASTAT); |
|
1017 DMAC32_Isr(aThis, 3, stat, 1); |
|
1018 } |
|
1019 |
|
// Error ISR for DMAC32 controller group 0. The status argument is unused (0)
// and aCompleted=0 tells the common handler to read the H/W registers.
void TNaviEngineDmac::DMAC32_0_Err_Isr(TAny* aThis)
	{
	DMAC32_Isr(aThis, 0, 0, 0);
	}
|
// Error ISR for DMAC32 controller group 2. The status argument is unused (0)
// and aCompleted=0 tells the common handler to read the H/W registers.
void TNaviEngineDmac::DMAC32_2_Err_Isr(TAny* aThis)
	{
	DMAC32_Isr(aThis, 2, 0, 0);
	}
|
// Error ISR for DMAC32 controller group 3. The status argument is unused (0)
// and aCompleted=0 tells the common handler to read the H/W registers.
void TNaviEngineDmac::DMAC32_3_Err_Isr(TAny* aThis)
	{
	DMAC32_Isr(aThis, 3, 0, 0);
	}
|
1032 |
|
inline TDmaDesc* TNaviEngineDmac::HdrToHwDes(const SDmaDesHdr& aHdr)
//
// Narrows the return type of the base-class helper: maps a generic
// descriptor header to this PSL's hardware descriptor type.
//
	{
	return static_cast<TDmaDesc*>(TDmac::HdrToHwDes(aHdr));
	}
|
1040 |
|
1041 // |
|
1042 // Channel Opening/Closing (Channel Allocator) |
|
1043 // |
|
1044 |
|
1045 TDmaChannel* DmaChannelMgr::Open(TUint32 aOpenId) |
|
1046 { |
|
1047 __KTRACE_OPT(KDMA, Kern::Printf(">DmaChannelMgr::Open aOpenId=%d", aOpenId)); |
|
1048 |
|
1049 __DMA_ASSERTA(aOpenId < static_cast<TUint32>(EDmaChannelCount)); |
|
1050 |
|
1051 TDmaChannel* pC = Controller.iChannels + aOpenId; |
|
1052 if (pC->IsOpened()) |
|
1053 pC = NULL; |
|
1054 else |
|
1055 { |
|
1056 pC->iController = &Controller; |
|
1057 pC->iPslId = aOpenId; |
|
1058 } |
|
1059 return pC; |
|
1060 } |
|
1061 |
|
// Close a channel previously returned by Open(). Delegates to the channel's
// own Close(), which asserts the channel is idle/stopped and detaches it from
// the controller under the ISR lock.
void DmaChannelMgr::Close(TDmaChannel* aChannel)
	{
	static_cast<TNE1DmaChannel*>(aChannel)->Close();
	}
|
1066 |
|
// No PSL-specific extension commands are implemented on this platform.
TInt DmaChannelMgr::StaticExtension(TInt /* aCmd */, TAny* /* aArg */)
	{
	return KErrNotSupported;
	}
|
1071 |
|
1072 // |
|
1073 // DLL Exported Function |
|
1074 // |
|
1075 |
|
DECLARE_STANDARD_EXTENSION()
//
// Kernel extension entry point: initialises the DMA controller object for
// this platform. Returns the result of the controller's Create(), which is
// the extension's load status.
//
	{
	__KTRACE_OPT2(KBOOT, KDMA, Kern::Printf("Starting DMA Extension"));

	return Controller.Create();
	}