|
1 // Copyright (c) 2002-2010 Nokia Corporation and/or its subsidiary(-ies). |
|
2 // All rights reserved. |
|
3 // This component and the accompanying materials are made available |
|
4 // under the terms of "Eclipse Public License v1.0" |
|
5 // which accompanies this distribution, and is available |
|
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 // |
|
8 // Initial Contributors: |
|
9 // Nokia Corporation - initial contribution. |
|
10 // |
|
11 // Contributors: |
|
12 // |
|
13 // Description: |
|
14 // Test driver for DMA V2 framework |
|
15 // |
|
16 // |
|
17 |
|
18 #include <kernel/kern_priv.h> |
|
19 #include <drivers/dma.h> |
|
20 #include "d_dma2.h" |
|
21 |
|
22 _LIT(KClientPanicCat, "D_DMA2"); |
|
23 _LIT(KDFCThreadName,"D_DMA_DFC_THREAD"); |
|
24 _LIT(KIsrCbDfcThreadName,"D_DMA_IsrCb_thread"); |
|
25 const TInt KDFCThreadPriority=26; |
|
26 |
|
27 class TStopwatch |
|
28 { |
|
29 public: |
|
30 TStopwatch() |
|
31 :iStart(0), iStop(0) |
|
32 {} |
|
33 |
|
34 void Start() |
|
35 {iStart = NKern::FastCounter();} |
|
36 |
|
37 void Stop() |
|
38 { |
|
39 iStop = NKern::FastCounter(); |
|
40 |
|
41 __KTRACE_OPT(KDMA, Kern::Printf(">TStopwatch::Stop FastCounter ticks: iStart=0x%lx iStop=0x%lx", iStart, iStop)); |
|
42 } |
|
43 |
|
44 TUint64 ReadMicroSecs() const |
|
45 { |
|
46 #ifndef __SMP__ |
|
47 TUint64 diff = 0; |
|
48 if(iStart > iStop) |
|
49 { |
|
50 diff = (KMaxTUint64 - iStart) + iStop; |
|
51 } |
|
52 else |
|
53 { |
|
54 diff = iStop - iStart; |
|
55 } |
|
56 return FastCountToMicroSecs(diff); |
|
57 #else |
|
58 //On SMP it is possible for the value returned from |
|
59 //NKern::FastCounter to depend on the current CPU (ie. |
|
60 //NaviEngine) |
|
61 // |
|
62 //One solution would be to tie DFC's and ISR's to the same |
|
63 //core as the client, but this would reduce the usefulness of |
|
64 //SMP testing. |
|
65 return 0; |
|
66 #endif |
|
67 } |
|
68 private: |
|
69 |
|
70 TUint64 FastCountToMicroSecs(TUint64 aCount) const |
|
71 { |
|
72 const TUint64 countsPerS = NKern::FastCounterFrequency(); |
|
73 |
|
74 TUint64 timeuS = (aCount*1000000)/countsPerS; |
|
75 __KTRACE_OPT(KDMA, Kern::Printf(">TStopwatch::FastCountToMicroSecs FastCounter ticks: aCount=0x%lx countsPerS=0x%lx time=0x%lx", aCount, countsPerS, timeuS)); |
|
76 return timeuS; |
|
77 } |
|
78 |
|
79 TUint64 iStart; |
|
80 TUint64 iStop; |
|
81 }; |
|
82 |
|
83 ////////////////////////////////////////////////////////////////////////////// |
|
84 |
|
class DClientDmaRequest;
/**
Driver channel. Only accessible by a single client thread
*/
class DDmaTestSession : public DLogicalChannelBase
	{
public:
	DDmaTestSession();
	virtual ~DDmaTestSession();
protected:
	// from DLogicalChannelBase
	virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
	virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
	virtual TInt RequestUserHandle(DThread* aThread, TOwnerType aType);
private:
	TInt DoGetInfo(TAny* aInfo);

	// Channel operations addressed by "cookie" - the opaque handle
	// (actually the kernel address of the TDmaChannel) handed out to
	// the user side by OpenDmaChannel().
	TInt OpenDmaChannel(TUint aPslCookie, TUint& aDriverCookie);
	TInt OpenDmaChannel(TUint& aDriverCookie, TDmaChannel::SCreateInfo& aInfo);
	TInt LinkDmaChannelByCookie(TUint aDriverCookie);
	TInt UnlinkDmaChannelByCookie(TUint aDriverCookie);
	TInt CloseDmaChannelByCookie(TUint aDriverCookie);
	TInt PauseDmaChannelByCookie(TUint aDriverCookie);
	TInt ResumeDmaChannelByCookie(TUint aDriverCookie);
	TInt GetChannelCapsByCookie(TUint aDriverCookie, SDmacCaps& aChannelCaps);
	TInt GetChannelCapsByCookie(TUint aDriverCookie, TDmacTestCaps& aChannelCaps);
	TInt CancelAllByCookie(TUint aDriverCookie);
	TInt IsrRedoRequestByCookie(TUint aDriverCookie,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb);
	TInt IsQueueEmptyByCookie(TUint aDriverCookie, TBool& aQueueEmpty);
	TInt ChannelIsOpenedByCookie(TUint aDriverCookie, TBool& aChannelOpen);
	TInt EnableDstElementCountingByCookie(TUint aDriverCookie);
	TInt EnableSrcElementCountingByCookie(TUint aDriverCookie);
	TInt DisableDstElementCountingByCookie(TUint aDriverCookie);
	TInt DisableSrcElementCountingByCookie(TUint aDriverCookie);
	TInt TotalNumDstElementsTransferredByCookie(TUint aDriverCookie);
	TInt TotalNumSrcElementsTransferredByCookie(TUint aDriverCookie);

	// Same operations addressed by index into the iChannels array
	// (the ...ByCookie variants resolve the cookie and delegate here).
	void CloseDmaChannelByIndex(TInt aIndex);
	void CancelAllByIndex(TInt aIndex);
	TInt LinkDmaChannelByIndex(TInt aIndex);
	TInt UnlinkDmaChannelByIndex(TInt aIndex);
	TInt PauseDmaChannelByIndex(TInt aIndex);
	TInt ResumeDmaChannelByIndex(TInt aIndex);
	TInt IsrRedoRequestByIndex(TInt aIndex,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb);
	void EnableDstElementCountingByIndex(TInt aIndex);
	void EnableSrcElementCountingByIndex(TInt aIndex);
	void DisableDstElementCountingByIndex(TInt aIndex);
	void DisableSrcElementCountingByIndex(TInt aIndex);
	TInt TotalNumDstElementsTransferredByIndex(TInt aIndex);
	TInt TotalNumSrcElementsTransferredByIndex(TInt aIndex);
	TInt CreateSharedChunk();
	TUint OpenSharedChunkHandle();

	/**
	Creates a new kernel-side DMA request object, associated with a previously
	opened channel

	@param aChannelCookie - A channel cookie as returned by OpenDmaChannel
	@param aRequestCookie - On success will be a cookie by which the dma request can be referred to
	@param aNewCallback - If true, then a new style DMA callback will be used
	*/
	TInt CreateDmaRequest(TUint aChannelCookie, TUint& aRequestCookie, TBool aNewCallback = EFalse, TInt aMaxFragmentSizeBytes=0);

	/**
	Destroys a previously created dma request object
	*/
	TInt DestroyDmaRequestByCookie(TUint aRequestCookie);

	void DestroyDmaRequestByIndex(TInt aIndex);


	// Translate a user-side cookie into an index in iChannels /
	// iClientDmaReqs; both return KErrNotFound (<0) if unknown.
	TInt CookieToChannelIndex(TUint aDriverCookie) const;
	TInt CookieToRequestIndex(TUint aRequestCookie) const;

	// Converts chunk-relative src/dst offsets into kernel virtual addresses
	void MakeAddressesAbsoulute(TDmaTransferArgs& aTransferArgs) const;
	TInt FragmentRequest(TUint aRequestCookie, const TDmaTransferArgs& aTransferArgs, TBool aLegacy=ETrue);

	TInt QueueRequest(TUint aRequestCookie, TRequestStatus* aStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs);
	DClientDmaRequest* RequestFromCookie(TUint aRequestCookie) const;
	TInt RequestFragmentCount(TUint aRequestCookie);
	TDmaV2TestInfo ConvertTestInfo(const TDmaTestInfo& aOldInfo) const;
private:
	DThread* iClient;	// the single thread allowed to use this session
	TDynamicDfcQue* iDfcQ;	// queue for ordinary DFC-context completions
	TDynamicDfcQue* iIsrCallbackDfcQ; // Will be used by requests which complete with an ISR callback
	static const TInt KMaxChunkSize;	// size of the shared test chunk
	TLinAddr iChunkBase;	// kernel base address of iChunk
	DChunk* iChunk;	// shared chunk holding DMA src/dst buffers

	RPointerArray<TDmaChannel> iChannels;	// channels opened by this session
	RPointerArray<DClientDmaRequest> iClientDmaReqs;	// requests created by this session
	};
|
176 |
|
177 |
|
/**
Allows a TClientRequest to be associated with a DDmaRequest
*/
class DClientDmaRequest : public DDmaRequest
	{
public:
	// Factory function; returns NULL on failure
	static DClientDmaRequest* Construct(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle=EFalse, TInt aMaxTransferSize=0);
	~DClientDmaRequest();

	TInt Queue(TRequestStatus* aRequestStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs);
	// Store ISR re-queue arguments consumed later by RedoRequest()
	void AddRequeArgs(const TIsrRequeArgsSet& aRequeArgSet);

	// Duration of the last transfer in microseconds (0 on SMP builds)
	TUint64 GetDuration()
		{return iStopwatch.ReadMicroSecs();}

	/**
	Store a copy of the TDmaTransferArgs which was used for fragmentation
	for argument checking
	*/
	void SetAddressParms(const TDmaTransferArgs& aAddressParms)
		{iFragmentedTransfer = aAddressParms;}

	/**
	Retrieve stored TDmaTransferArgs
	*/
	const TDmaTransferArgs& GetAddressParms() const
		{return iFragmentedTransfer;}

protected:
	// Second-phase construction: allocates iClientDataRequest
	TInt Create();
	/** Construct with old style callback */
	DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TInt aMaxTransferSize);

	/** Construct with new style callback */
	DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle, TInt aMaxTransferSize);

private:
	// v1 framework callback - translated into a Callback() invocation
	static void CallbackOldStyle(TResult aResult, TAny* aRequest);
	// v2 framework callback - may run in thread or ISR context
	static void Callback(TUint, TDmaResult, TAny*, SDmaDesHdr*);
	// Completes the client's TRequestStatus; called directly or via iDfc
	static void CompleteCallback(TAny* aRequest);

	void DoCallback(TUint, TDmaResult);
	TBool RedoRequest();

	//!< Used to return a TCallbackRecord and transfer time
	TClientDataRequest2<TCallbackRecord, TUint64>* iClientDataRequest;

	DThread* const iClient;
	TDfcQue* const iDfcQ; //!< Use the DDmaTestSession's dfc queue
	TDfc iDfc;	// used to defer completion when Callback() runs in ISR context

	TStopwatch iStopwatch;	// times queue -> completion
	TIsrRequeArgsSet iIsrRequeArgSet;	// pending ISR re-queue parameters

	/**
	This will be updated each time fragment is called.
	It is required so that, at queue time, if ISR re-queue
	arguments are added, they can be checked for sanity
	*/
	TDmaTransferArgs iFragmentedTransfer;
	};
|
239 |
|
240 DClientDmaRequest* DClientDmaRequest::Construct(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle, TInt aMaxTransferSize) |
|
241 { |
|
242 DClientDmaRequest* dmaRequest = NULL; |
|
243 if(aNewStyle) |
|
244 { |
|
245 #ifdef DMA_APIV2 |
|
246 dmaRequest = new DClientDmaRequest(aClient, aDfcQ, aChannel, aNewStyle, aMaxTransferSize); |
|
247 #else |
|
248 TEST_FAULT; // if a new style dma request was requested it should have been caught earlier |
|
249 #endif |
|
250 } |
|
251 else |
|
252 { |
|
253 dmaRequest = new DClientDmaRequest(aClient, aDfcQ, aChannel, aMaxTransferSize); |
|
254 } |
|
255 |
|
256 if(dmaRequest == NULL) |
|
257 { |
|
258 return dmaRequest; |
|
259 } |
|
260 |
|
261 const TInt r = dmaRequest->Create(); |
|
262 if(r != KErrNone) |
|
263 { |
|
264 delete dmaRequest; |
|
265 dmaRequest = NULL; |
|
266 } |
|
267 return dmaRequest; |
|
268 } |
|
269 |
|
/**
Construct with old style (v1) callback - the framework will invoke
CallbackOldStyle() on completion. iDfc is prepared for deferring
ISR-context completions onto aDfcQ.
*/
DClientDmaRequest::DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TInt aMaxFragmentSize)
	:DDmaRequest(aChannel, &CallbackOldStyle, this, aMaxFragmentSize),
	iClientDataRequest(NULL),
	iClient(aClient),
	iDfcQ(aDfcQ),
	iDfc(CompleteCallback,NULL, iDfcQ, KMaxDfcPriority)
	{
	}
|
#ifdef DMA_APIV2
/**
Construct with new style (v2) callback - the framework will invoke
Callback() on completion. aNewStyle only disambiguates the overload.
*/
DClientDmaRequest::DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool /*aNewStyle*/, TInt aMaxFragmentSize)
	:DDmaRequest(aChannel, &Callback, this, aMaxFragmentSize),
	iClientDataRequest(NULL),
	iClient(aClient),
	iDfcQ(aDfcQ),
	iDfc(CompleteCallback,NULL, iDfcQ, KMaxDfcPriority)
	{
	}
#endif
|
288 |
|
289 TInt DClientDmaRequest::Create() |
|
290 { |
|
291 return Kern::CreateClientDataRequest2(iClientDataRequest); |
|
292 } |
|
293 |
|
294 DClientDmaRequest::~DClientDmaRequest() |
|
295 { |
|
296 __KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::~DClientDmaRequest")); |
|
297 if(iClientDataRequest) |
|
298 { |
|
299 Kern::DestroyClientRequest(iClientDataRequest); |
|
300 } |
|
301 } |
|
302 |
|
303 /** |
|
304 Queue the DClientDmaRequest. |
|
305 |
|
306 @param aRequestStatus Pointer to the client's request status |
|
307 @param aRecord Pointer to the user's TCallbackRecord, may be null |
|
308 @return |
|
309 -KErrInUse The client request is in use |
|
310 -KErrNone success |
|
311 */ |
|
312 TInt DClientDmaRequest::Queue(TRequestStatus* aRequestStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs) |
|
313 { |
|
314 __NK_ASSERT_ALWAYS(aRecord); |
|
315 __NK_ASSERT_ALWAYS(aDurationMicroSecs); |
|
316 |
|
317 //erase results from last transfer |
|
318 iClientDataRequest->Data1().Reset(); |
|
319 iClientDataRequest->SetDestPtr1(aRecord); |
|
320 |
|
321 iClientDataRequest->SetDestPtr2(aDurationMicroSecs); |
|
322 |
|
323 |
|
324 TInt r = iClientDataRequest->SetStatus(aRequestStatus); |
|
325 if(r != KErrNone) |
|
326 { |
|
327 return r; |
|
328 } |
|
329 |
|
330 iStopwatch.Start(); |
|
331 #ifdef DMA_APIV2 |
|
332 r = DDmaRequest::Queue(); |
|
333 #else |
|
334 // old version of queue did not return an error code |
|
335 DDmaRequest::Queue(); |
|
336 r = KErrNone; |
|
337 #endif |
|
338 |
|
339 return r; |
|
340 } |
|
341 |
|
/** Store ISR re-queue arguments; they are consumed one at a time by
RedoRequest() when a transfer completes with an ISR callback */
void DClientDmaRequest::AddRequeArgs(const TIsrRequeArgsSet& aRequeArgSet)
	{
	iIsrRequeArgSet = aRequeArgSet;
	}
|
346 |
|
347 /** |
|
348 If a transfer complete callback in ISR context s received this will be |
|
349 called to redo the request with the first entry in the array |
|
350 |
|
351 @return ETrue If the redo was successful - indicates that another callback is comming |
|
352 */ |
|
353 TBool DClientDmaRequest::RedoRequest() |
|
354 { |
|
355 TIsrRequeArgs args = iIsrRequeArgSet.GetArgs(); |
|
356 const TInt r = args.Call(iChannel); |
|
357 TCallbackRecord& record = iClientDataRequest->Data1(); |
|
358 record.IsrRedoResult(r); |
|
359 return (r == KErrNone); |
|
360 } |
|
361 |
|
362 |
|
/**
Calls TDmaChannel::IsrRedoRequest on aChannel
with this object's parameters
*/
TInt TIsrRequeArgs::Call(TDmaChannel& aChannel)
	{
#ifdef DMA_APIV2
	return aChannel.IsrRedoRequest(iSrcAddr, iDstAddr, iTransferCount, iPslRequestInfo, iIsrCb);
#else
	// IsrRedoRequest exists only in the v2 API - reaching here on a
	// v1 build is a test configuration error
	TEST_FAULT;
	return KErrNotSupported;
#endif
	}
|
376 |
|
377 /** Translate an old style dma callback to a new-style one |
|
378 */ |
|
379 void DClientDmaRequest::CallbackOldStyle(TResult aResult, TAny* aArg) |
|
380 { |
|
381 __KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CallBackOldStyle: TResult result=%d", aResult)); |
|
382 TEST_ASSERT(aResult != EBadResult); |
|
383 //translate result code |
|
384 const TDmaResult result = (aResult == EOk) ? EDmaResultOK : EDmaResultError; |
|
385 |
|
386 //call the new-style callback |
|
387 Callback(EDmaCallbackRequestCompletion, result, aArg, NULL); |
|
388 } |
|
389 |
|
390 |
|
391 /** |
|
392 The new style callback called by the DMA framework |
|
393 may be called in either thread or ISR context |
|
394 */ |
|
395 void DClientDmaRequest::Callback(TUint aCallbackType, TDmaResult aResult, TAny* aArg, SDmaDesHdr* /*aHdr*/) |
|
396 { |
|
397 const TInt context = NKern::CurrentContext(); |
|
398 __KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CallBack: TDmaResult result = %d, NKern::TContext context = %d", aResult, context)); |
|
399 |
|
400 DClientDmaRequest& self = *reinterpret_cast<DClientDmaRequest*>(aArg); |
|
401 self.DoCallback(aCallbackType, aResult); |
|
402 |
|
403 // decide if callback is complete |
|
404 const TBool transferComplete = aCallbackType & EDmaCallbackRequestCompletion; |
|
405 if(!transferComplete) |
|
406 { |
|
407 return; |
|
408 } |
|
409 |
|
410 // If there are reque args then redo this request |
|
411 // another callback would then be expected. |
|
412 // Requests can only be re-queued in ISR context, but we |
|
413 // do not check that here as it is up to the client to get |
|
414 // it right - also, we want to test that the PIL catches this |
|
415 // error |
|
416 if(!self.iIsrRequeArgSet.IsEmpty()) |
|
417 { |
|
418 // If redo call was succesful, return and wait for next call back |
|
419 if(self.RedoRequest()) |
|
420 return; |
|
421 } |
|
422 |
|
423 switch(context) |
|
424 { |
|
425 case NKern::EThread: |
|
426 { |
|
427 CompleteCallback(aArg); |
|
428 break; |
|
429 } |
|
430 case NKern::EInterrupt: |
|
431 { |
|
432 self.iDfc.iPtr = aArg; |
|
433 self.iDfc.Add(); |
|
434 break; |
|
435 } |
|
436 //Fall-through: If context is IDFC or the EEscaped marker occur |
|
437 //it is an error |
|
438 case NKern::EIDFC: |
|
439 case NKern::EEscaped: |
|
440 default: |
|
441 TEST_FAULT; |
|
442 } |
|
443 } |
|
444 |
|
445 /** |
|
446 Log results of callback. May be called in either thread or ISR context |
|
447 */ |
|
448 void DClientDmaRequest::DoCallback(TUint aCallbackType, TDmaResult aResult) |
|
449 { |
|
450 iStopwatch.Stop(); //sucessive calls will simply over write the stop time |
|
451 |
|
452 // This will always be done whether the client requested a |
|
453 // callback record or not |
|
454 TCallbackRecord& record = iClientDataRequest->Data1(); |
|
455 record.ProcessCallback(aCallbackType, aResult); |
|
456 } |
|
457 |
|
/**
This function may either be called directly or queued as a DFC

Copies the transfer duration into the client request and completes the
client's TRequestStatus (always with KErrNone - the DMA result itself
is reported via the TCallbackRecord).
*/
void DClientDmaRequest::CompleteCallback(TAny* aArg)
	{
	__KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CompleteCallBack thread %O", &Kern::CurrentThread()));
	__ASSERT_NOT_ISR;

	DClientDmaRequest& self = *reinterpret_cast<DClientDmaRequest*>(aArg);

	// Data2 is the duration word delivered to user side
	self.iClientDataRequest->Data2() = self.iStopwatch.ReadMicroSecs();

	//Assert that we called SetRequestStatus on this object before
	//queueing
	__NK_ASSERT_DEBUG(self.iClientDataRequest->IsReady());

	// This is an inelegant, temporary, solution to the following problem:
	//
	// If a dma request completes with an ISR callback the test
	// framework will queue this function as a DFC which
	// will then signal the user-side client. As a consequence of
	// this the user side client may then decide to destroy this
	// request. However, untill the DMA framework's DFC has run
	// and called OnDeque() on this request, it is still considered as
	// queued. Since it is possible that this DFC could run
	// before the DMA fw's DFC, this request could get destroyed while
	// it is stil queued, triggering a PIL assertion.
	//
	// The real fix is likely be for the PIL to call the callback
	// twice, but with different arguments, once to annonunce the
	// ISR and again to announce the dequeue.
	//
	// Here we poll and wait for this request to be dequeued. Note,
	// this DFC is currently run on a separate DFC queue, otherwise
	// it could get deadlocked. An alternative to polling would be
	// to use DCondVar, but that would require PIL modification

	// NOTE(review): iDfcQ is the queue supplied at construction by
	// DDmaTestSession - presumably distinct from the channel's own DFC
	// queue when ISR callbacks are in use; confirm against the session
	// setup in DoCreate().
	if(NKern::CurrentThread() == self.iDfcQ->iThread)
		{
		// Only need to poll if we aren't on the channel's DFC queue
		for(;;)
			{
			// once the request has been unqueued it
			// can only be queued again by the client
			const TBool queued = __e32_atomic_load_acq32(&self.iQueued);
			if(!queued)
				break;
			__KTRACE_OPT(KDMA, Kern::Printf("Waiting for requeuest to be dequeued"));
			NKern::Sleep(10);
			}
		}
	else
		{
		// If we are on the channel's DFCQ we should be dequeued
		// already
		__NK_ASSERT_DEBUG(!__e32_atomic_load_acq32(&self.iQueued));
		}

	// We can always complete with KErrNone, the actual DMA result is
	// logged in the TCallbackRecord
	Kern::QueueRequestComplete(self.iClient, self.iClientDataRequest, KErrNone);
	}
|
520 |
|
// Size of the shared chunk used for the DMA test buffers (8 MB)
const TInt DDmaTestSession::KMaxChunkSize = 8 * KMega;
|
522 |
|
523 TInt DDmaTestSession::RequestUserHandle(DThread* aThread, TOwnerType aType) |
|
524 { |
|
525 if (aType!=EOwnerThread || aThread!=iClient) |
|
526 return KErrAccessDenied; |
|
527 return KErrNone; |
|
528 } |
|
529 |
|
/** Session constructor - all real state is established in DoCreate() */
DDmaTestSession::DDmaTestSession()
	: iClient(NULL), iDfcQ(NULL), iIsrCallbackDfcQ(NULL), iChunkBase(0), iChunk(NULL)
	{}
|
533 |
|
// called in thread critical section
//
// Creates the two DFC queues (one for normal completions, one for
// requests completing with an ISR callback - see
// DClientDmaRequest::CompleteCallback for why a separate queue is
// needed), records the client thread, and creates the shared chunk.
// Partially-created state is cleaned up by the destructor on failure.
TInt DDmaTestSession::DoCreate(TInt /*aUnit*/, const TDesC8* /*aInfo*/, const TVersion& /*aVer*/)
	{
	__NK_ASSERT_ALWAYS(iDfcQ == NULL);
	__NK_ASSERT_ALWAYS(iIsrCallbackDfcQ == NULL);

	TInt r = Kern::DynamicDfcQCreate(iDfcQ, KDFCThreadPriority, KDFCThreadName);
	if (r != KErrNone)
		{
		Kern::Printf("DDmaTestSession::DoCreate D_DMA_DFC_THREAD returned (%d)\n", r);
		return r;
		}
	NKern::ThreadSetCpuAffinity((NThread*)(iDfcQ->iThread), KCpuAffinityAny);

	r = Kern::DynamicDfcQCreate(iIsrCallbackDfcQ, KDFCThreadPriority, KIsrCbDfcThreadName);
	if (r != KErrNone)
		{
		Kern::Printf("DDmaTestSession::DoCreate D_DMA_IsrCb_thread returned (%d)\n", r);
		return r;
		}
	NKern::ThreadSetCpuAffinity((NThread*)(iIsrCallbackDfcQ->iThread), KCpuAffinityAny);

	// Only this thread may use the session from now on
	iClient = &Kern::CurrentThread();

	r = CreateSharedChunk();
	Kern::Printf("DDmaTestSession::DoCreate CreateSharedChunk returned (%d)\n", r);
	return r;
	}
|
562 |
|
// Tear-down order matters: requests first, then channels, then the
// DFC queues and the shared chunk.
DDmaTestSession::~DDmaTestSession()
	{
	//Destroy requests before channels
	//or we will trigger an assertion
	while(iClientDmaReqs.Count())
		{
		DestroyDmaRequestByIndex(0);
		}
	iClientDmaReqs.Close();

	while(iChannels.Count())
		{
		CloseDmaChannelByIndex(0);
		}
	iChannels.Close();


	// DFC queues may be NULL if DoCreate() failed part-way through
	if (iDfcQ)
		{
		iDfcQ->Destroy();
		}

	if (iIsrCallbackDfcQ)
		{
		iIsrCallbackDfcQ->Destroy();
		}

	if(iChunk)
		{
		Kern::ChunkClose(iChunk);
		iChunk = NULL;
		}
	}
|
596 |
|
/**
Dispatch a user-side RDmaSession command. a1/a2 carry either raw
cookies/values or user-side descriptor addresses, depending on the
command - see the individual cases.
*/
TInt DDmaTestSession::Request(TInt aFunction, TAny* a1, TAny* a2)
	{
	// Only the creating thread may use the session
	__NK_ASSERT_DEBUG(&Kern::CurrentThread() == iClient);

	switch (aFunction)
		{
	// a1 = PSL cookie, a2 = user-side TUint* receiving the driver cookie
	case RDmaSession::EOpenChannel:
		{
		TUint pslCookie = (TUint)a1;
		TUint driverCookie = 0;
		TInt r = OpenDmaChannel(pslCookie, driverCookie);
		umemput32(a2, &driverCookie, sizeof(TAny*));
		return r;
		}
	// a1 = user-side TUint* (out: driver cookie), a2 = SCreateInfoTest descriptor (in/out)
	case RDmaSession::EOpenChannelExposed:
		{
		TDmaChannel::SCreateInfo openInfo;
		TUint driverCookie = 0;

		TPckgBuf<SCreateInfoTest> openArgsBuf;
		Kern::KUDesGet(openArgsBuf, *reinterpret_cast<TDes8*>(a2));

		// Copy the client-supplied creation parameters, forcing the
		// DFC queue to be this session's queue
		SCreateInfoTest& openTestInfo = openArgsBuf();
		openInfo.iCookie = openTestInfo.iCookie;
		openInfo.iDesCount = openTestInfo.iDesCount;
		openInfo.iDfcQ = iDfcQ;
		openInfo.iDfcPriority = openTestInfo.iDfcPriority;

#ifdef DMA_APIV2
		openInfo.iPriority = openTestInfo.iPriority;
		openInfo.iDynChannel = openTestInfo.iDynChannel;
#endif

		TInt r = OpenDmaChannel(driverCookie, openInfo);
		umemput32(a1, &driverCookie, sizeof(TAny*));
		Kern::KUDesPut(*reinterpret_cast<TDes8*>(a2), openArgsBuf);
		return r;
		}
	// Simple per-channel operations: a1 = driver cookie
	case RDmaSession::ECloseChannel:
		{
		TUint driverCookie = reinterpret_cast<TUint>(a1);
		TInt r = CloseDmaChannelByCookie(driverCookie);
		return r;
		}
	// a1 = driver cookie, a2 = descriptor receiving TDmacTestCaps
	case RDmaSession::EChannelCaps:
		{
		TUint driverCookie = reinterpret_cast<TUint>(a1);
		TPckgBuf<TDmacTestCaps> capsBuf;
		TInt r = GetChannelCapsByCookie(driverCookie, capsBuf());
		Kern::KUDesPut(*reinterpret_cast<TDes8*>(a2), capsBuf);
		return r;
		}
	case RDmaSession::EPauseChannel:
		{
		TUint driverCookie = reinterpret_cast<TUint>(a1);
		TInt r = PauseDmaChannelByCookie(driverCookie);
		return r;
		}
	case RDmaSession::EResumeChannel:
		{
		TUint driverCookie = reinterpret_cast<TUint>(a1);
		TInt r = ResumeDmaChannelByCookie(driverCookie);
		return r;
		}
	case RDmaSession::ELinkChannel:
		{
		TUint driverCookie = reinterpret_cast<TUint>(a1);
		TInt r = LinkDmaChannelByCookie(driverCookie);
		return r;
		}
	case RDmaSession::EUnlinkChannel:
		{
		TUint driverCookie = reinterpret_cast<TUint>(a1);
		TInt r = UnlinkDmaChannelByCookie(driverCookie);
		return r;
		}
	// Per-request operations: a1 = request cookie
	case RDmaSession::EFragmentCount:
		{
		TUint requestCookie = reinterpret_cast<TUint>(a1);
		TInt r = RequestFragmentCount(requestCookie);
		return r;
		}
	case RDmaSession::EEnableDstElementCounting:
		{
		TUint requestCookie = reinterpret_cast<TUint>(a1);
		TInt r = EnableDstElementCountingByCookie(requestCookie);
		return r;
		}
	case RDmaSession::EEnableSrcElementCounting:
		{
		TUint requestCookie = reinterpret_cast<TUint>(a1);
		TInt r = EnableSrcElementCountingByCookie(requestCookie);
		return r;
		}
	case RDmaSession::EDisableDstElementCounting:
		{
		TUint requestCookie = reinterpret_cast<TUint>(a1);
		TInt r = DisableDstElementCountingByCookie(requestCookie);
		return r;
		}
	case RDmaSession::EDisableSrcElementCounting:
		{
		TUint requestCookie = reinterpret_cast<TUint>(a1);
		TInt r = DisableSrcElementCountingByCookie(requestCookie);
		return r;
		}
	case RDmaSession::ETotalNumDstElementsTransferred:
		{
		TUint requestCookie = reinterpret_cast<TUint>(a1);
		TInt r = TotalNumDstElementsTransferredByCookie(requestCookie);
		return r;
		}
	case RDmaSession::ETotalNumSrcElementsTransferred:
		{
		TUint requestCookie = reinterpret_cast<TUint>(a1);
		TInt r = TotalNumSrcElementsTransferredByCookie(requestCookie);
		return r;
		}
	// a1 = TRequestCreateArgs descriptor, a2 = user-side TUint* (out: request cookie)
	case RDmaSession::ERequestOpen:
		{
		RDmaSession::TRequestCreateArgs createArgs(0, EFalse, 0);
		TPckg<RDmaSession::TRequestCreateArgs> package(createArgs);
		Kern::KUDesGet(package, *reinterpret_cast<TDes8*>(a1));

		const TUint channelCookie = createArgs.iChannelCookie;
		TUint requestCookie = 0;

		TInt r = CreateDmaRequest(channelCookie, requestCookie, createArgs.iNewStyle, createArgs.iMaxFragmentSize);

		umemput32(a2, &requestCookie, sizeof(TAny*));
		return r;
		}
	case RDmaSession::ERequestClose:
		{
		const TUint requestCookie = reinterpret_cast<TUint>(a1);
		return DestroyDmaRequestByCookie(requestCookie);
		}
	// a1 = TFragmentArgs descriptor; fragments the request (timing the operation)
	case RDmaSession::EFragmentLegacy:
	case RDmaSession::EFragment:
		{
		TPckgBuf<RDmaSession::TFragmentArgs> argsBuff;
		Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
		const TUint requestCookie = argsBuff().iRequestCookie;

		//must remove constness as we actually need to
		//convert the src and dst offsets to addresses
		TDmaTransferArgs& transferArgs = const_cast<TDmaTransferArgs&>(argsBuff().iTransferArgs);

		//convert address offsets in to kernel virtual addresses
		MakeAddressesAbsoulute(transferArgs);

		TInt r = KErrGeneral;
		if (!TAddressParms(transferArgs).CheckRange(iChunkBase, iChunk->Size()))
			{
			// Return error code for invalid src and destination arguments used in tranferArgs
			r=KErrArgument;
			return r;
			}

		TStopwatch clock;
		clock.Start();
		switch (aFunction)
			{
		case RDmaSession::EFragmentLegacy:
			r = FragmentRequest(requestCookie, transferArgs, ETrue); break;
		case RDmaSession::EFragment:
			r = FragmentRequest(requestCookie, transferArgs, EFalse); break;
		default:
			TEST_FAULT;
			}
		clock.Stop();

		const TUint64 time = clock.ReadMicroSecs();

		// Report the fragmentation duration if the client asked for it
		TUint64* const timePtr = argsBuff().iDurationMicroSecs;
		if(timePtr)
			{
			umemput(timePtr, &time, sizeof(time));
			}
		return r;
		}
	// a1 = TQueueArgs descriptor; asynchronous - completes via requestStatus
	case RDmaSession::EQueueRequest:
		{
		TPckgBuf<RDmaSession::TQueueArgs> argsBuff;
		Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));

		//this is an Asynchronous request
		const TUint requestCookie = argsBuff().iRequestCookie;
		TRequestStatus* requestStatus = argsBuff().iStatus;
		TCallbackRecord* callbackRec = argsBuff().iCallbackRecord;
		TUint64* duration = argsBuff().iDurationMicroSecs;

		TInt r = QueueRequest(requestCookie, requestStatus, callbackRec, duration);
		if(r != KErrNone)
			{
			// Queueing failed - complete the client immediately
			Kern::RequestComplete(requestStatus, r);
			}
		return r;
		}
	// a1 = TQueueArgsWithReque descriptor; as EQueueRequest but with ISR re-queue args
	case RDmaSession::EQueueRequestWithReque:
		{
		TPckgBuf<RDmaSession::TQueueArgsWithReque> argsBuff;
		Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));

		//this is an Asynchronous request
		const TUint requestCookie = argsBuff().iRequestCookie;
		TRequestStatus* requestStatus = argsBuff().iStatus;
		TCallbackRecord* callbackRec = argsBuff().iCallbackRecord;
		TUint64* duration = argsBuff().iDurationMicroSecs;

		TInt r = KErrNotFound;

		DClientDmaRequest* const request = RequestFromCookie(requestCookie);
		if(request != NULL)
			{
			// Convert chunk offsets to addresses and sanity-check the
			// re-queue args against the original transfer before queueing
			TIsrRequeArgsSet& requeArgs = argsBuff().iRequeSet;
			requeArgs.Fixup(iChunkBase);

			TEST_ASSERT(requeArgs.CheckRange(iChunkBase, iChunk->Size(), request->GetAddressParms() ));
			request->AddRequeArgs(requeArgs);

			r = QueueRequest(requestCookie, requestStatus, callbackRec, duration);
			}

		if(r != KErrNone)
			{
			Kern::RequestComplete(requestStatus, r);
			}
		return r;
		}
	// a1 = driver cookie, a2 = user-side TBool* (out)
	case RDmaSession::EIsOpened:
		{
		TUint driverCookie = (TUint)a1;
		TBool channelOpen = EFalse;;
		TInt r = ChannelIsOpenedByCookie(driverCookie,channelOpen);
		umemput32(a2, &channelOpen, sizeof(TAny*));
		return r;
		}
	// a1 = driver cookie, a2 = user-side TBool* (out)
	case RDmaSession::EIsQueueEmpty:
		{
		TUint driverCookie = (TUint)a1;
		TBool queueEmpty = EFalse;;
		TInt r = IsQueueEmptyByCookie(driverCookie,queueEmpty);
		umemput32(a2, &queueEmpty, sizeof(TAny*));
		return r;
		}
	case RDmaSession::ECancelAllChannel:
		{
		TUint driverCookie = reinterpret_cast<TUint>(a1);
		TInt r = CancelAllByCookie(driverCookie);
		return r;
		}
	// Returns a handle to the shared test chunk
	case RDmaSession::EOpenSharedChunk:
		{
		return OpenSharedChunkHandle();
		}
	// a1 = descriptor receiving TDmaV2TestInfo (converted from v1 info if necessary)
	case RDmaSession::EGetTestInfo:
		{
#ifdef DMA_APIV2
		TPckgC<TDmaV2TestInfo> package(DmaTestInfoV2());
#else
		TPckgC<TDmaV2TestInfo> package(ConvertTestInfo(DmaTestInfo()));
#endif
		Kern::KUDesPut(*reinterpret_cast<TDes8*>(a1), package);
		return KErrNone;
		}
	default:
		// Unknown command - panic the misbehaving client
		Kern::PanicCurrentThread(KClientPanicCat, __LINE__);
		return KErrGeneral;
		}
	}
|
868 |
|
869 TInt DDmaTestSession::OpenDmaChannel(TUint& aDriverCookie, TDmaChannel::SCreateInfo& aInfo) |
|
870 { |
|
871 //cs so thread can't be killed between |
|
872 //opening channel and adding to array |
|
873 NKern::ThreadEnterCS(); |
|
874 TDmaChannel* channel = NULL; |
|
875 TInt r = TDmaChannel::Open(aInfo, channel); |
|
876 if(KErrNone == r) |
|
877 { |
|
878 __NK_ASSERT_ALWAYS(channel); |
|
879 |
|
880 __KTRACE_OPT(KDMA, Kern::Printf("OpenDmaChannel: channel@ 0x%08x", channel)); |
|
881 |
|
882 r = iChannels.Append(channel); |
|
883 if(KErrNone == r) |
|
884 { |
|
885 aDriverCookie = reinterpret_cast<TUint>(channel); |
|
886 } |
|
887 else |
|
888 { |
|
889 channel->Close(); |
|
890 r = KErrNoMemory; |
|
891 } |
|
892 } |
|
893 NKern::ThreadLeaveCS(); |
|
894 |
|
895 return r; |
|
896 } |
|
897 |
|
898 /** |
|
899 Open a DMA channel with arbitrary default parameters |
|
900 */ |
|
901 TInt DDmaTestSession::OpenDmaChannel(TUint aPslCookie, TUint& aDriverCookie ) |
|
902 { |
|
903 TDmaChannel::SCreateInfo info; |
|
904 info.iCookie = aPslCookie; |
|
905 info.iDfcQ = iDfcQ; |
|
906 info.iDfcPriority = 3; |
|
907 info.iDesCount = 128; |
|
908 |
|
909 return OpenDmaChannel(aDriverCookie, info); |
|
910 } |
|
911 |
|
912 TInt DDmaTestSession::CookieToChannelIndex(TUint aDriverCookie) const |
|
913 { |
|
914 const TInt r = iChannels.Find(reinterpret_cast<TDmaChannel*>(aDriverCookie)); |
|
915 |
|
916 if(r < 0) |
|
917 { |
|
918 __KTRACE_OPT(KDMA, Kern::Printf("CookieToChannelIndex: cookie 0x%08x not found!", aDriverCookie)); |
|
919 } |
|
920 return r; |
|
921 } |
|
922 |
|
923 TInt DDmaTestSession::CookieToRequestIndex(TUint aRequestCookie) const |
|
924 { |
|
925 const TInt r = iClientDmaReqs.Find(reinterpret_cast<DClientDmaRequest*>(aRequestCookie)); |
|
926 |
|
927 if(r < 0) |
|
928 { |
|
929 __KTRACE_OPT(KDMA, Kern::Printf("CookieToRequestIndex: cookie 0x%08x not found!", aRequestCookie)); |
|
930 } |
|
931 return r; |
|
932 } |
|
933 |
|
934 void DDmaTestSession::CloseDmaChannelByIndex(TInt aIndex) |
|
935 { |
|
936 __KTRACE_OPT(KDMA, Kern::Printf("CloseDmaChannelByIndex: %d", aIndex)); |
|
937 __NK_ASSERT_DEBUG(aIndex < iChannels.Count()); |
|
938 // cs so client thread can't be killed between removing channel from |
|
939 // array and closing it. |
|
940 NKern::ThreadEnterCS(); |
|
941 TDmaChannel* channel = iChannels[aIndex]; |
|
942 iChannels.Remove(aIndex); |
|
943 channel->Close(); |
|
944 NKern::ThreadLeaveCS(); |
|
945 } |
|
946 |
|
947 TInt DDmaTestSession::CloseDmaChannelByCookie(TUint aDriverCookie) |
|
948 { |
|
949 __KTRACE_OPT(KDMA, Kern::Printf("CloseDmaChannelByCookie: 0x%08x", aDriverCookie)); |
|
950 const TInt index = CookieToChannelIndex(aDriverCookie); |
|
951 |
|
952 if(index >= 0) |
|
953 { |
|
954 CloseDmaChannelByIndex(index); |
|
955 return KErrNone; |
|
956 } |
|
957 else |
|
958 { |
|
959 return KErrNotFound; |
|
960 } |
|
961 } |
|
962 |
|
963 TInt DDmaTestSession::CancelAllByCookie(TUint aDriverCookie) |
|
964 { |
|
965 __KTRACE_OPT(KDMA, Kern::Printf("CancelAllByCookie: 0x%08x", aDriverCookie)); |
|
966 const TInt index = CookieToChannelIndex(aDriverCookie); |
|
967 |
|
968 if(index >= 0) |
|
969 { |
|
970 CancelAllByIndex(index); |
|
971 return KErrNone; |
|
972 } |
|
973 else |
|
974 { |
|
975 return KErrNotFound; |
|
976 } |
|
977 } |
|
978 |
|
979 void DDmaTestSession::CancelAllByIndex(TInt aIndex) |
|
980 { |
|
981 __KTRACE_OPT(KDMA, Kern::Printf("CancelAllByIndex: %d", aIndex)); |
|
982 __NK_ASSERT_DEBUG(aIndex < iChannels.Count()); |
|
983 |
|
984 TDmaChannel* channel = iChannels[aIndex]; |
|
985 channel->CancelAll(); |
|
986 } |
|
987 |
|
988 TInt DDmaTestSession::LinkDmaChannelByIndex(TInt aIndex) |
|
989 { |
|
990 __KTRACE_OPT(KDMA, Kern::Printf("LinkDmaChannelByIndex: %d", aIndex)); |
|
991 __NK_ASSERT_DEBUG(aIndex < iChannels.Count()); |
|
992 |
|
993 #ifdef DMA_APIV2 |
|
994 TDmaChannel* channel = iChannels[aIndex]; |
|
995 return channel->LinkToChannel(channel); |
|
996 #else |
|
997 return KErrNotSupported; |
|
998 #endif |
|
999 } |
|
1000 |
|
1001 TInt DDmaTestSession::LinkDmaChannelByCookie(TUint aDriverCookie) |
|
1002 { |
|
1003 __KTRACE_OPT(KDMA, Kern::Printf("LinkDmaChannelByCookie: 0x%08x", aDriverCookie)); |
|
1004 const TInt index = CookieToChannelIndex(aDriverCookie); |
|
1005 |
|
1006 if(index >= 0) |
|
1007 { |
|
1008 TInt r = LinkDmaChannelByIndex(index); |
|
1009 return r; |
|
1010 } |
|
1011 else |
|
1012 { |
|
1013 return KErrNotFound; |
|
1014 } |
|
1015 } |
|
1016 |
|
1017 TInt DDmaTestSession::UnlinkDmaChannelByIndex(TInt aIndex) |
|
1018 { |
|
1019 __KTRACE_OPT(KDMA, Kern::Printf("UnlinkDmaChannelByIndex: %d", aIndex)); |
|
1020 __NK_ASSERT_DEBUG(aIndex < iChannels.Count()); |
|
1021 |
|
1022 #ifdef DMA_APIV2 |
|
1023 TDmaChannel* channel = iChannels[aIndex]; |
|
1024 return channel->LinkToChannel(NULL); |
|
1025 #else |
|
1026 return KErrNotSupported; |
|
1027 #endif |
|
1028 } |
|
1029 |
|
1030 TInt DDmaTestSession::UnlinkDmaChannelByCookie(TUint aDriverCookie) |
|
1031 { |
|
1032 __KTRACE_OPT(KDMA, Kern::Printf("UnlinkDmaChannelByCookie: 0x%08x", aDriverCookie)); |
|
1033 const TInt index = CookieToChannelIndex(aDriverCookie); |
|
1034 |
|
1035 if(index >= 0) |
|
1036 { |
|
1037 TInt r = UnlinkDmaChannelByIndex(index); |
|
1038 return r; |
|
1039 } |
|
1040 else |
|
1041 { |
|
1042 return KErrNotFound; |
|
1043 } |
|
1044 } |
|
1045 |
|
1046 TInt DDmaTestSession::PauseDmaChannelByIndex(TInt aIndex) |
|
1047 { |
|
1048 __KTRACE_OPT(KDMA, Kern::Printf("PauseDmaChannelByIndex: %d", aIndex)); |
|
1049 __NK_ASSERT_DEBUG(aIndex < iChannels.Count()); |
|
1050 |
|
1051 #ifdef DMA_APIV2 |
|
1052 TDmaChannel* channel = iChannels[aIndex]; |
|
1053 return channel->Pause(); |
|
1054 #else |
|
1055 return KErrNotSupported; |
|
1056 #endif |
|
1057 } |
|
1058 |
|
1059 TInt DDmaTestSession::PauseDmaChannelByCookie(TUint aDriverCookie) |
|
1060 { |
|
1061 __KTRACE_OPT(KDMA, Kern::Printf("PauseDmaChannelByCookie: 0x%08x", aDriverCookie)); |
|
1062 const TInt index = CookieToChannelIndex(aDriverCookie); |
|
1063 |
|
1064 if(index >= 0) |
|
1065 { |
|
1066 TInt r = PauseDmaChannelByIndex(index); |
|
1067 return r; |
|
1068 } |
|
1069 else |
|
1070 { |
|
1071 return KErrNotFound; |
|
1072 } |
|
1073 } |
|
1074 |
|
1075 TInt DDmaTestSession::ResumeDmaChannelByIndex(TInt aIndex) |
|
1076 { |
|
1077 __KTRACE_OPT(KDMA, Kern::Printf("ResumeDmaChannelByIndex: %d", aIndex)); |
|
1078 __NK_ASSERT_DEBUG(aIndex < iChannels.Count()); |
|
1079 |
|
1080 #ifdef DMA_APIV2 |
|
1081 TDmaChannel* channel = iChannels[aIndex]; |
|
1082 return channel->Resume(); |
|
1083 #else |
|
1084 return KErrNotSupported; |
|
1085 #endif |
|
1086 } |
|
1087 |
|
1088 TInt DDmaTestSession::ResumeDmaChannelByCookie(TUint aDriverCookie) |
|
1089 { |
|
1090 __KTRACE_OPT(KDMA, Kern::Printf("ResumeDmaChannelByCookie: 0x%08x", aDriverCookie)); |
|
1091 const TInt index = CookieToChannelIndex(aDriverCookie); |
|
1092 |
|
1093 if(index >= 0) |
|
1094 { |
|
1095 TInt r = ResumeDmaChannelByIndex(index); |
|
1096 return r; |
|
1097 } |
|
1098 else |
|
1099 { |
|
1100 return KErrNotFound; |
|
1101 } |
|
1102 } |
|
1103 |
|
1104 TInt DDmaTestSession::EnableDstElementCountingByCookie(TUint aDriverCookie) |
|
1105 { |
|
1106 __KTRACE_OPT(KDMA, Kern::Printf("EnableDstElementCountingByCookie: 0x%08x", aDriverCookie)); |
|
1107 const TInt index = CookieToRequestIndex(aDriverCookie); |
|
1108 |
|
1109 if(index >= 0) |
|
1110 { |
|
1111 EnableDstElementCountingByIndex(index); |
|
1112 return KErrNone; |
|
1113 } |
|
1114 else |
|
1115 { |
|
1116 return KErrNotFound; |
|
1117 } |
|
1118 } |
|
1119 |
|
1120 void DDmaTestSession::EnableDstElementCountingByIndex(TInt aIndex) |
|
1121 { |
|
1122 __KTRACE_OPT(KDMA, Kern::Printf("EnableDstElementCountingByIndex: %d", aIndex)); |
|
1123 __NK_ASSERT_DEBUG(aIndex < iClientDmaReqs.Count()); |
|
1124 #ifdef DMA_APIV2 |
|
1125 iClientDmaReqs[aIndex]->EnableDstElementCounting(); |
|
1126 #endif |
|
1127 } |
|
1128 |
|
1129 TInt DDmaTestSession::EnableSrcElementCountingByCookie(TUint aDriverCookie) |
|
1130 { |
|
1131 __KTRACE_OPT(KDMA, Kern::Printf("EnableSrcElementCountingByCookie: 0x%08x", aDriverCookie)); |
|
1132 const TInt index = CookieToRequestIndex(aDriverCookie); |
|
1133 |
|
1134 if(index >= 0) |
|
1135 { |
|
1136 EnableSrcElementCountingByIndex(index); |
|
1137 return KErrNone; |
|
1138 } |
|
1139 else |
|
1140 { |
|
1141 return KErrNotFound; |
|
1142 } |
|
1143 } |
|
1144 |
|
1145 void DDmaTestSession::EnableSrcElementCountingByIndex(TInt aIndex) |
|
1146 { |
|
1147 __KTRACE_OPT(KDMA, Kern::Printf("EnableSrcElementCountingByIndex: %d", aIndex)); |
|
1148 __NK_ASSERT_DEBUG(aIndex < iClientDmaReqs.Count()); |
|
1149 |
|
1150 #ifdef DMA_APIV2 |
|
1151 iClientDmaReqs[aIndex]->EnableSrcElementCounting(); |
|
1152 #endif |
|
1153 } |
|
1154 |
|
1155 TInt DDmaTestSession::DisableDstElementCountingByCookie(TUint aDriverCookie) |
|
1156 { |
|
1157 __KTRACE_OPT(KDMA, Kern::Printf("DisableDstElementCountingByCookie: 0x%08x", aDriverCookie)); |
|
1158 const TInt index = CookieToRequestIndex(aDriverCookie); |
|
1159 |
|
1160 if(index >= 0) |
|
1161 { |
|
1162 DisableDstElementCountingByIndex(index); |
|
1163 return KErrNone; |
|
1164 } |
|
1165 else |
|
1166 { |
|
1167 return KErrNotFound; |
|
1168 } |
|
1169 } |
|
1170 |
|
1171 void DDmaTestSession::DisableDstElementCountingByIndex(TInt aIndex) |
|
1172 { |
|
1173 __KTRACE_OPT(KDMA, Kern::Printf("DisableDstElementCountingByIndex: %d", aIndex)); |
|
1174 __NK_ASSERT_DEBUG(aIndex < iClientDmaReqs.Count()); |
|
1175 #ifdef DMA_APIV2 |
|
1176 iClientDmaReqs[aIndex]->DisableDstElementCounting(); |
|
1177 #endif |
|
1178 } |
|
1179 |
|
1180 TInt DDmaTestSession::DisableSrcElementCountingByCookie(TUint aDriverCookie) |
|
1181 { |
|
1182 __KTRACE_OPT(KDMA, Kern::Printf("DisableSrcElementCountingByCookie: 0x%08x", aDriverCookie)); |
|
1183 const TInt index = CookieToRequestIndex(aDriverCookie); |
|
1184 |
|
1185 if(index >= 0) |
|
1186 { |
|
1187 DisableSrcElementCountingByIndex(index); |
|
1188 return KErrNone; |
|
1189 } |
|
1190 else |
|
1191 { |
|
1192 return KErrNotFound; |
|
1193 } |
|
1194 } |
|
1195 |
|
1196 void DDmaTestSession::DisableSrcElementCountingByIndex(TInt aIndex) |
|
1197 { |
|
1198 __KTRACE_OPT(KDMA, Kern::Printf("DisableSrcElementCountingByIndex: %d", aIndex)); |
|
1199 __NK_ASSERT_DEBUG(aIndex < iClientDmaReqs.Count()); |
|
1200 #ifdef DMA_APIV2 |
|
1201 iClientDmaReqs[aIndex]->DisableSrcElementCounting(); |
|
1202 #endif |
|
1203 } |
|
1204 |
|
1205 TInt DDmaTestSession::TotalNumDstElementsTransferredByCookie(TUint aDriverCookie) |
|
1206 { |
|
1207 __KTRACE_OPT(KDMA, Kern::Printf("TotalNumDstElementsTransferredByCookie: 0x%08x", aDriverCookie)); |
|
1208 const TInt index = CookieToRequestIndex(aDriverCookie); |
|
1209 |
|
1210 if(index >= 0) |
|
1211 { |
|
1212 TInt r = TotalNumDstElementsTransferredByIndex(index); |
|
1213 return r; |
|
1214 } |
|
1215 else |
|
1216 { |
|
1217 return KErrNotFound; |
|
1218 } |
|
1219 } |
|
1220 |
|
1221 TInt DDmaTestSession::TotalNumDstElementsTransferredByIndex(TInt aIndex) |
|
1222 { |
|
1223 __KTRACE_OPT(KDMA, Kern::Printf("TotalNumDstElementsTransferredByIndex: %d", aIndex)); |
|
1224 __NK_ASSERT_DEBUG(aIndex < iClientDmaReqs.Count()); |
|
1225 |
|
1226 #ifdef DMA_APIV2 |
|
1227 TInt r = iClientDmaReqs[aIndex]->TotalNumDstElementsTransferred(); |
|
1228 return r; |
|
1229 #else |
|
1230 return KErrNotSupported; |
|
1231 #endif |
|
1232 } |
|
1233 |
|
1234 TInt DDmaTestSession::TotalNumSrcElementsTransferredByCookie(TUint aDriverCookie) |
|
1235 { |
|
1236 __KTRACE_OPT(KDMA, Kern::Printf("TotalNumSrcElementsTransferredByCookie: 0x%08x", aDriverCookie)); |
|
1237 const TInt index = CookieToRequestIndex(aDriverCookie); |
|
1238 |
|
1239 if(index >= 0) |
|
1240 { |
|
1241 TInt r = TotalNumSrcElementsTransferredByIndex(index); |
|
1242 return r; |
|
1243 } |
|
1244 else |
|
1245 { |
|
1246 return KErrNotFound; |
|
1247 } |
|
1248 } |
|
1249 |
|
1250 TInt DDmaTestSession::TotalNumSrcElementsTransferredByIndex(TInt aIndex) |
|
1251 { |
|
1252 __KTRACE_OPT(KDMA, Kern::Printf("TotalNumSrcElementsTransferredByIndex: %d", aIndex)); |
|
1253 __NK_ASSERT_DEBUG(aIndex < iClientDmaReqs.Count()); |
|
1254 |
|
1255 #ifdef DMA_APIV2 |
|
1256 TInt r = iClientDmaReqs[aIndex]->TotalNumSrcElementsTransferred(); |
|
1257 return r; |
|
1258 #else |
|
1259 return KErrNotSupported; |
|
1260 #endif |
|
1261 } |
|
1262 TInt DDmaTestSession::IsrRedoRequestByCookie(TUint aDriverCookie,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb) |
|
1263 { |
|
1264 __KTRACE_OPT(KDMA, Kern::Printf("IsrRedoRequestByCookie: 0x%08x", aDriverCookie)); |
|
1265 const TInt index = CookieToChannelIndex(aDriverCookie); |
|
1266 |
|
1267 if(index >= 0) |
|
1268 { |
|
1269 TInt r = IsrRedoRequestByIndex(index,aSrcAddr,aDstAddr,aTransferCount,aPslRequestInfo,aIsrCb); |
|
1270 return r; |
|
1271 } |
|
1272 else |
|
1273 { |
|
1274 return KErrNotFound; |
|
1275 } |
|
1276 } |
|
1277 |
|
1278 TInt DDmaTestSession::IsrRedoRequestByIndex(TInt aIndex,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb) |
|
1279 { |
|
1280 __KTRACE_OPT(KDMA, Kern::Printf("IsrRedoRequestByIndex: %d", aIndex)); |
|
1281 __NK_ASSERT_DEBUG(aIndex < iChannels.Count()); |
|
1282 |
|
1283 #ifdef DMA_APIV2 |
|
1284 TDmaChannel* channel = iChannels[aIndex]; |
|
1285 return channel->IsrRedoRequest(aSrcAddr,aDstAddr,aTransferCount,aPslRequestInfo,aIsrCb); |
|
1286 #else |
|
1287 return KErrNotSupported; |
|
1288 #endif |
|
1289 } |
|
1290 |
|
1291 /** |
|
1292 aChannelCaps will be set to "NULL" values |
|
1293 */ |
|
1294 TInt DDmaTestSession::GetChannelCapsByCookie(TUint aDriverCookie, TDmacTestCaps& aChannelCaps) |
|
1295 { |
|
1296 SDmacCaps caps = {0,}; //initialise with NULL values |
|
1297 TInt r = GetChannelCapsByCookie(aDriverCookie, caps); |
|
1298 |
|
1299 if(r == KErrNotSupported) |
|
1300 { |
|
1301 //If we can not query caps it means |
|
1302 //that we are using the v1 driver |
|
1303 //we construct a empty TDmacTestCaps |
|
1304 //but with an iPILVersion of 1 |
|
1305 const TDmacTestCaps nullCapsV1(caps, 1); |
|
1306 aChannelCaps = nullCapsV1; |
|
1307 r = KErrNone; |
|
1308 } |
|
1309 else if(r == KErrNone) |
|
1310 { |
|
1311 const TDmacTestCaps capsV2(caps, 2); |
|
1312 aChannelCaps = capsV2; |
|
1313 } |
|
1314 |
|
1315 return r; |
|
1316 } |
|
1317 |
|
1318 /** |
|
1319 Will return the capabilities of the DMA channel. |
|
1320 Querying SDmacCaps is not possible on V1 of the DMA framework. |
|
1321 In that case an error of KErrNotSupported will be returned |
|
1322 */ |
|
1323 TInt DDmaTestSession::GetChannelCapsByCookie(TUint aDriverCookie, SDmacCaps& aChannelCaps) |
|
1324 { |
|
1325 __KTRACE_OPT(KDMA, Kern::Printf("GetChannelCapsByCookie: 0x%08x", aDriverCookie)); |
|
1326 const TInt index = CookieToChannelIndex(aDriverCookie); |
|
1327 if(index >= 0) |
|
1328 { |
|
1329 #ifdef DMA_APIV2 |
|
1330 aChannelCaps = iChannels[index]->DmacCaps(); |
|
1331 return KErrNone; |
|
1332 #else |
|
1333 return KErrNotSupported; |
|
1334 #endif |
|
1335 } |
|
1336 else |
|
1337 { |
|
1338 return KErrNotFound; |
|
1339 } |
|
1340 } |
|
1341 |
|
1342 TInt DDmaTestSession::IsQueueEmptyByCookie(TUint aDriverCookie, TBool& aQueueEmpty) |
|
1343 { |
|
1344 __KTRACE_OPT(KDMA, Kern::Printf("IsQueueEmptyByCookie: 0x%08x", aDriverCookie)); |
|
1345 const TInt index = CookieToChannelIndex(aDriverCookie); |
|
1346 |
|
1347 if(index >= 0) |
|
1348 { |
|
1349 aQueueEmpty=iChannels[index]->IsQueueEmpty(); |
|
1350 return KErrNone; |
|
1351 } |
|
1352 else |
|
1353 { |
|
1354 return KErrNotFound; |
|
1355 } |
|
1356 } |
|
1357 |
|
1358 TInt DDmaTestSession::ChannelIsOpenedByCookie(TUint aDriverCookie, TBool& aChannelOpen) |
|
1359 { |
|
1360 __KTRACE_OPT(KDMA, Kern::Printf("ChannelIsOpenedByCookie: 0x%08x", aDriverCookie)); |
|
1361 const TInt index = CookieToChannelIndex(aDriverCookie); |
|
1362 |
|
1363 if(index >= 0) |
|
1364 { |
|
1365 aChannelOpen=iChannels[index]->IsOpened(); |
|
1366 return KErrNone; |
|
1367 } |
|
1368 else |
|
1369 { |
|
1370 return KErrNotFound; |
|
1371 } |
|
1372 } |
|
1373 |
|
1374 TInt DDmaTestSession::CreateDmaRequest(TUint aChannelCookie, TUint& aRequestCookie, TBool aNewCallback, TInt aMaxFragmentSizeBytes) |
|
1375 { |
|
1376 #ifndef DMA_APIV2 |
|
1377 if(aNewCallback) |
|
1378 return KErrNotSupported; |
|
1379 #endif |
|
1380 |
|
1381 TInt channelIndex = CookieToChannelIndex(aChannelCookie); |
|
1382 if(channelIndex < 0) |
|
1383 return channelIndex; |
|
1384 |
|
1385 NKern::ThreadEnterCS(); |
|
1386 DClientDmaRequest* request = DClientDmaRequest::Construct(iClient, iIsrCallbackDfcQ, *iChannels[channelIndex], aNewCallback, aMaxFragmentSizeBytes); |
|
1387 if(request == NULL) |
|
1388 { |
|
1389 NKern::ThreadLeaveCS(); |
|
1390 return KErrNoMemory; |
|
1391 } |
|
1392 |
|
1393 TInt r = iClientDmaReqs.Append(request); |
|
1394 if(r == KErrNone) |
|
1395 { |
|
1396 aRequestCookie = reinterpret_cast<TUint>(request); |
|
1397 } |
|
1398 else |
|
1399 { |
|
1400 delete request; |
|
1401 } |
|
1402 NKern::ThreadLeaveCS(); |
|
1403 |
|
1404 return r; |
|
1405 } |
|
1406 |
|
1407 TInt DDmaTestSession::DestroyDmaRequestByCookie(TUint aRequestCookie) |
|
1408 { |
|
1409 TInt requestIndex = CookieToRequestIndex(aRequestCookie); |
|
1410 if(requestIndex < 0) |
|
1411 return requestIndex; |
|
1412 |
|
1413 DestroyDmaRequestByIndex(requestIndex); |
|
1414 |
|
1415 return KErrNone; |
|
1416 } |
|
1417 |
|
1418 void DDmaTestSession::DestroyDmaRequestByIndex(TInt aIndex) |
|
1419 { |
|
1420 __KTRACE_OPT(KDMA, Kern::Printf("DestroyDmaRequestByIndex: %d", aIndex)); |
|
1421 __NK_ASSERT_DEBUG(aIndex < iClientDmaReqs.Count()); |
|
1422 NKern::ThreadEnterCS(); |
|
1423 |
|
1424 DClientDmaRequest* request = iClientDmaReqs[aIndex]; |
|
1425 iClientDmaReqs.Remove(aIndex); |
|
1426 delete request; |
|
1427 |
|
1428 NKern::ThreadLeaveCS(); |
|
1429 } |
|
1430 |
|
// Create the shared chunk used as the source/destination buffer for DMA
// test transfers, commit KMaxChunkSize bytes of physically contiguous
// memory into it, and record the chunk (iChunk) and its kernel base
// address (iChunkBase).
// NOTE(review): a second call would overwrite iChunk/iChunkBase without
// closing the previous chunk - confirm callers invoke this only once.
TInt DDmaTestSession::CreateSharedChunk()
    {
    // Enter critical section so we can't die and leak the objects we are creating
    // I.e. the TChunkCleanup and DChunk (Shared Chunk)
    NKern::ThreadEnterCS();

    // Create the chunk
    TChunkCreateInfo info;
    info.iType = TChunkCreateInfo::ESharedKernelSingle;
    info.iMaxSize = KMaxChunkSize;
#ifndef __WINS__
    // Fully-blocking mapping: uncached, so DMA transfers are visible
    // without explicit cache maintenance.
    info.iMapAttr = EMapAttrFullyBlocking | EMapAttrUserRw;
#endif

    info.iOwnsMemory = ETrue;
    info.iDestroyedDfc = NULL;

    DChunk* chunk;
    TUint32 mapAttr;
    TInt r = Kern::ChunkCreate(info, chunk, iChunkBase, mapAttr);
    if(r!=KErrNone)
        {
        // Creation failed - nothing to clean up.
        NKern::ThreadLeaveCS();
        return r;
        }

    // Map our device's memory into the chunk (at offset 0)
    TUint32 physicalAddr;
    r = Kern::ChunkCommitContiguous(chunk,0,KMaxChunkSize, physicalAddr);
    if(r!=KErrNone)
        {
        // Commit failed so tidy-up...
        Kern::ChunkClose(chunk);
        }
    else
        {
        // Success: keep the chunk for the lifetime of the session.
        iChunk = chunk;
        }

    // Can leave critical section now that we have saved pointers to created objects
    NKern::ThreadLeaveCS();

    return r;
    }
|
1475 |
|
// Create a user-side handle to the shared chunk for the calling thread
// (the NULL thread argument means "current thread" per the Kern API).
// NOTE(review): MakeHandleAndOpen returns a TInt which may be a negative
// error code; returning it as TUint makes an error look like a huge
// handle value. Callers presumably check the sign after casting back -
// confirm against the user-side code.
TUint DDmaTestSession::OpenSharedChunkHandle()
    {
    NKern::ThreadEnterCS();
    const TInt r = Kern::MakeHandleAndOpen(NULL, iChunk);
    NKern::ThreadLeaveCS();
    return r;
    }
|
1483 |
|
/**
Replace addresses specified as an offset from the chunk base with absolute
virtual addresses.

(NOTE(review): the method name misspells "Absolute"; it is kept as-is
because it is part of the class interface.)

@param aTransferArgs  Transfer arguments whose source and destination
                      addresses are offsets into the shared chunk; both
                      are rebased against iChunkBase in place.
*/
void DDmaTestSession::MakeAddressesAbsoulute(TDmaTransferArgs& aTransferArgs) const
    {
    aTransferArgs.iSrcConfig.iAddr += iChunkBase;
    aTransferArgs.iDstConfig.iAddr += iChunkBase;
    }
|
1493 |
|
1494 #ifndef DMA_APIV2 |
|
1495 static TInt FragmentCount(DDmaRequest* aRequest) |
|
1496 { |
|
1497 TInt count = 0; |
|
1498 for (SDmaDesHdr* pH = aRequest->iFirstHdr; pH != NULL; pH = pH->iNext) |
|
1499 count++; |
|
1500 return count; |
|
1501 } |
|
1502 #endif |
|
1503 |
|
1504 TInt DDmaTestSession::RequestFragmentCount(TUint aRequestCookie) |
|
1505 { |
|
1506 TInt requestIndex = CookieToRequestIndex(aRequestCookie); |
|
1507 if(requestIndex < 0) |
|
1508 return requestIndex; |
|
1509 #ifdef DMA_APIV2 |
|
1510 TInt r = iClientDmaReqs[requestIndex]->FragmentCount(); |
|
1511 #else |
|
1512 TInt r = FragmentCount(iClientDmaReqs[requestIndex]); |
|
1513 #endif |
|
1514 |
|
1515 return r; |
|
1516 } |
|
1517 |
|
1518 TInt DDmaTestSession::FragmentRequest(TUint aRequestCookie, const TDmaTransferArgs& aTransferArgs, TBool aLegacy) |
|
1519 { |
|
1520 __KTRACE_OPT(KDMA, Kern::Printf(">FragmentRequest: cookie=0x%08x, legacy=%d", aRequestCookie, aLegacy)); |
|
1521 TInt requestIndex = CookieToRequestIndex(aRequestCookie); |
|
1522 if(requestIndex < 0) |
|
1523 return requestIndex; |
|
1524 |
|
1525 DClientDmaRequest& request = *iClientDmaReqs[requestIndex]; |
|
1526 request.SetAddressParms(aTransferArgs); |
|
1527 |
|
1528 TInt r = KErrNotSupported; |
|
1529 |
|
1530 if (aTransferArgs.iTransferCount < 1) |
|
1531 { |
|
1532 // Return error code for invalid transfer size used in tranferArgs |
|
1533 r=KErrArgument; |
|
1534 return r; |
|
1535 } |
|
1536 |
|
1537 if(aLegacy) |
|
1538 { |
|
1539 TUint flags = KDmaMemSrc | KDmaIncSrc | KDmaMemDest | KDmaIncDest; |
|
1540 const TUint src = aTransferArgs.iSrcConfig.iAddr; |
|
1541 const TUint dst = aTransferArgs.iDstConfig.iAddr; |
|
1542 r = request.Fragment(src, dst, aTransferArgs.iTransferCount, flags, NULL); |
|
1543 } |
|
1544 else |
|
1545 { |
|
1546 #ifdef DMA_APIV2 |
|
1547 r = request.Fragment(aTransferArgs); |
|
1548 #endif |
|
1549 } |
|
1550 return r; |
|
1551 } |
|
1552 |
|
1553 /** |
|
1554 Queue the request refered to by aRequestCookie |
|
1555 |
|
1556 @param aRequestCookie Client identifier for the DDmaRequest |
|
1557 @param aStatus Pointer to the client's TRequestStatus |
|
1558 @param aRecord Pointer to the client's TCallbackRecord |
|
1559 @return |
|
1560 - KErrNotFound - aRequestCookie was invalid |
|
1561 - KErrNone - Success |
|
1562 */ |
|
1563 TInt DDmaTestSession::QueueRequest(TUint aRequestCookie, TRequestStatus* aStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs) |
|
1564 { |
|
1565 __KTRACE_OPT(KDMA, Kern::Printf(">QueueRequest: 0x%08x", aRequestCookie)); |
|
1566 |
|
1567 DClientDmaRequest* request = RequestFromCookie(aRequestCookie); |
|
1568 if(request == NULL) |
|
1569 return KErrNotFound; |
|
1570 |
|
1571 return request->Queue(aStatus, aRecord, aDurationMicroSecs); |
|
1572 } |
|
1573 |
|
1574 DClientDmaRequest* DDmaTestSession::RequestFromCookie(TUint aRequestCookie) const |
|
1575 { |
|
1576 TInt requestIndex = CookieToRequestIndex(aRequestCookie); |
|
1577 if(requestIndex < 0) |
|
1578 return NULL; |
|
1579 |
|
1580 return (iClientDmaReqs[requestIndex]); |
|
1581 } |
|
1582 |
|
1583 TDmaV2TestInfo DDmaTestSession::ConvertTestInfo(const TDmaTestInfo& aOldInfo) const |
|
1584 { |
|
1585 TDmaV2TestInfo newInfo; |
|
1586 newInfo.iMaxTransferSize = aOldInfo.iMaxTransferSize; |
|
1587 newInfo.iMemAlignMask = aOldInfo.iMemAlignMask; |
|
1588 newInfo.iMemMemPslInfo = aOldInfo.iMemMemPslInfo; |
|
1589 |
|
1590 newInfo.iMaxSbChannels = aOldInfo.iMaxSbChannels; |
|
1591 { |
|
1592 for(TInt i=0; i<aOldInfo.iMaxSbChannels; i++) |
|
1593 { |
|
1594 newInfo.iSbChannels[i] = aOldInfo.iSbChannels[i]; |
|
1595 } |
|
1596 } |
|
1597 |
|
1598 newInfo.iMaxDbChannels = aOldInfo.iMaxDbChannels; |
|
1599 { |
|
1600 for(TInt i=0; i<aOldInfo.iMaxDbChannels; i++) |
|
1601 { |
|
1602 newInfo.iDbChannels[i] = aOldInfo.iDbChannels[i]; |
|
1603 } |
|
1604 } |
|
1605 |
|
1606 newInfo.iMaxSgChannels = aOldInfo.iMaxSgChannels; |
|
1607 { |
|
1608 for(TInt i=0; i<aOldInfo.iMaxSgChannels; i++) |
|
1609 { |
|
1610 newInfo.iSgChannels[i] = aOldInfo.iSgChannels[i]; |
|
1611 } |
|
1612 } |
|
1613 |
|
1614 return newInfo; |
|
1615 } |
|
1616 ////////////////////////////////////////////////////////////////////////////// |
|
1617 |
|
// LDD factory object for the DMA test driver: creates one
// DDmaTestSession logical channel per user-side RDmaSession.
class DDmaTestFactory : public DLogicalDevice
    {
public:
    DDmaTestFactory();
    // from DLogicalDevice
    virtual ~DDmaTestFactory()
        {
        __KTRACE_OPT(KDMA, Kern::Printf(">DDmaTestFactory::~DDmaTestFactory"));
        }
    virtual TInt Install();
    virtual void GetCaps(TDes8& aDes) const;
    virtual TInt Create(DLogicalChannelBase*& aChannel);
    };
|
1631 |
|
1632 |
|
// Record the driver version and allow per-unit opening.
DDmaTestFactory::DDmaTestFactory()
    {
    iVersion = TestDmaLddVersion();
    iParseMask = KDeviceAllowUnit;							// no info, no PDD
    }
|
1638 |
|
1639 |
|
1640 TInt DDmaTestFactory::Create(DLogicalChannelBase*& aChannel) |
|
1641 { |
|
1642 aChannel=new DDmaTestSession; |
|
1643 Kern::Printf("DDmaTestFactory::Create %d\n", aChannel?KErrNone : KErrNoMemory); |
|
1644 return aChannel ? KErrNone : KErrNoMemory; |
|
1645 } |
|
1646 |
|
1647 |
|
1648 TInt DDmaTestFactory::Install() |
|
1649 { |
|
1650 TInt r = SetName(&KTestDmaLddName); |
|
1651 Kern::Printf("DDmaTestFactory::Install %d\n",r); |
|
1652 return r; |
|
1653 } |
|
1654 |
|
1655 |
|
// No capabilities are reported for this test LDD.
void DDmaTestFactory::GetCaps(TDes8& /*aDes*/) const
    {
    }
|
1659 |
|
1660 ////////////////////////////////////////////////////////////////////////////// |
|
1661 |
|
// Kernel entry point: create the LDD factory object.
DECLARE_STANDARD_LDD()
	{
	return new DDmaTestFactory;
	}