kernel/eka/drivers/dma/dma2_pil.cpp
branch RCL_3
changeset 43 c1f20ce4abcf
       
     1 // Copyright (c) 2002-2010 Nokia Corporation and/or its subsidiary(-ies).
       
     2 // All rights reserved.
       
     3 // This component and the accompanying materials are made available
       
     4 // under the terms of "Eclipse Public License v1.0"
       
     5 // which accompanies this distribution, and is available
       
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
       
     7 //
       
     8 // Initial Contributors:
       
     9 // Nokia Corporation - initial contribution.
       
    10 //
       
    11 // Contributors:
       
    12 //
       
    13 // Description:
       
    14 // e32/drivers/dma2_pil.cpp
       
    15 // DMA Platform Independent Layer (PIL)
       
    16 //
       
    17 //
       
    18 
       
    19 #include <drivers/dma.h>
       
    20 #include <drivers/dma_hai.h>
       
    21 
       
    22 #include <kernel/kern_priv.h>
       
    23 
       
    24 
       
     25 // Symbian's Min() & Max() are broken, so we define our own _Min() & _Max() here
       
    26 inline TUint _Min(TUint aLeft, TUint aRight)
       
    27 	{return(aLeft < aRight ? aLeft : aRight);}
       
    28 inline TUint _Max(TUint aLeft, TUint aRight)
       
    29 	{return(aLeft > aRight ? aLeft : aRight);}
       
    30 
       
    31 
       
    32 // The following section is used only when freezing the DMA2 export library
       
    33 /*
       
    34 TInt DmaChannelMgr::StaticExtension(TInt, TAny*) {return 0;}
       
    35 TDmaChannel* DmaChannelMgr::Open(TUint32, TBool, TUint) {return 0;}
       
    36 void DmaChannelMgr::Close(TDmaChannel*) {}
       
    37 EXPORT_C const TDmaTestInfo& DmaTestInfo() {static TDmaTestInfo a; return a;}
       
    38 EXPORT_C const TDmaV2TestInfo& DmaTestInfoV2() {static TDmaV2TestInfo a; return a;}
       
    39 */
       
    40 
       
    41 static const char KDmaPanicCat[] = "DMA " __FILE__;
       
    42 
       
    43 //////////////////////////////////////////////////////////////////////
       
    44 // DmaChannelMgr
       
    45 //
       
    46 // Wait, Signal, and Initialise are defined here in the PIL.
       
    47 // Open, Close and Extension must be defined in the PSL.
       
    48 
       
    49 NFastMutex DmaChannelMgr::Lock;
       
    50 
       
    51 
       
    52 void DmaChannelMgr::Wait()
       
    53 	{
       
    54 	NKern::FMWait(&Lock);
       
    55 	}
       
    56 
       
    57 
       
    58 void DmaChannelMgr::Signal()
       
    59 	{
       
    60 	NKern::FMSignal(&Lock);
       
    61 	}
       
    62 
       
    63 
       
    64 TInt DmaChannelMgr::Initialise()
       
    65 	{
       
    66 	return KErrNone;
       
    67 	}
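// For illustration only - a minimal sketch of the PSL side of this interface.
// The signatures match the stubs in the frozen-export section above; the PIL
// serialises these calls via Wait()/Signal() above. TheExampleDmac,
// ChannelForId() and KExampleChannelCount are hypothetical PSL names.
/*
TDmaChannel* DmaChannelMgr::Open(TUint32 aOpenId, TBool aDynChannel, TUint aPriority)
	{
	// Map the client's cookie onto one of this DMAC's channel objects.
	(void)aDynChannel; (void)aPriority;		// this simple PSL ignores them
	if (aOpenId >= KExampleChannelCount)
		return NULL;						// unknown channel id
	return TheExampleDmac.ChannelForId(aOpenId);
	}

void DmaChannelMgr::Close(TDmaChannel* aChannel)
	{
	// Undo whatever Open() set up (stop hardware, mark the channel free, ...).
	(void)aChannel;
	}
*/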
       
    68 
       
    69 
       
    70 class TDmaCancelInfo : public SDblQueLink
       
    71 	{
       
    72 public:
       
    73 	TDmaCancelInfo();
       
    74 	void Signal();
       
    75 public:
       
    76 	NFastSemaphore iSem;
       
    77 	};
       
    78 
       
    79 
       
    80 TDmaCancelInfo::TDmaCancelInfo()
       
    81 	: iSem(0)
       
    82 	{
       
    83 	iNext = this;
       
    84 	iPrev = this;
       
    85 	}
       
    86 
       
    87 
       
    88 void TDmaCancelInfo::Signal()
       
    89 	{
       
    90 	TDmaCancelInfo* p = this;
       
    91 	FOREVER
       
    92 		{
       
    93 		TDmaCancelInfo* next = (TDmaCancelInfo*)p->iNext;
       
    94 		if (p!=next)
       
    95 			p->Deque();
       
    96 		NKern::FSSignal(&p->iSem);	// Don't dereference p after this
       
    97 		if (p==next)
       
    98 			break;
       
    99 		p = next;
       
   100 		}
       
   101 	}
       
   102 
       
   103 
       
   104 //////////////////////////////////////////////////////////////////////////////
       
   105 
       
   106 #ifdef __DMASIM__
       
   107 #ifdef __WINS__
       
   108 typedef TLinAddr TPhysAddr;
       
   109 #endif
       
   110 static inline TPhysAddr LinToPhys(TLinAddr aLin) {return aLin;}
       
   111 #else
       
   112 static inline TPhysAddr LinToPhys(TLinAddr aLin) {return Epoc::LinearToPhysical(aLin);}
       
   113 #endif
       
   114 
       
   115 //
       
   116 // Return minimum of aMaxSize and size of largest physically contiguous block
       
   117 // starting at aLinAddr.
       
   118 //
       
   119 static TUint MaxPhysSize(TLinAddr aLinAddr, const TUint aMaxSize)
       
   120 	{
       
   121 	const TPhysAddr physBase = LinToPhys(aLinAddr);
       
   122 	__DMA_ASSERTD(physBase != KPhysAddrInvalid);
       
   123 	TLinAddr lin = aLinAddr;
       
   124 	TUint size = 0;
       
   125 	for (;;)
       
   126 		{
       
   127 		// Round up the linear address to the next MMU page boundary
       
   128 		const TLinAddr linBoundary = Kern::RoundToPageSize(lin + 1);
       
   129 		size += linBoundary - lin;
       
   130 		if (size >= aMaxSize)
       
   131 			return aMaxSize;
       
   132 		if ((physBase + size) != LinToPhys(linBoundary))
       
   133 			return size;
       
   134 		lin = linBoundary;
       
   135 		}
       
   136 	}
       
   137 
       
   138 
       
   139 //////////////////////////////////////////////////////////////////////////////
       
   140 // TDmac
       
   141 
       
   142 TDmac::TDmac(const SCreateInfo& aInfo)
       
   143 	: iMaxDesCount(aInfo.iDesCount),
       
   144 	  iAvailDesCount(aInfo.iDesCount),
       
   145 	  iHdrPool(NULL),
       
   146 #ifndef __WINS__
       
   147 	  iHwDesChunk(NULL),
       
   148 #endif
       
   149 	  iDesPool(NULL),
       
   150 	  iDesSize(aInfo.iDesSize),
       
   151 	  iCapsHwDes(aInfo.iCapsHwDes),
       
   152 	  iFreeHdr(NULL)
       
   153 	{
       
   154 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::TDmac"));
       
   155 	__DMA_ASSERTD(iMaxDesCount > 0);
       
   156 	__DMA_ASSERTD(iDesSize > 0);
       
   157 	}
       
   158 
       
   159 
       
   160 //
       
   161 // Second-phase c'tor
       
   162 //
       
   163 TInt TDmac::Create(const SCreateInfo& aInfo)
       
   164 	{
       
   165 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::Create"));
       
   166 	iHdrPool = new SDmaDesHdr[iMaxDesCount];
       
   167 	if (iHdrPool == NULL)
       
   168 		{
       
   169 		return KErrNoMemory;
       
   170 		}
       
   171 
       
   172 	TInt r = AllocDesPool(aInfo.iDesChunkAttribs);
       
   173 	if (r != KErrNone)
       
   174 		{
       
   175 		return KErrNoMemory;
       
   176 		}
       
   177 
       
   178 	// Link all descriptor headers together on the free list
       
   179 	iFreeHdr = iHdrPool;
       
   180 	for (TInt i = 0; i < iMaxDesCount - 1; i++)
       
   181 		iHdrPool[i].iNext = iHdrPool + i + 1;
       
   182 	iHdrPool[iMaxDesCount-1].iNext = NULL;
       
   183 
       
   184 	__DMA_INVARIANT();
       
   185 	return KErrNone;
       
   186 	}
       
   187 
       
   188 
       
   189 TDmac::~TDmac()
       
   190 	{
       
   191 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::~TDmac"));
       
   192 	__DMA_INVARIANT();
       
   193 
       
   194 	FreeDesPool();
       
   195 	delete[] iHdrPool;
       
   196 	}
       
   197 
       
   198 
       
   199 void TDmac::Transfer(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aHdr*/)
       
   200 	{
       
   201 	// TDmac needs to override this function if it has reported the channel
       
   202 	// type for which the PIL calls it.
       
   203 	__DMA_UNREACHABLE_DEFAULT();
       
   204 	}
       
   205 
       
   206 
       
   207 void TDmac::Transfer(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aSrcHdr*/,
       
   208 					 const SDmaDesHdr& /*aDstHdr*/)
       
   209 	{
       
   210 	// TDmac needs to override this function if it has reported the channel
       
   211 	// type for which the PIL calls it.
       
   212 	__DMA_UNREACHABLE_DEFAULT();
       
   213 	}
       
   214 
       
   215 
       
   216 TInt TDmac::PauseTransfer(const TDmaChannel& /*aChannel*/)
       
   217 	{
       
   218 	// TDmac needs to override this function if it has reported support for
       
   219 	// channel pausing/resuming.
       
   220 	return KErrNotSupported;
       
   221 	}
       
   222 
       
   223 
       
   224 TInt TDmac::ResumeTransfer(const TDmaChannel& /*aChannel*/)
       
   225 	{
       
   226 	// TDmac needs to override this function if it has reported support for
       
   227 	// channel pausing/resuming.
       
   228 	return KErrNotSupported;
       
   229 	}
       
   230 
       
   231 
       
   232 TInt TDmac::AllocDesPool(TUint aAttribs)
       
   233 	{
       
   234 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::AllocDesPool"));
       
   235 	// Calling thread must be in CS
       
   236 	__ASSERT_CRITICAL;
       
   237 	TInt r;
       
   238 	if (iCapsHwDes)
       
   239 		{
       
   240 		const TInt size = iMaxDesCount * iDesSize;
       
   241 #ifdef __WINS__
       
   242 		(void)aAttribs;
       
   243 		iDesPool = new TUint8[size];
       
   244 		r = iDesPool ? KErrNone : KErrNoMemory;
       
   245 #else
       
    246 		// Chunk must be mapped supervisor r/w, user none - otherwise the PSL passed an incorrect attribute mask
       
   247 		__DMA_ASSERTD((aAttribs & EMapAttrAccessMask) == EMapAttrSupRw);
       
   248 		TPhysAddr phys;
       
   249 		r = Epoc::AllocPhysicalRam(size, phys);
       
   250 		if (r == KErrNone)
       
   251 			{
       
   252 			r = DPlatChunkHw::New(iHwDesChunk, phys, size, aAttribs);
       
   253 			if (r == KErrNone)
       
   254 				{
       
   255 				iDesPool = (TAny*)iHwDesChunk->LinearAddress();
       
   256 				__KTRACE_OPT(KDMA, Kern::Printf("descriptor hw chunk created lin=0x%08X phys=0x%08X, size=0x%X",
       
   257 												iHwDesChunk->iLinAddr, iHwDesChunk->iPhysAddr, size));
       
   258 				}
       
   259 			else
       
   260 				Epoc::FreePhysicalRam(phys, size);
       
   261 			}
       
   262 #endif
       
   263 		}
       
   264 	else
       
   265 		{
       
   266 		iDesPool = Kern::Alloc(iMaxDesCount * sizeof(TDmaTransferArgs));
       
   267 		r = iDesPool ? KErrNone : KErrNoMemory;
       
   268 		}
       
   269 	return r;
       
   270 	}
       
   271 
       
   272 
       
   273 void TDmac::FreeDesPool()
       
   274 	{
       
   275 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::FreeDesPool"));
       
   276 	// Calling thread must be in CS
       
   277 	__ASSERT_CRITICAL;
       
   278 	if (iCapsHwDes)
       
   279 		{
       
   280 #ifdef __WINS__
       
   281 		delete[] iDesPool;
       
   282 #else
       
   283 		if (iHwDesChunk)
       
   284 			{
       
   285 			const TPhysAddr phys = iHwDesChunk->PhysicalAddress();
       
   286 			const TInt size = iHwDesChunk->iSize;
       
   287 			iHwDesChunk->Close(NULL);
       
   288 			Epoc::FreePhysicalRam(phys, size);
       
   289 			}
       
   290 #endif
       
   291 		}
       
   292 	else
       
   293 		{
       
   294 		Kern::Free(iDesPool);
       
   295 		}
       
   296 	}
       
   297 
       
   298 
       
   299 //
       
   300 // Prealloc the given number of descriptors.
       
   301 //
       
   302 TInt TDmac::ReserveSetOfDes(TInt aCount)
       
   303 	{
       
   304 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::ReserveSetOfDes count=%d", aCount));
       
   305 	__DMA_ASSERTD(aCount > 0);
       
   306 	TInt r = KErrTooBig;
       
   307 	Wait();
       
   308 	if (iAvailDesCount - aCount >= 0)
       
   309 		{
       
   310 		iAvailDesCount -= aCount;
       
   311 		r = KErrNone;
       
   312 		}
       
   313 	Signal();
       
   314 	__DMA_INVARIANT();
       
   315 	return r;
       
   316 	}
       
   317 
       
   318 
       
   319 //
       
   320 // Return the given number of preallocated descriptors to the free pool.
       
   321 //
       
   322 void TDmac::ReleaseSetOfDes(TInt aCount)
       
   323 	{
       
   324 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::ReleaseSetOfDes count=%d", aCount));
       
   325 	__DMA_ASSERTD(aCount >= 0);
       
   326 	Wait();
       
   327 	iAvailDesCount += aCount;
       
   328 	Signal();
       
   329 	__DMA_INVARIANT();
       
   330 	}
       
   331 
       
   332 
       
   333 //
       
   334 // Queue DFC and update word used to communicate with channel DFC.
       
   335 //
       
   336 // Called in interrupt context by PSL.
       
   337 //
       
   338 void TDmac::HandleIsr(TDmaChannel& aChannel, TUint aEventMask, TBool aIsComplete)
       
   339 	{
       
   340 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::HandleIsr"));
       
   341 
       
   342 	// Function needs to be called by PSL in ISR context
       
   343 	__DMA_ASSERTD(NKern::CurrentContext() == NKern::EInterrupt);
       
   344 
       
   345 	// First the ISR callback stuff
       
   346 
       
   347 	// Is this a transfer completion notification?
       
   348 	if (aEventMask & EDmaCallbackRequestCompletion)
       
   349 		{
       
   350 		// If so, has the client requested an ISR callback?
       
   351 		if (__e32_atomic_load_acq32(&aChannel.iIsrCbRequest))
       
   352 			{
       
   353 			__KTRACE_OPT(KDMA, Kern::Printf("ISR callback"));
       
   354 
       
   355 			// Since iIsrCbRequest was set no threads will be
       
   356 			// modifying the request queue.
       
   357 			const DDmaRequest* const req = _LOFF(aChannel.iReqQ.First(), DDmaRequest, iLink);
       
   358 
       
   359 			// We expect the request to have requested
       
   360 			// ISR callback
       
   361 			__NK_ASSERT_DEBUG(req->iIsrCb);
       
   362 
       
   363 			TDmaCallback const cb = req->iDmaCb;
       
   364 			TAny* const arg = req->iDmaCbArg;
       
   365 			// Execute the client callback
       
   366 			(*cb)(EDmaCallbackRequestCompletion,
       
   367 				  (aIsComplete ? EDmaResultOK : EDmaResultError),
       
   368 				  arg,
       
   369 				  NULL);
       
   370 			// Now let's see if the callback rescheduled the transfer request
       
   371 			// (see TDmaChannel::IsrRedoRequest()).
       
   372 			const TBool redo = aChannel.iRedoRequest;
       
   373 			aChannel.iRedoRequest = EFalse;
       
   374 			const TBool stop = __e32_atomic_load_acq32(&aChannel.iIsrDfc) &
       
   375 				(TUint32)TDmaChannel::KCancelFlagMask;
       
   376 			// There won't be another ISR callback if this callback didn't
       
   377 			// reschedule the request, or the client cancelled all requests, or
       
   378 			// this callback rescheduled the request with a DFC callback.
       
   379 			if (!redo || stop || !req->iIsrCb)
       
   380 				{
       
   381 				__e32_atomic_store_rel32(&aChannel.iIsrCbRequest, EFalse);
       
   382 				}
       
   383 			if (redo && !stop)
       
   384 				{
       
   385 				// We won't queue the channel DFC in this case and just return.
       
   386 				__KTRACE_OPT(KDMA, Kern::Printf("CB rescheduled xfer -> no DFC"));
       
   387 				return;
       
   388 				}
       
   389 			// Not redoing or being cancelled means we've been calling the
       
   390 			// request's ISR callback for the last time. We're going to
       
   391 			// complete the request via the DFC in the usual way.
       
   392 			}
       
   393 		}
       
   394 	else
       
   395 		{
       
   396 		// The PIL doesn't support yet any completion types other than
       
   397 		// EDmaCallbackRequestCompletion.
       
   398 		__DMA_CANT_HAPPEN();
       
   399 		}
       
   400 
       
   401 	// Now queue a DFC if necessary. The possible scenarios are:
       
   402 	// a) DFC not queued (orig == 0)              -> update iIsrDfc + queue DFC
       
   403 	// b) DFC queued, not running yet (orig != 0) -> just update iIsrDfc
       
   404 	// c) DFC running / iIsrDfc not reset yet (orig != 0) -> just update iIsrDfc
       
   405 	// d) DFC running / iIsrDfc already reset (orig == 0) -> update iIsrDfc + requeue DFC
       
   406 
       
   407 	// Set error flag if necessary.
       
   408 	const TUint32 inc = aIsComplete ? 1u : TUint32(TDmaChannel::KErrorFlagMask) | 1u;
       
   409 
       
   410 	// Add 'inc' (interrupt count increment + poss. error flag) to 'iIsrDfc' if
       
   411 	// cancel flag is not set, do nothing otherwise. Assign original value of
       
   412 	// 'iIsrDfc' to 'orig' in any case.
       
   413 	const TUint32 orig = __e32_atomic_tau_ord32(&aChannel.iIsrDfc,
       
   414 												TUint32(TDmaChannel::KCancelFlagMask),
       
   415 												0,
       
   416 												inc);
       
   417 
       
    418 	// As the transfer should be suspended when an error occurs, we
        
    419 	// should never get here with the error flag already set.
       
   420 	__DMA_ASSERTD((orig & inc & (TUint32)TDmaChannel::KErrorFlagMask) == 0);
       
   421 
       
   422 	if (orig == 0)
       
   423 		{
       
   424 		aChannel.iDfc.Add();
       
   425 		}
       
   426 	}
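//
// For illustration - how a PSL's ISR typically feeds into HandleIsr(). Only
// the call into the PIL is real; TExampleDmac, ReadAndClearStatus() and
// KExampleErrorBit are hypothetical hardware-specific names.
/*
void TExampleDmac::Isr(TAny* aArg)
	{
	TDmaChannel& channel = *static_cast<TDmaChannel*>(aArg);
	// Read (and acknowledge) the channel's interrupt status register.
	const TUint32 status = TheExampleDmac.ReadAndClearStatus(channel);
	const TBool ok = ((status & KExampleErrorBit) == 0);
	// Report a completion event; the PIL then queues the channel DFC and/or
	// runs the client's ISR callback exactly as described in HandleIsr() above.
	TheExampleDmac.HandleIsr(channel, EDmaCallbackRequestCompletion, ok);
	}
*/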
       
   427 
       
   428 
       
   429 TInt TDmac::InitDes(const SDmaDesHdr& aHdr, const TDmaTransferArgs& aTransferArgs)
       
   430 	{
       
   431 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::InitDes"));
       
   432 	TInt r;
       
   433 	if (iCapsHwDes)
       
   434 		{
       
   435 		__KTRACE_OPT(KDMA, Kern::Printf("iCaps.iHwDescriptors"));
       
   436 		r = InitHwDes(aHdr, aTransferArgs);
       
   437 		}
       
   438 	else
       
   439 		{
       
   440 		TDmaTransferArgs& args = HdrToDes(aHdr);
       
   441 		args = aTransferArgs;
       
   442 		r = KErrNone;
       
   443 		}
       
   444 	return r;
       
   445 	}
       
   446 
       
   447 
       
   448 TInt TDmac::InitHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
       
   449 	{
       
   450 	// concrete controller must override if SDmacCaps::iHwDescriptors set
       
   451 	__DMA_UNREACHABLE_DEFAULT();
       
   452 	return KErrGeneral;
       
   453 	}
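//
// A sketch of what a PSL override might look like for a controller that
// reports SDmacCaps::iHwDescriptors. SExampleHwDes and HdrToHwDes() are
// hypothetical; only the signature and the TDmaTransferArgs fields are fixed.
/*
TInt TExampleDmac::InitHwDes(const SDmaDesHdr& aHdr, const TDmaTransferArgs& aTransferArgs)
	{
	// Locate the hardware descriptor that corresponds to this header
	// (for example via the header's index into the iHwDesChunk pool).
	SExampleHwDes& des = HdrToHwDes(aHdr);
	des.iSrcAddr = aTransferArgs.iSrcConfig.iAddr;
	des.iDstAddr = aTransferArgs.iDstConfig.iAddr;
	des.iCount   = aTransferArgs.iTransferCount;
	des.iLink    = 0;							// single descriptor, no chain yet
	// A real PSL would also convert linear to physical addresses where needed
	// and encode the various TDmaTransferArgs flags into hardware bits.
	return KErrNone;
	}
*/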
       
   454 
       
   455 
       
   456 TInt TDmac::InitSrcHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
       
   457 	{
       
   458 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
       
   459 	__DMA_UNREACHABLE_DEFAULT();
       
   460 	return KErrGeneral;
       
   461 	}
       
   462 
       
   463 
       
   464 TInt TDmac::InitDstHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
       
   465 	{
       
   466 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
       
   467 	__DMA_UNREACHABLE_DEFAULT();
       
   468 	return KErrGeneral;
       
   469 	}
       
   470 
       
   471 
       
   472 TInt TDmac::UpdateDes(const SDmaDesHdr& aHdr, TUint32 aSrcAddr, TUint32 aDstAddr,
       
   473 					  TUint aTransferCount, TUint32 aPslRequestInfo)
       
   474 	{
       
   475 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::UpdateDes"));
       
   476 	TInt r;
       
   477 	if (iCapsHwDes)
       
   478 		{
       
   479 		__KTRACE_OPT(KDMA, Kern::Printf("iCaps.iHwDescriptors"));
       
   480 		r = UpdateHwDes(aHdr, aSrcAddr, aDstAddr, aTransferCount, aPslRequestInfo);
       
   481 		}
       
   482 	else
       
   483 		{
       
   484 		TDmaTransferArgs& args = HdrToDes(aHdr);
       
   485 		if (aSrcAddr != KPhysAddrInvalid)
       
   486 			args.iSrcConfig.iAddr = aSrcAddr;
       
   487 		if (aDstAddr != KPhysAddrInvalid)
       
   488 			args.iDstConfig.iAddr = aDstAddr;
       
   489 		if (aTransferCount)
       
   490 			args.iTransferCount = aTransferCount;
       
   491 		if (aPslRequestInfo)
       
   492 			args.iPslRequestInfo = aPslRequestInfo;
       
   493 		r = KErrNone;
       
   494 		}
       
   495 	return r;
       
   496 	}
       
   497 
       
   498 
       
   499 TInt TDmac::UpdateHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrcAddr*/, TUint32 /*aDstAddr*/,
       
   500 						TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
       
   501 	{
       
   502 	// concrete controller must override if SDmacCaps::iHwDescriptors set
       
   503 	__DMA_UNREACHABLE_DEFAULT();
       
   504 	return KErrGeneral;
       
   505 	}
       
   506 
       
   507 
       
   508 TInt TDmac::UpdateSrcHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrcAddr*/,
       
   509 						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
       
   510 	{
       
   511 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
       
   512 	__DMA_UNREACHABLE_DEFAULT();
       
   513 	return KErrGeneral;
       
   514 	}
       
   515 
       
   516 
       
   517 TInt TDmac::UpdateDstHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aDstAddr*/,
       
   518 						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
       
   519 	{
       
   520 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
       
   521 	__DMA_UNREACHABLE_DEFAULT();
       
   522 	return KErrGeneral;
       
   523 	}
       
   524 
       
   525 
       
   526 void TDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHdr*/)
       
   527 	{
       
   528 	// concrete controller must override if SDmacCaps::iHwDescriptors set
       
   529 	__DMA_UNREACHABLE_DEFAULT();
       
   530 	}
       
   531 
       
   532 
       
   533 void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aLastHdr*/,
       
   534 						const SDmaDesHdr& /*aNewHdr*/)
       
   535 	{
       
   536  	// concrete controller must override if SDmacCaps::iHwDescriptors set
       
   537 	__DMA_UNREACHABLE_DEFAULT();
       
   538 	}
       
   539 
       
   540 
       
   541 void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/,
       
   542 						const SDmaDesHdr& /*aSrcLastHdr*/, const SDmaDesHdr& /*aSrcNewHdr*/,
       
   543 						const SDmaDesHdr& /*aDstLastHdr*/, const SDmaDesHdr& /*aDstNewHdr*/)
       
   544 	{
       
   545 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
       
   546 	__DMA_UNREACHABLE_DEFAULT();
       
   547 	}
       
   548 
       
   549 
       
   550 void TDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
       
   551 	{
       
   552  	// concrete controller must override if SDmacCaps::iHwDescriptors set
       
   553 	__DMA_UNREACHABLE_DEFAULT();
       
   554 	}
       
   555 
       
   556 
       
   557 void TDmac::ClearHwDes(const SDmaDesHdr& /*aHdr*/)
       
   558 	{
       
   559 	// default implementation - NOP; concrete controller may override
       
   560 	return;
       
   561 	}
       
   562 
       
   563 
       
   564 TInt TDmac::LinkChannels(TDmaChannel& /*a1stChannel*/, TDmaChannel& /*a2ndChannel*/)
       
   565 	{
       
   566 	// default implementation - NOP; concrete controller may override
       
   567 	return KErrNotSupported;
       
   568 	}
       
   569 
       
   570 
       
   571 TInt TDmac::UnlinkChannel(TDmaChannel& /*aChannel*/)
       
   572 	{
       
   573 	// default implementation - NOP; concrete controller may override
       
   574 	return KErrNotSupported;
       
   575 	}
       
   576 
       
   577 
       
   578 TInt TDmac::FailNext(const TDmaChannel& /*aChannel*/)
       
   579 	{
       
   580 	// default implementation - NOP; concrete controller may override
       
   581 	return KErrNotSupported;
       
   582 	}
       
   583 
       
   584 
       
   585 TInt TDmac::MissNextInterrupts(const TDmaChannel& /*aChannel*/, TInt /*aInterruptCount*/)
       
   586 	{
       
   587 	// default implementation - NOP; concrete controller may override
       
   588 	return KErrNotSupported;
       
   589 	}
       
   590 
       
   591 
       
   592 TInt TDmac::Extension(TDmaChannel& /*aChannel*/, TInt /*aCmd*/, TAny* /*aArg*/)
       
   593 	{
       
   594 	// default implementation - NOP; concrete controller may override
       
   595 	return KErrNotSupported;
       
   596 	}
       
   597 
       
   598 
       
   599 TUint32 TDmac::HwDesNumDstElementsTransferred(const SDmaDesHdr& /*aHdr*/)
       
   600 	{
       
   601  	// Concrete controller must override if SDmacCaps::iHwDescriptors set.
       
   602 	__DMA_UNREACHABLE_DEFAULT();
       
   603 	return 0;
       
   604 	}
       
   605 
       
   606 
       
   607 TUint32 TDmac::HwDesNumSrcElementsTransferred(const SDmaDesHdr& /*aHdr*/)
       
   608 	{
       
   609  	// Concrete controller must override if SDmacCaps::iHwDescriptors set.
       
   610 	__DMA_UNREACHABLE_DEFAULT();
       
   611 	return 0;
       
   612 	}
       
   613 
       
   614 
       
   615 #ifdef _DEBUG
       
   616 
       
   617 void TDmac::Invariant()
       
   618 	{
       
   619 	Wait();
       
   620 	__DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);
       
   621 	__DMA_ASSERTD(!iFreeHdr || IsValidHdr(iFreeHdr));
       
   622 	for (TInt i = 0; i < iMaxDesCount; i++)
       
   623 		__DMA_ASSERTD(iHdrPool[i].iNext == NULL || IsValidHdr(iHdrPool[i].iNext));
       
   624 	Signal();
       
   625 	}
       
   626 
       
   627 
       
   628 TBool TDmac::IsValidHdr(const SDmaDesHdr* aHdr)
       
   629 	{
       
   630 	return (iHdrPool <= aHdr) && (aHdr < iHdrPool + iMaxDesCount);
       
   631 	}
       
   632 
       
   633 #endif
       
   634 
       
   635 
       
   636 //
       
   637 // Internal compat version, used by legacy Fragment()
       
   638 //
       
   639 TDmaTransferConfig::TDmaTransferConfig(TUint32 aAddr, TUint aFlags, TBool aAddrInc)
       
   640 	: iAddr(aAddr),
       
   641 	  iAddrMode(aAddrInc ? KDmaAddrModePostIncrement : KDmaAddrModeConstant),
       
   642 	  iElementSize(0),
       
   643 	  iElementsPerFrame(0),
       
   644 	  iElementsPerPacket(0),
       
   645 	  iFramesPerTransfer(0),
       
   646 	  iElementSkip(0),
       
   647 	  iFrameSkip(0),
       
   648 	  iBurstSize(KDmaBurstSizeAny),
       
   649 	  iFlags(aFlags),
       
   650 	  iSyncFlags(KDmaSyncAuto),
       
   651 	  iPslTargetInfo(0),
       
   652 	  iRepeatCount(0),
       
   653 	  iDelta(~0u),
       
   654 	  iReserved(0)
       
   655 	{
       
   656 	__KTRACE_OPT(KDMA,
       
   657 				 Kern::Printf("TDmaTransferConfig::TDmaTransferConfig "
       
   658 							  "aAddr=0x%08X aFlags=0x%08X aAddrInc=%d",
       
   659 							  aAddr, aFlags, aAddrInc));
       
   660 	}
       
   661 
       
   662 
       
   663 //
       
   664 // Internal compat version, used by legacy Fragment()
       
   665 //
       
   666 TDmaTransferArgs::TDmaTransferArgs(TUint32 aSrc, TUint32 aDest, TInt aCount,
       
   667 								   TUint aFlags, TUint32 aPslInfo)
       
   668 	: iSrcConfig(aSrc, RequestFlags2SrcConfigFlags(aFlags), (aFlags & KDmaIncSrc)),
       
   669 	  iDstConfig(aDest, RequestFlags2DstConfigFlags(aFlags), (aFlags & KDmaIncDest)),
       
   670 	  iTransferCount(aCount),
       
   671 	  iGraphicsOps(KDmaGraphicsOpNone),
       
   672 	  iColour(0),
       
   673 	  iFlags(0),
       
   674 	  iChannelPriority(KDmaPriorityNone),
       
   675 	  iPslRequestInfo(aPslInfo),
       
   676 	  iChannelCookie(0),
       
   677 	  iDelta(~0u),
       
   678 	  iReserved1(0),
       
   679 	  iReserved2(0)
       
   680 	{
       
   681 	__KTRACE_OPT(KDMA,
       
   682 				 Kern::Printf("TDmaTransferArgs::TDmaTransferArgs"));
       
   683 	__KTRACE_OPT(KDMA,
       
   684 				 Kern::Printf("  aSrc=0x%08X aDest=0x%08X aCount=%d aFlags=0x%08X aPslInfo=0x%08X",
       
   685 							  aSrc, aDest, aCount, aFlags, aPslInfo));
       
   686 	}
       
   687 
       
   688 
       
   689 //
       
   690 // As DDmaRequest is derived from DBase, the initializations with zero aren't
       
   691 // strictly necessary here, but this way it's nicer.
       
   692 //
       
   693 EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TCallback aCb,
       
   694 								  TAny* aCbArg, TInt aMaxTransferSize)
       
   695 	: iChannel(aChannel),
       
   696 	  iCb(aCb),
       
   697 	  iCbArg(aCbArg),
       
   698 	  iDmaCb(NULL),
       
   699 	  iDmaCbArg(NULL),
       
   700 	  iIsrCb(EFalse),
       
   701 	  iDesCount(0),
       
   702 	  iFirstHdr(NULL),
       
   703 	  iLastHdr(NULL),
       
   704 	  iSrcDesCount(0),
       
   705 	  iSrcFirstHdr(NULL),
       
   706 	  iSrcLastHdr(NULL),
       
   707 	  iDstDesCount(0),
       
   708 	  iDstFirstHdr(NULL),
       
   709 	  iDstLastHdr(NULL),
       
   710 	  iQueued(EFalse),
       
   711 	  iMaxTransferSize(aMaxTransferSize),
       
   712 	  iTotalNumSrcElementsTransferred(0),
       
   713 	  iTotalNumDstElementsTransferred(0)
       
   714 	{
       
   715 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::DDmaRequest =0x%08X (old style)", this));
       
   716 	iChannel.iReqCount++;
       
   717 	__DMA_ASSERTD(0 <= aMaxTransferSize);
       
   718 	__DMA_INVARIANT();
       
   719 	}
       
   720 
       
   721 
       
   722 //
       
   723 // As DDmaRequest is derived from DBase, the initializations with zero aren't
       
   724 // strictly necessary here, but this way it's nicer.
       
   725 //
       
   726 EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TDmaCallback aDmaCb,
       
   727 								  TAny* aCbArg, TUint aMaxTransferSize)
       
   728 	: iChannel(aChannel),
       
   729 	  iCb(NULL),
       
   730 	  iCbArg(NULL),
       
   731 	  iDmaCb(aDmaCb),
       
   732 	  iDmaCbArg(aCbArg),
       
   733 	  iIsrCb(EFalse),
       
   734 	  iDesCount(0),
       
   735 	  iFirstHdr(NULL),
       
   736 	  iLastHdr(NULL),
       
   737 	  iSrcDesCount(0),
       
   738 	  iSrcFirstHdr(NULL),
       
   739 	  iSrcLastHdr(NULL),
       
   740 	  iDstDesCount(0),
       
   741 	  iDstFirstHdr(NULL),
       
   742 	  iDstLastHdr(NULL),
       
   743 	  iQueued(EFalse),
       
   744 	  iMaxTransferSize(aMaxTransferSize),
       
   745 	  iTotalNumSrcElementsTransferred(0),
       
   746 	  iTotalNumDstElementsTransferred(0)
       
   747 	{
       
   748 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::DDmaRequest =0x%08X (new style)", this));
       
   749 	__e32_atomic_add_ord32(&iChannel.iReqCount, 1);
       
   750 	__DMA_INVARIANT();
       
   751 	}
       
   752 
       
   753 
       
   754 EXPORT_C DDmaRequest::~DDmaRequest()
       
   755 	{
       
   756 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::~DDmaRequest"));
       
   757 	__DMA_ASSERTD(!iQueued);
       
   758 	__DMA_INVARIANT();
       
   759 	if (iChannel.iDmacCaps->iAsymHwDescriptors)
       
   760 		{
       
   761 		FreeSrcDesList();
       
   762 		FreeDstDesList();
       
   763 		}
       
   764 	else
       
   765 		{
       
   766 		FreeDesList();
       
   767 		}
       
   768 	__e32_atomic_add_ord32(&iChannel.iReqCount, TUint32(-1));
       
   769 	}
       
   770 
       
   771 
       
   772 EXPORT_C TInt DDmaRequest::Fragment(TUint32 aSrc, TUint32 aDest, TInt aCount,
       
   773 									TUint aFlags, TUint32 aPslInfo)
       
   774 	{
       
   775 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O (old style)",
       
   776 									&Kern::CurrentThread()));
       
   777 
       
   778 	__DMA_ASSERTD(aCount > 0);
       
   779 
       
   780 	TDmaTransferArgs args(aSrc, aDest, aCount, aFlags, aPslInfo);
       
   781 
       
   782 	return Frag(args);
       
   783 	}
       
   784 
       
   785 
       
   786 EXPORT_C TInt DDmaRequest::Fragment(const TDmaTransferArgs& aTransferArgs)
       
   787 	{
       
   788 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O (new style)",
       
   789 									&Kern::CurrentThread()));
       
   790 
       
   791 	// Writable temporary working copy of the transfer arguments.
       
   792 	// We need this because we may have to modify some fields before passing it
       
   793 	// to the PSL (for example iChannelCookie, iTransferCount,
       
   794 	// iDstConfig::iAddr, and iSrcConfig::iAddr).
       
   795 	TDmaTransferArgs args(aTransferArgs);
       
   796 
       
   797 	return Frag(args);
       
   798 	}
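//
// Typical client-side sequence (a sketch only - the channel cookie, DFC queue,
// buffer addresses, sizes, flag combination and the XferDone callback are
// illustrative, not part of this file): open a channel, create a request,
// fragment it with the old-style overload above, then queue it.
/*
	void XferDone(DDmaRequest::TResult aResult, TAny* aArg);	// old-style callback

	TDmaChannel::SCreateInfo info;
	info.iCookie = KExampleChannelId;		// PSL-specific channel identifier
	info.iDesCount = 8;						// descriptors reserved for this channel
	info.iDfcQ = Kern::DfcQue0();
	info.iDfcPriority = 3;
	TDmaChannel* channel = NULL;
	TInt r = TDmaChannel::Open(info, channel);
	if (r == KErrNone)
		{
		DDmaRequest* req = new DDmaRequest(*channel, &XferDone, NULL);
		if (req)
			{
			// srcAddr/dstAddr: linear addresses of DMA-safe buffers.
			// Legacy flags: memory-to-memory with incrementing addresses.
			r = req->Fragment(srcAddr, dstAddr, KExampleBytes,
							  KDmaMemSrc | KDmaMemDest | KDmaIncSrc | KDmaIncDest, 0);
			if (r == KErrNone)
				r = req->Queue();			// hand the fragments to the DMAC
			}
		}
*/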
       
   799 
       
   800 
       
   801 TInt DDmaRequest::CheckTransferConfig(const TDmaTransferConfig& aTarget, TUint aCount) const
       
   802 	{
       
   803 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::CheckTransferConfig"));
       
   804 
       
   805 	if (aTarget.iElementSize != 0)
       
   806 		{
       
   807 		if ((aCount % aTarget.iElementSize) != 0)
       
   808 			{
       
   809 			// 2, 7 (These strange numbers refer to some test cases documented
       
   810 			// elsewhere - they will be removed eventually.)
       
   811 			__KTRACE_OPT(KPANIC,
       
   812 						 Kern::Printf("Error: ((aCount %% iElementSize) != 0)"));
       
   813 			return KErrArgument;
       
   814 			}
       
   815 		if (aTarget.iElementsPerFrame != 0)
       
   816 			{
       
   817 			if ((aTarget.iElementSize * aTarget.iElementsPerFrame *
       
   818 				 aTarget.iFramesPerTransfer) != aCount)
       
   819 				{
       
   820 				// 3, 8
       
   821 				__KTRACE_OPT(KPANIC,
       
   822 							 Kern::Printf("Error: ((iElementSize * "
       
   823 										  "iElementsPerFrame * "
       
   824 										  "iFramesPerTransfer) != aCount)"));
       
   825 				return KErrArgument;
       
   826 				}
       
   827 			}
       
   828 		}
       
   829 	else
       
   830 		{
       
   831 		if (aTarget.iElementsPerFrame != 0)
       
   832 			{
       
   833 			// 4, 9
       
   834 			__KTRACE_OPT(KPANIC,
       
   835 						 Kern::Printf("Error: (iElementsPerFrame != 0)"));
       
   836 			return KErrArgument;
       
   837 			}
       
   838 		if (aTarget.iFramesPerTransfer != 0)
       
   839 			{
       
   840 			// 5, 10
       
   841 			__KTRACE_OPT(KPANIC,
       
   842 						 Kern::Printf("Error: (iFramesPerTransfer != 0)"));
       
   843 			return KErrArgument;
       
   844 			}
       
   845 		if (aTarget.iElementsPerPacket != 0)
       
   846 			{
       
   847 			// 6, 11
       
   848 			__KTRACE_OPT(KPANIC,
       
   849 						 Kern::Printf("Error: (iElementsPerPacket != 0)"));
       
   850 			return KErrArgument;
       
   851 			}
       
   852 		}
       
   853 	return KErrNone;
       
   854 	}
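//
// For example, a transfer config with iElementSize = 4, iElementsPerFrame = 16
// and iFramesPerTransfer = 8 is only consistent with aCount = 4 * 16 * 8 = 512
// bytes; any other aCount (or one that isn't a multiple of 4) fails with
// KErrArgument.
//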
       
   855 
       
   856 
       
   857 TInt DDmaRequest::CheckMemFlags(const TDmaTransferConfig& aTarget) const
       
   858 	{
       
   859 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::CheckMemFlags"));
       
   860 
       
   861 	const TBool mem_target = (aTarget.iFlags & KDmaMemAddr);
       
   862 
       
   863 	if (mem_target && (aTarget.iFlags & KDmaPhysAddr) && !(aTarget.iFlags & KDmaMemIsContiguous))
       
   864 		{
       
   865 		// Physical memory address implies contiguous range
       
   866 		// 13, 15
       
   867 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: mem_target && KDmaPhysAddr && !KDmaMemIsContiguous"));
       
   868 		return KErrArgument;
       
   869 		}
       
   870 	else if ((aTarget.iFlags & KDmaMemIsContiguous) && !mem_target)
       
   871 		{
       
   872 		// Contiguous range implies memory address
       
   873 		// 14, 16
       
   874 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: KDmaMemIsContiguous && !mem_target"));
       
   875 		return KErrArgument;
       
   876 		}
       
   877 	return KErrNone;
       
   878 	}
       
   879 
       
   880 
       
   881 // Makes sure an element or frame never straddles two DMA subtransfer
       
   882 // fragments. This would be a fragmentation error by the PIL.
       
   883 //
       
   884 TInt DDmaRequest::AdjustFragmentSize(TUint& aFragSize, TUint aElementSize,
       
   885 									 TUint aFrameSize)
       
   886 	{
       
   887 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::AdjustFragmentSize FragSize=%d ES=%d FS=%d",
       
   888 									aFragSize, aElementSize, aFrameSize));
       
   889 
       
   890 	TUint rem = 0;
       
   891 	TInt r = KErrNone;
       
   892 
       
   893 	FOREVER
       
   894 		{
       
   895 		// If an element size is defined, make sure the fragment size is
       
   896 		// greater or equal.
       
   897 		if (aElementSize)
       
   898 			{
       
   899 			if (aFragSize < aElementSize)
       
   900 				{
       
   901 				__KTRACE_OPT(KPANIC, Kern::Printf("Error: aFragSize < aElementSize"));
       
   902 				r = KErrArgument;
       
   903 				break;
       
   904 				}
       
   905 			}
       
   906 		// If a frame size is defined, make sure the fragment size is greater
       
   907 		// or equal.
       
   908 		if (aFrameSize)
       
   909 			{
       
   910 			if (aFragSize < aFrameSize)
       
   911 				{
       
   912 				__KTRACE_OPT(KPANIC, Kern::Printf("Error: aFragSize < aFrameSize"));
       
   913 				r = KErrArgument;
       
   914 				break;
       
   915 				}
       
   916 			}
       
   917 		// If a frame size is defined, make sure the fragment ends on a frame
       
   918 		// boundary.
       
   919 		if (aFrameSize)
       
   920 			{
       
   921 			rem = aFragSize % aFrameSize;
       
   922 			if (rem != 0)
       
   923 				{
       
   924 				aFragSize -= rem;
       
   925 				// 20, 22
       
   926 				__KTRACE_OPT(KDMA, Kern::Printf("aFragSize %% aFrameSize != 0 --> aFragSize = %d",
       
   927 												aFragSize));
       
   928 				// aFragSize has changed, so we have to do all the checks
       
   929 				// again.
       
   930 				continue;
       
   931 				}
       
   932 			}
       
   933 		// If an element size is defined, make sure the fragment ends on an
       
   934 		// element boundary.
       
   935 		if (aElementSize)
       
   936 			{
       
   937 			rem = aFragSize % aElementSize;
       
   938 			if (rem != 0)
       
   939 				{
       
   940 				aFragSize -= rem;
       
   941 				// 21, 23
       
   942 				__KTRACE_OPT(KDMA, Kern::Printf("aFragSize %% aElementSize != 0 --> aFragSize = %d",
       
   943 												aFragSize));
       
   944 				// aFragSize has changed, so we have to do all the checks
       
   945 				// again.
       
   946 				continue;
       
   947 				}
       
   948 			}
       
   949 		// Done - all checks passed. Let's get out.
       
   950 		break;
       
   951 		}
       
   952 
       
   953 	return r;
       
   954 	}
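//
// Example: with aElementSize = 16 and aFrameSize = 48, a proposed fragment
// size of 1000 bytes is first cut back to 960 (the largest multiple of the
// 48-byte frame), which is also a multiple of 16, so the loop terminates with
// aFragSize = 960 and returns KErrNone.
//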
       
   955 
       
   956 
       
   957 TUint DDmaRequest::GetTransferCount(const TDmaTransferArgs& aTransferArgs) const
       
   958 	{
       
   959 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::GetTransferCount"));
       
   960 
       
   961 	const TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
       
   962 #ifdef _DEBUG
       
   963 	const TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
       
   964 #endif	// #ifdef _DEBUG
       
   965 
       
   966 	TUint count = aTransferArgs.iTransferCount;
       
   967 	if (count == 0)
       
   968 		{
       
   969 		__KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == 0"));
       
   970 		count = src.iElementSize * src.iElementsPerFrame *
       
   971 			src.iFramesPerTransfer;
       
   972 #ifdef _DEBUG
       
   973 		const TUint dst_cnt = dst.iElementSize * dst.iElementsPerFrame *
       
   974 			dst.iFramesPerTransfer;
       
   975 		if (count != dst_cnt)
       
   976 			{
       
   977 			// 1
       
   978 			__KTRACE_OPT(KPANIC, Kern::Printf("Error: (count != dst_cnt)"));
       
   979 			return 0;
       
   980 			}
       
   981 #endif	// #ifdef _DEBUG
       
   982 		}
       
   983 	else
       
   984 		{
       
   985 		__KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == %d", count));
       
   986 #ifdef _DEBUG
       
   987 		// Client shouldn't specify contradictory or incomplete things
       
   988 		if (CheckTransferConfig(src, count) != KErrNone)
       
   989 			{
       
   990 			__KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckTransferConfig(src)"));
       
   991 			return 0;
       
   992 			}
       
   993 		if (CheckTransferConfig(dst, count) != KErrNone)
       
   994 			{
       
   995 			__KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckTransferConfig(dst)"));
       
   996 			return 0;
       
   997 			}
       
   998 #endif	// #ifdef _DEBUG
       
   999 		}
       
  1000 	return count;
       
  1001 	}
       
  1002 
       
  1003 
       
  1004 TUint DDmaRequest::GetMaxTransferlength(const TDmaTransferArgs& aTransferArgs, TUint aCount) const
       
  1005 	{
       
  1006 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::GetMaxTransferlength"));
       
  1007 
       
  1008 	const TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
       
  1009 	const TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
       
  1010 
       
  1011 	// Ask the PSL what the maximum length is for a single transfer
       
  1012 	TUint max_xfer_len = iChannel.MaxTransferLength(src.iFlags, dst.iFlags,
       
  1013 													aTransferArgs.iPslRequestInfo);
       
  1014 	if (iMaxTransferSize)
       
  1015 		{
       
  1016 		// (User has set a transfer size cap)
       
  1017 		__KTRACE_OPT(KDMA, Kern::Printf("iMaxTransferSize: %d", iMaxTransferSize));
       
  1018 		if ((max_xfer_len != 0) && (iMaxTransferSize > max_xfer_len))
       
  1019 			{
       
  1020 			// Not really an error, but still...
       
  1021 			__KTRACE_OPT(KPANIC, Kern::Printf("Warning: iMaxTransferSize > max_xfer_len"));
       
  1022 			}
       
  1023 		max_xfer_len = iMaxTransferSize;
       
  1024 		}
       
  1025 	else
       
  1026 		{
       
  1027 		// (User doesn't care about max transfer size)
       
  1028 		if (max_xfer_len == 0)
       
  1029 			{
       
  1030 			// '0' = no maximum imposed by controller
       
  1031 			max_xfer_len = aCount;
       
  1032 			}
       
  1033 		}
       
  1034 	__KTRACE_OPT(KDMA, Kern::Printf("max_xfer_len: %d", max_xfer_len));
       
  1035 
       
  1036 	// Some sanity checks
       
  1037 #ifdef _DEBUG
       
  1038 	if ((max_xfer_len < src.iElementSize) || (max_xfer_len < dst.iElementSize))
       
  1039 		{
       
  1040 		// 18
       
  1041 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: max_xfer_len < iElementSize"));
       
  1042 		return 0;
       
  1043 		}
       
  1044 	if ((max_xfer_len < (src.iElementSize * src.iElementsPerFrame)) ||
       
  1045 		(max_xfer_len < (dst.iElementSize * dst.iElementsPerFrame)))
       
  1046 		{
       
  1047 		// 19
       
  1048 		__KTRACE_OPT(KPANIC,
       
  1049 					 Kern::Printf("Error: max_xfer_len < (iElementSize * iElementsPerFrame)"));
       
  1050 		return 0;
       
  1051 		}
       
  1052 #endif	// #ifdef _DEBUG
       
  1053 
       
  1054 	return max_xfer_len;
       
  1055 	}
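//
// Example: if the PSL reports a 64 KB per-transfer limit but the request was
// constructed with aMaxTransferSize = 4096, fragments are capped at 4096
// bytes. With no client cap and a PSL limit of 0 ("no maximum"), the PIL
// imposes no length cap of its own (fragments may still be split at memory
// page boundaries).
//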
       
  1056 
       
  1057 
       
  1058 // Unified internal fragmentation routine, called by both the old and new
       
  1059 // exported Fragment() functions.
       
  1060 //
       
  1061 // Depending on whether the DMAC uses a single or two separate descriptor
       
  1062 // chains, this function branches into either FragSym() or FragAsym(), and the
       
  1063 // latter function further into either FragAsymSrc()/FragAsymDst() or
       
  1064 // FragBalancedAsym().
       
  1065 //
       
  1066 TInt DDmaRequest::Frag(TDmaTransferArgs& aTransferArgs)
       
  1067 	{
       
  1068 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Frag"));
       
  1069 	__DMA_ASSERTD(!iQueued);
       
  1070 
       
  1071 	// Transfer count + checks
       
  1072 	const TUint count = GetTransferCount(aTransferArgs);
       
  1073 	if (count == 0)
       
  1074 		{
       
  1075 		return KErrArgument;
       
  1076 		}
       
  1077 
       
  1078 	// Max transfer length + checks
       
  1079 	const TUint max_xfer_len = GetMaxTransferlength(aTransferArgs, count);
       
  1080 	if (max_xfer_len == 0)
       
  1081 		{
       
  1082 		return KErrArgument;
       
  1083 		}
       
  1084 
       
  1085 	// ISR callback requested?
       
  1086 	const TBool isr_cb = (aTransferArgs.iFlags & KDmaRequestCallbackFromIsr);
       
  1087 	if (isr_cb)
       
  1088 		{
       
  1089 		// Requesting an ISR callback w/o supplying one?
       
  1090 		if (!iDmaCb)
       
  1091 			{
       
  1092 			// 12
       
  1093 			__KTRACE_OPT(KPANIC, Kern::Printf("Error: !iDmaCb"));
       
  1094 			return KErrArgument;
       
  1095 			}
       
  1096 		}
       
  1097 
       
  1098 	// Set the channel cookie for the PSL
       
  1099 	aTransferArgs.iChannelCookie = iChannel.PslId();
       
  1100 
       
  1101 	// Client shouldn't specify contradictory or invalid things
       
  1102 	TInt r = CheckMemFlags(aTransferArgs.iSrcConfig);
       
  1103 	if (r != KErrNone)
       
  1104 		{
       
  1105 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckMemFlags(src)"));
       
  1106 		return r;
       
  1107 		}
       
  1108 	r =  CheckMemFlags(aTransferArgs.iDstConfig);
       
  1109 	if (r != KErrNone)
       
  1110 		{
       
  1111 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckMemFlags(dst)"));
       
  1112 		return r;
       
  1113 		}
       
  1114 
       
  1115 	// Now the actual fragmentation
       
  1116 	if (iChannel.iDmacCaps->iAsymHwDescriptors)
       
  1117 		{
       
  1118 		r = FragAsym(aTransferArgs, count, max_xfer_len);
       
  1119 		}
       
  1120 	else
       
  1121 		{
       
  1122 		r = FragSym(aTransferArgs, count, max_xfer_len);
       
  1123 		}
       
  1124 
       
  1125 	if (r == KErrNone)
       
  1126 		{
       
  1127 		iIsrCb = isr_cb;
       
  1128 		}
       
  1129 
       
  1130 	__DMA_INVARIANT();
       
  1131 	return r;
       
   1132 	}
       
  1133 
       
  1134 
       
  1135 TInt DDmaRequest::FragSym(TDmaTransferArgs& aTransferArgs, TUint aCount,
       
  1136 						  TUint aMaxTransferLen)
       
  1137 	{
       
  1138 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragSym"));
       
  1139 
       
  1140 	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
       
  1141 	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
       
  1142 	const TBool mem_src = (src.iFlags & KDmaMemAddr);
       
  1143 	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);
       
  1144 
       
  1145 	const TUint align_mask_src = iChannel.AddressAlignMask(src.iFlags,
       
  1146 														   src.iElementSize,
       
  1147 														   aTransferArgs.iPslRequestInfo);
       
  1148 	__KTRACE_OPT(KDMA, Kern::Printf("align_mask_src: 0x%x", align_mask_src));
       
  1149 	const TUint align_mask_dst = iChannel.AddressAlignMask(dst.iFlags,
       
  1150 														   dst.iElementSize,
       
  1151 														   aTransferArgs.iPslRequestInfo);
       
  1152 	__KTRACE_OPT(KDMA, Kern::Printf("align_mask_dst: 0x%x", align_mask_dst));
       
  1153 
       
  1154 	// Memory buffers must satisfy alignment constraint
       
  1155 	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask_src) == 0));
       
  1156 	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0));
       
  1157 
       
  1158 	// Max aligned length is used to make sure the beginnings of subtransfers
       
  1159 	// (i.e. fragments) are correctly aligned.
       
  1160 	const TUint max_aligned_len = (aMaxTransferLen &
       
  1161 								   ~(_Max(align_mask_src, align_mask_dst)));
       
  1162 	__KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len));
       
  1163 	// Client and PSL sane?
       
  1164 	__DMA_ASSERTD(max_aligned_len > 0);
       
  1165 
       
  1166 	if (mem_src && mem_dst &&
       
  1167 		align_mask_src && align_mask_dst &&
       
  1168 		(align_mask_src != align_mask_dst) &&
       
  1169 		(!(src.iFlags & KDmaMemIsContiguous) || !(dst.iFlags & KDmaMemIsContiguous)))
       
  1170 		{
       
  1171 		// We don't support transfers which satisfy ALL of the following conditions:
       
  1172 		// 1) from memory to memory,
       
  1173 		// 2) both sides have address alignment requirements,
       
  1174 		// 3) those alignment requirements are not the same,
       
  1175 		// 4) the memory is non-contiguous on at least one end.
       
  1176 		//
       
  1177 		// [A 5th condition is that the channel doesn't support fully
       
  1178 		// asymmetric h/w descriptor lists,
       
  1179 		// i.e. TDmaChannel::DmacCaps::iAsymHwDescriptors is reported as EFalse
       
  1180 		// or iBalancedAsymSegments as ETrue. Hence this check is done in
       
  1181 		// FragSym() and FragBalancedAsym() but not in FragAsym().]
       
  1182 		//
       
  1183 		// The reason for this is that fragmentation could be impossible. The
       
  1184 		// memory layout (page break) on the side with the less stringent
       
  1185 		// alignment requirement can result in a misaligned target address on
       
  1186 		// the other side.
       
  1187 		//
       
  1188 		// Here is an example:
       
  1189 		//
       
  1190 		// src.iAddr =  3964 (0x0F7C), non-contiguous,
       
  1191 		// align_mask_src = 1 (alignment = 2 bytes)
       
  1192 		// dst.iAddr = 16384 (0x4000), contiguous,
       
  1193 		// align_mask_dst = 7 (alignment = 8 bytes)
       
  1194 		// count = max_xfer_len = 135 bytes
       
  1195 		// => max_aligned_len = 128 bytes
       
  1196 		//
       
  1197 		// Now, suppose MaxPhysSize() returns 132 bytes because src has 132
       
  1198 		// contiguous bytes to the end of its current mem page.
       
  1199 		// Trying to fragment this leads to:
       
  1200 		//
       
  1201 		// frag_1 = 128 bytes: src reads from 3964 (0x0F7C),
       
  1202 		//                     dst writes to 16384 (0x4000).
       
  1203 		// (Fragment 1 uses the max_aligned_len instead of 132 bytes because
       
  1204 		// otherwise the next fragment would start for the destination at
       
  1205 		// dst.iAddr + 132 = 16516 (0x4084), which is not 8-byte aligned.)
       
  1206 		//
       
  1207 		// frag_2 = 4 bytes: src reads from 4092 (0x0FFC),
       
  1208 		//                   dst writes to 16512 (0x4080).
       
  1209 		// (Fragment 2 uses just 4 bytes instead of the remaining 7 bytes
       
  1210 		// because there is a memory page break on the source side after 4 bytes.)
       
  1211 		//
       
  1212 		// frag_3 = 3 bytes: src reads from 4096 (0x1000),
       
  1213 		//                   dst writes to 16516 (0x4084).
       
  1214 		//
       
  1215 		// And there's the problem: the start address of frag_3 is going to be
       
  1216 		// misaligned for the destination side - it's not 8-byte aligned!
       
  1217 		//
       
  1218 		// 17
       
  1219 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: Different alignments for src & dst"
       
  1220 										  " + non-contiguous target(s)"));
       
  1221 		return KErrArgument;
       
  1222 		}
       
  1223 
       
  1224 	TInt r;
       
  1225 	// Revert any previous fragmentation attempt
       
  1226 	FreeDesList();
       
  1227 	do
       
  1228 		{
       
  1229 		// Allocate fragment
       
  1230 		r = ExpandDesList(/*1*/);
       
  1231 		if (r != KErrNone)
       
  1232 			{
       
  1233 			break;
       
  1234 			}
       
  1235 		// Compute fragment size
       
  1236 		TUint c = _Min(aMaxTransferLen, aCount);
       
  1237 		__KTRACE_OPT(KDMA, Kern::Printf("c = _Min(aMaxTransferLen, aCount) = %d", c));
       
  1238 
       
  1239 		// SRC
       
  1240 		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
       
  1241 			{
       
  1242 			c = MaxPhysSize(src.iAddr, c);
       
  1243 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(src.iAddr, c) = %d", c));
       
  1244 			}
       
  1245 
       
  1246 		// DST
       
  1247 		if (mem_dst && !(dst.iFlags & KDmaMemIsContiguous))
       
  1248 			{
       
  1249 			c = MaxPhysSize(dst.iAddr, c);
       
  1250 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(dst.iAddr, c) = %d", c));
       
  1251 			}
       
  1252 
       
  1253 		// SRC & DST
       
  1254 		if ((mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len))
       
  1255 			{
       
  1256 			// This is not the last fragment of a transfer to/from memory.
       
  1257 			// We must round down the fragment size so the next one is
       
  1258 			// correctly aligned.
       
  1259 			c = max_aligned_len;
       
  1260 			__KTRACE_OPT(KDMA, Kern::Printf("c = max_aligned_len = %d", c));
       
  1261 			//
       
  1262 			// But can this condition actually occur if src and dst are
       
  1263 			// properly aligned to start with?
       
  1264 			//
       
  1265 			// If we disallow unequal alignment requirements in connection with
       
  1266 			// non-contiguous memory buffers (see the long comment above in
       
  1267 			// this function for why) and if both target addresses are
       
  1268 			// correctly aligned at the beginning of the transfer then it
       
  1269 			// doesn't seem possible to end up with a fragment which is not
       
  1270 			// quite the total remaining size (c < aCount) but still larger
       
  1271 			// than the greatest aligned length (c > max_aligned_len).
       
  1272 			//
       
  1273 			// That's because address alignment values are always a power of
       
  1274 			// two (at least that's what we assume - otherwise
       
  1275 			// AddressAlignMask() doesn't work), and memory page sizes are also
       
  1276 			// always a power of two and hence a multiple of the alignment
       
  1277 			// value (as long as the alignment is not greater than the page
       
  1278 			// size, which seems a reasonable assumption regardless of the
       
  1279 			// actual page size). So if we start properly aligned anywhere in a
       
  1280 			// memory page then the number of bytes to the end of that page is
       
   1281 			// always a multiple of the alignment value - there's no remainder.
       
  1282 			//
       
  1283 			// So let's see if we ever hit this assertion:
       
  1284 			Kern::Printf("Unexpected: (mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len)");
       
  1285 			__DMA_ASSERTA(EFalse);
       
  1286 			}
       
  1287 
       
  1288 		// If this is not the last fragment...
       
  1289 		if (c < aCount)
       
  1290 			{
       
  1291 			const TUint es_src = src.iElementSize;
       
  1292 			const TUint es_dst = dst.iElementSize;
       
  1293 			const TUint fs_src = es_src * src.iElementsPerFrame;
       
  1294 			const TUint fs_dst = es_dst * dst.iElementsPerFrame;
       
  1295 			TUint c_prev;
       
  1296 			do
       
  1297 				{
       
  1298 				c_prev = c;
       
  1299 				// If fs_src is !0 then es_src must be !0 as well (see
       
  1300 				// CheckTransferConfig).
       
  1301 				if (es_src)
       
  1302 					{
       
  1303 					r = AdjustFragmentSize(c, es_src, fs_src);
       
  1304 					if (r != KErrNone)
       
  1305 						{
       
  1306 						break;							// while (c != c_prev);
       
  1307 						}
       
  1308 					}
       
  1309 				// If fs_dst is !0 then es_dst must be !0 as well (see
       
  1310 				// CheckTransferConfig).
       
  1311 				if (es_dst)
       
  1312 					{
       
  1313 					r = AdjustFragmentSize(c, es_dst, fs_dst);
       
  1314 					if (r != KErrNone)
       
  1315 						{
       
  1316 						break;							// while (c != c_prev);
       
  1317 						}
       
  1318 					}
       
  1319 				} while (c != c_prev);
       
  1320 			if (r != KErrNone)
       
  1321 				{
       
  1322 				break;									 // while (aCount > 0);
       
  1323 				}
       
  1324 			}
       
  1325 
       
  1326 		// Set transfer count for the PSL
       
  1327 		aTransferArgs.iTransferCount = c;
       
  1328 		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
       
  1329 										c, c, aCount, aCount));
       
  1330 		// Initialise fragment
       
  1331 		r = iChannel.iController->InitDes(*iLastHdr, aTransferArgs);
       
  1332 		if (r != KErrNone)
       
  1333 			{
       
  1334 			break;
       
  1335 			}
       
  1336 		// Update for next iteration
       
  1337 		aCount -= c;
       
  1338 		if (mem_src)
       
  1339 			{
       
  1340 			src.iAddr += c;
       
  1341 			}
       
  1342 		if (mem_dst)
       
  1343 			{
       
  1344 			dst.iAddr += c;
       
  1345 			}
       
  1346 		} while (aCount > 0);
       
  1347 
       
  1348 	if (r != KErrNone)
       
  1349 		{
       
  1350 		FreeDesList();
       
  1351 		}
       
  1352 	return r;
       
  1353 	}
       
  1354 
       
  1355 
       
  1356 TInt DDmaRequest::FragAsym(TDmaTransferArgs& aTransferArgs, TUint aCount,
       
  1357 						   TUint aMaxTransferLen)
       
  1358 	{
       
  1359 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragAsym"));
       
  1360 
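        	// Use balanced asymmetric fragmentation (src and dst descriptor chains
        	// of equal length) if the DMAC supports it; otherwise fragment the
        	// source and destination sides independently of each other.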
       
  1361 	TInt r;
       
  1362 	if (iChannel.iDmacCaps->iBalancedAsymSegments)
       
  1363 		{
       
  1364 		r = FragBalancedAsym(aTransferArgs, aCount, aMaxTransferLen);
       
  1365 		if (r != KErrNone)
       
  1366 			{
       
  1367 			FreeSrcDesList();
       
  1368 			FreeDstDesList();
       
  1369 			}
       
  1370 		return r;
       
  1371 		}
       
  1372 	r = FragAsymSrc(aTransferArgs, aCount, aMaxTransferLen);
       
  1373 	if (r != KErrNone)
       
  1374 		{
       
  1375 		FreeSrcDesList();
       
  1376 		return r;
       
  1377 		}
       
  1378 	r = FragAsymDst(aTransferArgs, aCount, aMaxTransferLen);
       
  1379 	if (r != KErrNone)
       
  1380 		{
       
  1381 		FreeSrcDesList();
       
  1382 		FreeDstDesList();
       
  1383 		}
       
  1384 	return r;
       
  1385 	}
       
  1386 
       
  1387 
       
  1388 TInt DDmaRequest::FragAsymSrc(TDmaTransferArgs& aTransferArgs, TUint aCount,
       
  1389 							  TUint aMaxTransferLen)
       
  1390 	{
       
  1391 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragAsymSrc"));
       
  1392 
       
  1393 	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
       
  1394 	const TBool mem_src = (src.iFlags & KDmaMemAddr);
       
  1395 
       
  1396 	const TUint align_mask = iChannel.AddressAlignMask(src.iFlags,
       
  1397 													   src.iElementSize,
       
  1398 													   aTransferArgs.iPslRequestInfo);
       
  1399 	__KTRACE_OPT(KDMA, Kern::Printf("align_mask: 0x%x", align_mask));
       
  1400 
       
  1401 	// Memory buffers must satisfy alignment constraint
       
  1402 	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask) == 0));
       
  1403 
       
  1404 	// Max aligned length is used to make sure the beginnings of subtransfers
       
  1405 	// (i.e. fragments) are correctly aligned.
       
  1406 	const TUint max_aligned_len = (aMaxTransferLen & ~align_mask);
       
  1407 	__KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len));
       
  1408 	// Client and PSL sane?
       
  1409 	__DMA_ASSERTD(max_aligned_len > 0);
       
  1410 
       
  1411 	TInt r;
       
  1412 	// Revert any previous fragmentation attempt
       
  1413 	FreeSrcDesList();
       
  1414 	do
       
  1415 		{
       
  1416 		// Allocate fragment
       
  1417 		r = ExpandSrcDesList(/*1*/);
       
  1418 		if (r != KErrNone)
       
  1419 			{
       
  1420 			break;
       
  1421 			}
       
  1422 		// Compute fragment size
       
  1423 		TUint c = _Min(aMaxTransferLen, aCount);
       
  1424 		__KTRACE_OPT(KDMA, Kern::Printf("c = _Min(aMaxTransferLen, aCount) = %d", c));
       
  1425 
       
  1426 		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
       
  1427 			{
       
  1428 			c = MaxPhysSize(src.iAddr, c);
       
  1429 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(src.iAddr, c) = %d", c));
       
  1430 			}
       
  1431 
       
  1432 		if (mem_src && (c < aCount) && (c > max_aligned_len))
       
  1433 			{
       
  1434 			// This is not the last fragment of a transfer from memory.
       
  1435 			// We must round down the fragment size so the next one is
       
  1436 			// correctly aligned.
       
        			c = max_aligned_len;
   1437 			__KTRACE_OPT(KDMA, Kern::Printf("c = max_aligned_len = %d", c));
       
  1438 			//
       
  1439 			// But can this condition actually occur if src is properly aligned
       
  1440 			// to start with?
       
  1441 			//
       
  1442 			// If the target address is correctly aligned at the beginning of
       
  1443 			// the transfer then it doesn't seem possible to end up with a
       
  1444 			// fragment which is not quite the total remaining size (c <
       
  1445 			// aCount) but still larger than the greatest aligned length (c >
       
  1446 			// max_aligned_len).
       
  1447 			//
       
  1448 			// That's because address alignment values are always a power of
       
  1449 			// two (at least that's what we assume - otherwise
       
  1450 			// AddressAlignMask() doesn't work), and memory page sizes are also
       
  1451 			// always a power of two and hence a multiple of the alignment
       
  1452 			// value (as long as the alignment is not greater than the page
       
  1453 			// size, which seems a reasonable assumption regardless of the
       
  1454 			// actual page size). So if we start properly aligned anywhere in a
       
  1455 			// memory page then the number of bytes to the end of that page is
       
   1456 			// always a multiple of the alignment value - there's no remainder.
       
  1457 			//
       
  1458 			// So let's see if we ever hit this assertion:
       
  1459 			Kern::Printf("Unexpected: mem_src && (c < aCount) && (c > max_aligned_len)");
       
  1460 			__DMA_ASSERTA(EFalse);
       
  1461 			}
       
  1462 
       
  1463 		// If this is not the last fragment...
       
  1464 		if (c < aCount)
       
  1465 			{
       
  1466 			const TUint es = src.iElementSize;
       
  1467 			const TUint fs = es * src.iElementsPerFrame;
       
  1468 			// If fs is !0 then es must be !0 as well (see
       
  1469 			// CheckTransferConfig).
       
  1470 			if (es)
       
  1471 				{
       
  1472 				r = AdjustFragmentSize(c, es, fs);
       
  1473 				if (r != KErrNone)
       
  1474 					{
       
  1475 					break;								 // while (aCount > 0);
       
  1476 					}
       
  1477 				}
       
  1478 			}
       
  1479 
       
  1480 		// Set transfer count for the PSL
       
  1481 		aTransferArgs.iTransferCount = c;
       
  1482 		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
       
  1483 										c, c, aCount, aCount));
       
  1484 		// Initialise fragment
       
  1485 		r = iChannel.iController->InitSrcHwDes(*iSrcLastHdr, aTransferArgs);
       
  1486 		if (r != KErrNone)
       
  1487 			{
       
  1488 			break;
       
  1489 			}
       
  1490 		// Update for next iteration
       
  1491 		aCount -= c;
       
  1492 		if (mem_src)
       
  1493 			{
       
  1494 			src.iAddr += c;
       
  1495 			}
       
  1496 		} while (aCount > 0);
       
  1497 
       
  1498 	return r;
       
  1499 	}
       
  1500 
       
  1501 
       
  1502 TInt DDmaRequest::FragAsymDst(TDmaTransferArgs& aTransferArgs, TUint aCount,
       
  1503 							  TUint aMaxTransferLen)
       
  1504 	{
       
  1505 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragAsymDst"));
       
  1506 
       
  1507 	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
       
  1508 	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);
       
  1509 
       
  1510 	const TUint align_mask = iChannel.AddressAlignMask(dst.iFlags,
       
  1511 													   dst.iElementSize,
       
  1512 													   aTransferArgs.iPslRequestInfo);
       
  1513 	__KTRACE_OPT(KDMA, Kern::Printf("align_mask: 0x%x", align_mask));
       
  1514 
       
  1515 	// Memory buffers must satisfy alignment constraint
       
  1516 	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask) == 0));
       
  1517 
       
  1518 	// Max aligned length is used to make sure the beginnings of subtransfers
       
  1519 	// (i.e. fragments) are correctly aligned.
       
  1520 	const TUint max_aligned_len = (aMaxTransferLen & ~align_mask);
       
  1521 	__KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len));
       
  1522 	// Client and PSL sane?
       
  1523 	__DMA_ASSERTD(max_aligned_len > 0);
       
  1524 
       
  1525 	TInt r;
       
  1526 	// Revert any previous fragmentation attempt
       
  1527 	FreeDstDesList();
       
  1528 	do
       
  1529 		{
       
  1530 		// Allocate fragment
       
  1531 		r = ExpandDstDesList(/*1*/);
       
  1532 		if (r != KErrNone)
       
  1533 			{
       
  1534 			break;
       
  1535 			}
       
  1536 		// Compute fragment size
       
  1537 		TUint c = _Min(aMaxTransferLen, aCount);
       
  1538 		__KTRACE_OPT(KDMA, Kern::Printf("c = _Min(aMaxTransferLen, aCount) = %d", c));
       
  1539 
       
  1540 		if (mem_dst && !(dst.iFlags & KDmaMemIsContiguous))
       
  1541 			{
       
  1542 			c = MaxPhysSize(dst.iAddr, c);
       
  1543 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(dst.iAddr, c) = %d", c));
       
  1544 			}
       
  1545 
       
  1546 		if (mem_dst && (c < aCount) && (c > max_aligned_len))
       
  1547 			{
       
  1548 			// This is not the last fragment of a transfer to memory.
       
  1549 			// We must round down the fragment size so the next one is
       
  1550 			// correctly aligned.
       
        			c = max_aligned_len;
   1551 			__KTRACE_OPT(KDMA, Kern::Printf("c = max_aligned_len = %d", c));
       
  1552 			//
       
  1553 			// But can this condition actually occur if dst is properly aligned
       
  1554 			// to start with?
       
  1555 			//
       
  1556 			// If the target address is correctly aligned at the beginning of
       
  1557 			// the transfer then it doesn't seem possible to end up with a
       
  1558 			// fragment which is not quite the total remaining size (c <
       
  1559 			// aCount) but still larger than the greatest aligned length (c >
       
  1560 			// max_aligned_len).
       
  1561 			//
       
  1562 			// That's because address alignment values are always a power of
       
  1563 			// two (at least that's what we assume - otherwise
       
  1564 			// AddressAlignMask() doesn't work), and memory page sizes are also
       
  1565 			// always a power of two and hence a multiple of the alignment
       
  1566 			// value (as long as the alignment is not greater than the page
       
  1567 			// size, which seems a reasonable assumption regardless of the
       
  1568 			// actual page size). So if we start properly aligned anywhere in a
       
  1569 			// memory page then the number of bytes to the end of that page is
       
   1570 			// always a multiple of the alignment value - there's no remainder.
       
  1571 			//
       
  1572 			// So let's see if we ever hit this assertion:
       
  1573 			Kern::Printf("Unexpected: mem_dst && (c < aCount) && (c > max_aligned_len)");
       
  1574 			__DMA_ASSERTA(EFalse);
       
  1575 			}
       
  1576 
       
  1577 		// If this is not the last fragment...
       
  1578 		if (c < aCount)
       
  1579 			{
       
  1580 			const TUint es = dst.iElementSize;
       
  1581 			const TUint fs = es * dst.iElementsPerFrame;
       
  1582 			// If fs is !0 then es must be !0 as well (see
       
  1583 			// CheckTransferConfig).
       
  1584 			if (es)
       
  1585 				{
       
  1586 				r = AdjustFragmentSize(c, es, fs);
       
  1587 				if (r != KErrNone)
       
  1588 					{
       
  1589 					break;								 // while (aCount > 0);
       
  1590 					}
       
  1591 				}
       
  1592 			}
       
  1593 
       
  1594 		// Set transfer count for the PSL
       
  1595 		aTransferArgs.iTransferCount = c;
       
  1596 		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
       
  1597 										c, c, aCount, aCount));
       
  1598 		// Initialise fragment
       
  1599 		r = iChannel.iController->InitDstHwDes(*iDstLastHdr, aTransferArgs);
       
  1600 		if (r != KErrNone)
       
  1601 			{
       
  1602 			break;
       
  1603 			}
       
  1604 		// Update for next iteration
       
  1605 		aCount -= c;
       
  1606 		if (mem_dst)
       
  1607 			{
       
  1608 			dst.iAddr += c;
       
  1609 			}
       
  1610 		}
       
  1611 	while (aCount > 0);
       
  1612 
       
  1613 	return r;
       
  1614 	}
       
  1615 
       
  1616 
       
  1617 TInt DDmaRequest::FragBalancedAsym(TDmaTransferArgs& aTransferArgs, TUint aCount,
       
  1618 								   TUint aMaxTransferLen)
       
  1619 	{
       
  1620 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragBalancedAsym"));
       
  1621 
       
  1622 	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
       
  1623 	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
       
  1624 	const TBool mem_src = (src.iFlags & KDmaMemAddr);
       
  1625 	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);
       
  1626 
       
  1627 	const TUint align_mask_src = iChannel.AddressAlignMask(src.iFlags,
       
  1628 														   src.iElementSize,
       
  1629 														   aTransferArgs.iPslRequestInfo);
       
  1630 	__KTRACE_OPT(KDMA, Kern::Printf("align_mask_src: 0x%x", align_mask_src));
       
  1631 	const TUint align_mask_dst = iChannel.AddressAlignMask(dst.iFlags,
       
  1632 														   dst.iElementSize,
       
  1633 														   aTransferArgs.iPslRequestInfo);
       
  1634 	__KTRACE_OPT(KDMA, Kern::Printf("align_mask_dst: 0x%x", align_mask_dst));
       
  1635 
       
  1636 	// Memory buffers must satisfy alignment constraint
       
  1637 	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask_src) == 0));
       
  1638 	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0));
       
  1639 
       
  1640 	// Max aligned length is used to make sure the beginnings of subtransfers
       
  1641 	// (i.e. fragments) are correctly aligned.
       
  1642 	const TUint max_aligned_len = (aMaxTransferLen &
       
  1643 								   ~(_Max(align_mask_src, align_mask_dst)));
       
  1644 	__KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len));
       
  1645 	// Client and PSL sane?
       
  1646 	__DMA_ASSERTD(max_aligned_len > 0);
       
  1647 
       
  1648 	if (mem_src && mem_dst &&
       
  1649 		align_mask_src && align_mask_dst &&
       
  1650 		(align_mask_src != align_mask_dst) &&
       
  1651 		(!(src.iFlags & KDmaMemIsContiguous) || !(dst.iFlags & KDmaMemIsContiguous)))
       
  1652 		{
       
  1653 		// We don't support transfers which satisfy ALL of the following conditions:
       
  1654 		// 1) from memory to memory,
       
  1655 		// 2) both sides have address alignment requirements,
       
  1656 		// 3) those alignment requirements are not the same,
       
  1657 		// 4) the memory is non-contiguous on at least one end.
       
  1658 		//
       
  1659 		// [A 5th condition is that the channel doesn't support fully
       
  1660 		// asymmetric h/w descriptor lists,
       
  1661 		// i.e. TDmaChannel::DmacCaps::iAsymHwDescriptors is reported as EFalse
       
  1662 		// or iBalancedAsymSegments as ETrue. Hence this check is done in
       
  1663 		// FragSym() and FragBalancedAsym() but not in FragAsym().]
       
  1664 		//
       
  1665 		// The reason for this is that fragmentation could be impossible. The
       
  1666 		// memory layout (page break) on the side with the less stringent
       
  1667 		// alignment requirement can result in a misaligned target address on
       
  1668 		// the other side.
       
  1669 		//
       
  1670 		// Here is an example:
       
  1671 		//
       
  1672 		// src.iAddr =  3964 (0x0F7C), non-contiguous,
       
  1673 		// align_mask_src = 1 (alignment = 2 bytes)
       
  1674 		// dst.iAddr = 16384 (0x4000), contiguous,
       
  1675 		// align_mask_dst = 7 (alignment = 8 bytes)
       
  1676 		// count = max_xfer_len = 135 bytes
       
  1677 		// => max_aligned_len = 128 bytes
       
  1678 		//
       
  1679 		// Now, suppose MaxPhysSize() returns 132 bytes because src has 132
       
  1680 		// contiguous bytes to the end of its current mem page.
       
  1681 		// Trying to fragment this leads to:
       
  1682 		//
       
  1683 		// frag_1 = 128 bytes: src reads from 3964 (0x0F7C),
       
  1684 		//                     dst writes to 16384 (0x4000).
       
  1685 		// (Fragment 1 uses the max_aligned_len instead of 132 bytes because
       
  1686 		// otherwise the next fragment would start for the destination at
       
  1687 		// dst.iAddr + 132 = 16516 (0x4084), which is not 8-byte aligned.)
       
  1688 		//
       
  1689 		// frag_2 = 4 bytes: src reads from 4092 (0x0FFC),
       
  1690 		//                   dst writes to 16512 (0x4080).
       
  1691 		// (Fragment 2 uses just 4 bytes instead of the remaining 7 bytes
       
  1692 		// because there is a memory page break on the source side after 4 bytes.)
       
  1693 		//
       
  1694 		// frag_3 = 3 bytes: src reads from 4096 (0x1000),
       
  1695 		//                   dst writes to 16516 (0x4084).
       
  1696 		//
       
  1697 		// And there's the problem: the start address of frag_3 is going to be
       
  1698 		// misaligned for the destination side - it's not 8-byte aligned!
       
  1699 		//
       
  1700 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: Different alignments for src & dst"
       
  1701 										  " + non-contiguous target(s)"));
       
  1702 		return KErrArgument;
       
  1703 		}
       
  1704 
       
  1705 	TInt r;
       
  1706 	// Revert any previous fragmentation attempt
       
  1707 	FreeSrcDesList();
       
  1708 	FreeDstDesList();
       
  1709 	__DMA_ASSERTD(iSrcDesCount == iDstDesCount);
       
  1710 	do
       
  1711 		{
       
  1712 		// Allocate fragment
       
  1713 		r = ExpandSrcDesList(/*1*/);
       
  1714 		if (r != KErrNone)
       
  1715 			{
       
  1716 			break;
       
  1717 			}
       
  1718 		r = ExpandDstDesList(/*1*/);
       
  1719 		if (r != KErrNone)
       
  1720 			{
       
  1721 			break;
       
  1722 			}
       
  1723 		__DMA_ASSERTD(iSrcDesCount == iDstDesCount);
       
  1724 		// Compute fragment size
       
  1725 		TUint c = _Min(aMaxTransferLen, aCount);
       
  1726 		__KTRACE_OPT(KDMA, Kern::Printf("c = _Min(aMaxTransferLen, aCount) = %d", c));
       
  1727 
       
  1728 		// SRC
       
  1729 		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
       
  1730 			{
       
  1731 			c = MaxPhysSize(src.iAddr, c);
       
  1732 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(src.iAddr, c) = %d", c));
       
  1733 			}
       
  1734 
       
  1735 		// DST
       
  1736 		if (mem_dst && !(dst.iFlags & KDmaMemIsContiguous))
       
  1737 			{
       
  1738 			c = MaxPhysSize(dst.iAddr, c);
       
  1739 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(dst.iAddr, c) = %d", c));
       
  1740 			}
       
  1741 
       
  1742 		// SRC & DST
       
  1743 		if ((mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len))
       
  1744 			{
       
  1745 			// This is not the last fragment of a transfer to/from memory.
       
  1746 			// We must round down the fragment size so the next one is
       
  1747 			// correctly aligned.
       
  1748 			c = max_aligned_len;
       
  1749 			__KTRACE_OPT(KDMA, Kern::Printf("c = max_aligned_len = %d", c));
       
  1750 			//
       
  1751 			// But can this condition actually occur if src and dst are
       
  1752 			// properly aligned to start with?
       
  1753 			//
       
  1754 			// If we disallow unequal alignment requirements in connection with
       
  1755 			// non-contiguous memory buffers (see the long comment above in
       
  1756 			// this function for why) and if both target addresses are
       
  1757 			// correctly aligned at the beginning of the transfer then it
       
  1758 			// doesn't seem possible to end up with a fragment which is not
       
  1759 			// quite the total remaining size (c < aCount) but still larger
       
  1760 			// than the greatest aligned length (c > max_aligned_len).
       
  1761 			//
       
  1762 			// That's because address alignment values are always a power of
       
  1763 			// two (at least that's what we assume - otherwise
       
  1764 			// AddressAlignMask() doesn't work), and memory page sizes are also
       
  1765 			// always a power of two and hence a multiple of the alignment
       
  1766 			// value (as long as the alignment is not greater than the page
       
  1767 			// size, which seems a reasonable assumption regardless of the
       
  1768 			// actual page size). So if we start properly aligned anywhere in a
       
  1769 			// memory page then the number of bytes to the end of that page is
       
   1770 			// always a multiple of the alignment value - there's no remainder.
       
  1771 			//
       
  1772 			// So let's see if we ever hit this assertion:
       
  1773 			Kern::Printf("Unexpected: (mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len)");
       
  1774 			__DMA_ASSERTA(EFalse);
       
  1775 			}
       
  1776 
       
  1777 		// If this is not the last fragment...
       
  1778 		if (c < aCount)
       
  1779 			{
       
  1780 			const TUint es_src = src.iElementSize;
       
  1781 			const TUint es_dst = dst.iElementSize;
       
  1782 			const TUint fs_src = es_src * src.iElementsPerFrame;
       
  1783 			const TUint fs_dst = es_dst * dst.iElementsPerFrame;
       
  1784 			TUint c_prev;
       
  1785 			do
       
  1786 				{
       
  1787 				c_prev = c;
       
  1788 				// If fs_src is !0 then es_src must be !0 as well (see
       
  1789 				// CheckTransferConfig).
       
  1790 				if (es_src)
       
  1791 					{
       
  1792 					r = AdjustFragmentSize(c, es_src, fs_src);
       
  1793 					if (r != KErrNone)
       
  1794 						{
       
  1795 						break;							// while (c != c_prev);
       
  1796 						}
       
  1797 					}
       
  1798 				// If fs_dst is !0 then es_dst must be !0 as well (see
       
  1799 				// CheckTransferConfig).
       
  1800 				if (es_dst)
       
  1801 					{
       
  1802 					r = AdjustFragmentSize(c, es_dst, fs_dst);
       
  1803 					if (r != KErrNone)
       
  1804 						{
       
  1805 						break;							// while (c != c_prev);
       
  1806 						}
       
  1807 					}
       
  1808 				} while (c != c_prev);
       
  1809 			if (r != KErrNone)
       
  1810 				{
       
  1811 				break;									 // while (aCount > 0);
       
  1812 				}
       
  1813 			}
       
  1814 
       
  1815 		// Set transfer count for the PSL
       
  1816 		aTransferArgs.iTransferCount = c;
       
  1817 		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
       
  1818 										c, c, aCount, aCount));
       
  1819 		// Initialise SRC fragment
       
  1820 		r = iChannel.iController->InitSrcHwDes(*iSrcLastHdr, aTransferArgs);
       
  1821 		if (r != KErrNone)
       
  1822 			{
       
  1823 			break;
       
  1824 			}
       
  1825 		// Initialise DST fragment
       
  1826 		r = iChannel.iController->InitDstHwDes(*iDstLastHdr, aTransferArgs);
       
  1827 		if (r != KErrNone)
       
  1828 			{
       
  1829 			break;
       
  1830 			}
       
  1831 		// Update for next iteration
       
  1832 		aCount -= c;
       
  1833 		if (mem_src)
       
  1834 			{
       
  1835 			src.iAddr += c;
       
  1836 			}
       
  1837 		if (mem_dst)
       
  1838 			{
       
  1839 			dst.iAddr += c;
       
  1840 			}
       
  1841 		}
       
  1842 	while (aCount > 0);
       
  1843 
       
  1844 	return r;
       
  1845 	}
       
  1846 
       
  1847 
       
  1848 EXPORT_C TInt DDmaRequest::Queue()
       
  1849 	{
       
  1850 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread()));
       
  1851 	// Not configured? Call Fragment() first!
       
  1852 	if (iChannel.iDmacCaps->iAsymHwDescriptors)
       
  1853 		{
       
  1854 		__DMA_ASSERTD((iSrcDesCount > 0) && (iDstDesCount > 0));
       
  1855 		}
       
  1856 	else
       
  1857 		{
       
  1858 		__DMA_ASSERTD(iDesCount > 0);
       
  1859 		}
       
  1860 	__DMA_ASSERTD(!iQueued);
       
  1861 
       
  1862 	// Append request to queue and link new descriptor list to existing one.
       
  1863 	iChannel.Wait();
       
  1864 
       
  1865 	TUint32 req_count = iChannel.iQueuedRequests++;
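        	// If the queue was empty before this request (req_count == 0), notify
        	// the PSL via QueuedRequestCountChanged(); the channel lock is dropped
        	// around the call, as that hook is never invoked with the lock held.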
       
  1866 	if (iChannel.iCallQueuedRequestFn)
       
  1867 		{
       
  1868 		if (req_count == 0)
       
  1869 			{
       
  1870 			iChannel.Signal();
       
  1871 			iChannel.QueuedRequestCountChanged();
       
  1872 			iChannel.Wait();
       
  1873 			}
       
  1874 		}
       
  1875 
       
  1876 	TInt r = KErrGeneral;
       
  1877 	const TBool ch_isr_cb = __e32_atomic_load_acq32(&iChannel.iIsrCbRequest);
       
  1878 	if (ch_isr_cb)
       
  1879 		{
       
  1880 		// Client mustn't try to queue any new request while one with an ISR
       
  1881 		// callback is already queued on this channel. This is to make sure
       
  1882 		// that the channel's Transfer() function is not called by both the ISR
       
  1883 		// and the client thread at the same time.
       
  1884 		__KTRACE_OPT(KPANIC, Kern::Printf("An ISR cb request exists - not queueing"));
       
  1885 		// Undo the request count increment...
       
  1886 		req_count = --iChannel.iQueuedRequests;
       
  1887 		__DMA_INVARIANT();
       
  1888 		iChannel.Signal();
       
  1889 		if (iChannel.iCallQueuedRequestFn)
       
  1890 			{
       
  1891 			if (req_count == 0)
       
  1892 				{
       
  1893 				iChannel.QueuedRequestCountChanged();
       
  1894 				}
       
  1895 			}
       
  1896 		}
       
  1897 	else if (iIsrCb && !iChannel.IsQueueEmpty())
       
  1898 		{
       
  1899 		// Client mustn't try to queue an ISR callback request whilst any
       
  1900 		// others are still queued on this channel. This is to make sure that
       
  1901 		// the ISR callback doesn't get executed together with the DFC(s) of
       
  1902 		// any previous request(s).
       
  1903 		__KTRACE_OPT(KPANIC, Kern::Printf("Request queue not empty - not queueing"));
       
  1904 		// Undo the request count increment...
       
  1905 		req_count = --iChannel.iQueuedRequests;
       
  1906 		__DMA_INVARIANT();
       
  1907 		iChannel.Signal();
       
  1908 		if (iChannel.iCallQueuedRequestFn)
       
  1909 			{
       
  1910 			if (req_count == 0)
       
  1911 				{
       
  1912 				iChannel.QueuedRequestCountChanged();
       
  1913 				}
       
  1914 			}
       
  1915 		}
       
  1916 	else if (iChannel.iIsrDfc & (TUint32)TDmaChannel::KCancelFlagMask)
       
  1917 		{
       
  1918 		__KTRACE_OPT(KPANIC, Kern::Printf("Channel requests cancelled - not queueing"));
       
  1919 		// Someone is cancelling all requests - undo the request count increment...
       
  1920 		req_count = --iChannel.iQueuedRequests;
       
  1921 		__DMA_INVARIANT();
       
  1922 		iChannel.Signal();
       
  1923 		if (iChannel.iCallQueuedRequestFn)
       
  1924 			{
       
  1925 			if (req_count == 0)
       
  1926 				{
       
  1927 				iChannel.QueuedRequestCountChanged();
       
  1928 				}
       
  1929 			}
       
  1930 		}
       
  1931 	else
       
  1932 		{
       
  1933 		iQueued = ETrue;
       
  1934 		iChannel.iReqQ.Add(&iLink);
       
  1935 		iChannel.SetNullPtr(*this);
       
  1936 		if (iIsrCb)
       
  1937 			{
       
  1938 			// Since we've made sure that there is no other request in the
       
  1939 			// queue before this, the only thing of relevance is the channel
       
  1940 			// DFC which might yet have to complete for the previous request,
       
  1941 			// and this function might indeed have been called from there via
       
  1942 			// the client callback. This should be all right though as once
       
  1943 			// we've set the following flag no further Queue()'s will be
       
  1944 			// possible.
       
  1945 			__e32_atomic_store_rel32(&iChannel.iIsrCbRequest, ETrue);
       
  1946 			}
       
  1947 		iChannel.DoQueue(*this);
       
  1948 		r = KErrNone;
       
  1949 		__DMA_INVARIANT();
       
  1950 		iChannel.Signal();
       
  1951 		}
       
  1952 
       
  1953 	return r;
       
  1954 	}
       
  1955 
       
  1956 
       
  1957 EXPORT_C TInt DDmaRequest::ExpandDesList(TInt aCount)
       
  1958 	{
       
  1959 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::ExpandDesList aCount=%d", aCount));
       
  1960 	return ExpandDesList(aCount, iDesCount, iFirstHdr, iLastHdr);
       
  1961 	}
       
  1962 
       
  1963 
       
  1964 EXPORT_C TInt DDmaRequest::ExpandSrcDesList(TInt aCount)
       
  1965 	{
       
  1966 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::ExpandSrcDesList"));
       
  1967 	return ExpandDesList(aCount, iSrcDesCount, iSrcFirstHdr, iSrcLastHdr);
       
  1968 	}
       
  1969 
       
  1970 
       
  1971 EXPORT_C TInt DDmaRequest::ExpandDstDesList(TInt aCount)
       
  1972 	{
       
  1973 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::ExpandDstDesList"));
       
  1974 	return ExpandDesList(aCount, iDstDesCount, iDstFirstHdr, iDstLastHdr);
       
  1975 	}
       
  1976 
       
  1977 
       
  1978 TInt DDmaRequest::ExpandDesList(TInt aCount, TInt& aDesCount,
       
  1979 								SDmaDesHdr*& aFirstHdr,
       
  1980 								SDmaDesHdr*& aLastHdr)
       
  1981 	{
       
  1982 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::ExpandDesList"));
       
  1983 	__DMA_ASSERTD(!iQueued);
       
  1984 	__DMA_ASSERTD(aCount > 0);
       
  1985 
       
  1986 	if (aCount > iChannel.iAvailDesCount)
       
  1987 		{
       
  1988 		return KErrTooBig;
       
  1989 		}
       
  1990 
       
  1991 	iChannel.iAvailDesCount -= aCount;
       
  1992 	aDesCount += aCount;
       
  1993 
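        	// Detach aCount headers from the controller's free list and append them
        	// to this request's chain (aFirstHdr/aLastHdr); the controller's lock
        	// (c.Wait()/c.Signal()) protects the free list while it is re-linked.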
       
  1994 	TDmac& c = *(iChannel.iController);
       
  1995 	c.Wait();
       
  1996 
       
  1997 	if (aFirstHdr == NULL)
       
  1998 		{
       
  1999 		// Handle an empty list specially to simplify the following loop
       
  2000 		aFirstHdr = aLastHdr = c.iFreeHdr;
       
  2001 		c.iFreeHdr = c.iFreeHdr->iNext;
       
  2002 		--aCount;
       
  2003 		}
       
  2004 	else
       
  2005 		{
       
  2006 		aLastHdr->iNext = c.iFreeHdr;
       
  2007 		}
       
  2008 
       
  2009 	// Remove as many descriptors and headers from the free pool as necessary
       
  2010 	// and ensure hardware descriptors are chained together.
       
  2011 	while (aCount-- > 0)
       
  2012 		{
       
  2013 		__DMA_ASSERTD(c.iFreeHdr != NULL);
       
  2014 		if (c.iCapsHwDes)
       
  2015 			{
       
  2016 			c.ChainHwDes(*aLastHdr, *(c.iFreeHdr));
       
  2017 			}
       
  2018 		aLastHdr = c.iFreeHdr;
       
  2019 		c.iFreeHdr = c.iFreeHdr->iNext;
       
  2020 		}
       
  2021 
       
  2022 	c.Signal();
       
  2023 
       
  2024 	aLastHdr->iNext = NULL;
       
  2025 
       
  2026 	__DMA_INVARIANT();
       
  2027 	return KErrNone;
       
  2028 	}
       
  2029 
       
  2030 
       
  2031 EXPORT_C void DDmaRequest::FreeDesList()
       
  2032 	{
       
  2033 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FreeDesList"));
       
  2034 	FreeDesList(iDesCount, iFirstHdr, iLastHdr);
       
  2035 	}
       
  2036 
       
  2037 
       
  2038 EXPORT_C void DDmaRequest::FreeSrcDesList()
       
  2039 	{
       
  2040 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FreeSrcDesList"));
       
  2041 	FreeDesList(iSrcDesCount, iSrcFirstHdr, iSrcLastHdr);
       
  2042 	}
       
  2043 
       
  2044 
       
  2045 EXPORT_C void DDmaRequest::FreeDstDesList()
       
  2046 	{
       
  2047 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FreeDstDesList"));
       
  2048 	FreeDesList(iDstDesCount, iDstFirstHdr, iDstLastHdr);
       
  2049 	}
       
  2050 
       
  2051 
       
  2052 void DDmaRequest::FreeDesList(TInt& aDesCount, SDmaDesHdr*& aFirstHdr, SDmaDesHdr*& aLastHdr)
       
  2053 	{
       
  2054 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FreeDesList count=%d", aDesCount));
       
  2055 	__DMA_ASSERTD(!iQueued);
       
  2056 
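        	// Clear any associated hardware descriptors, then splice the whole
        	// chain back onto the front of the controller's free list in one go.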
       
  2057 	if (aDesCount > 0)
       
  2058 		{
       
  2059 		iChannel.iAvailDesCount += aDesCount;
       
  2060 		TDmac& c = *(iChannel.iController);
       
  2061 		const SDmaDesHdr* hdr = aFirstHdr;
       
  2062 		while (hdr)
       
  2063 			{
       
  2064 			__DMA_ASSERTD(c.IsValidHdr(hdr));
       
  2065 
       
  2066 			// This (potential) PSL call doesn't follow the "overhead
       
  2067 			// principle", and something should be done about this.
       
  2068 			c.ClearHwDes(*hdr);
       
  2069 			hdr = hdr->iNext;
       
   2070 			}
       
  2071 
       
  2072 		c.Wait();
       
  2073 		__DMA_ASSERTD(c.IsValidHdr(c.iFreeHdr));
       
  2074 		aLastHdr->iNext = c.iFreeHdr;
       
  2075 		c.iFreeHdr = aFirstHdr;
       
  2076 		c.Signal();
       
  2077 
       
  2078 		aFirstHdr = aLastHdr = NULL;
       
  2079 		aDesCount = 0;
       
  2080 		}
       
  2081 	}
       
  2082 
       
  2083 
       
  2084 EXPORT_C void DDmaRequest::EnableSrcElementCounting(TBool /*aResetElementCount*/)
       
  2085 	{
       
  2086 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::EnableSrcElementCounting"));
       
  2087 
       
  2088 	// Not yet implemented.
       
  2089 	return;
       
  2090 	}
       
  2091 
       
  2092 
       
  2093 EXPORT_C void DDmaRequest::EnableDstElementCounting(TBool /*aResetElementCount*/)
       
  2094 	{
       
  2095 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::EnableDstElementCounting"));
       
  2096 
       
  2097 	// Not yet implemented.
       
  2098 	return;
       
  2099 	}
       
  2100 
       
  2101 
       
  2102 EXPORT_C void DDmaRequest::DisableSrcElementCounting()
       
  2103 	{
       
  2104 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::DisableSrcElementCounting"));
       
  2105 
       
  2106 	// Not yet implemented.
       
  2107 	return;
       
  2108 	}
       
  2109 
       
  2110 
       
  2111 EXPORT_C void DDmaRequest::DisableDstElementCounting()
       
  2112 	{
       
  2113 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::DisableDstElementCounting"));
       
  2114 
       
  2115 	// Not yet implemented.
       
  2116 	return;
       
  2117 	}
       
  2118 
       
  2119 
       
  2120 EXPORT_C TUint32 DDmaRequest::TotalNumSrcElementsTransferred()
       
  2121 	{
       
  2122 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::TotalNumSrcElementsTransferred"));
       
  2123 
       
  2124 	// Not yet implemented.
       
  2125 	return iTotalNumSrcElementsTransferred;
       
  2126 	}
       
  2127 
       
  2128 
       
  2129 EXPORT_C TUint32 DDmaRequest::TotalNumDstElementsTransferred()
       
  2130 	{
       
  2131 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::TotalNumDstElementsTransferred"));
       
  2132 
       
  2133 	// Not yet implemented.
       
  2134 	return iTotalNumDstElementsTransferred;
       
  2135 	}
       
  2136 
       
  2137 
       
  2138 EXPORT_C TInt DDmaRequest::FragmentCount()
       
  2139 	{
       
  2140 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragmentCount"));
       
  2141 	return FragmentCount(iFirstHdr);
       
  2142 	}
       
  2143 
       
  2144 
       
  2145 EXPORT_C TInt DDmaRequest::SrcFragmentCount()
       
  2146 	{
       
  2147 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::SrcFragmentCount"));
       
  2148 	return FragmentCount(iSrcFirstHdr);
       
  2149 	}
       
  2150 
       
  2151 
       
  2152 EXPORT_C TInt DDmaRequest::DstFragmentCount()
       
  2153 	{
       
  2154 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::DstFragmentCount"));
       
  2155 	return FragmentCount(iDstFirstHdr);
       
  2156 	}
       
  2157 
       
  2158 
       
  2159 TInt DDmaRequest::FragmentCount(const SDmaDesHdr* aHdr)
       
  2160 	{
       
  2161 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragmentCount aHdr=0x%08x", aHdr));
       
  2162 	TInt count = 0;
       
  2163 	for (const SDmaDesHdr* pH = aHdr; pH != NULL; pH = pH->iNext)
       
  2164 		{
       
  2165 		count++;
       
  2166 		}
       
  2167 	return count;
       
  2168 	}
       
  2169 
       
  2170 
       
  2171 //
       
  2172 // Called when request is removed from request queue in channel
       
  2173 //
       
  2174 inline void DDmaRequest::OnDeque()
       
  2175 	{
       
  2176 	iQueued = EFalse;
       
  2177 	if (iChannel.iDmacCaps->iAsymHwDescriptors)
       
  2178 		{
       
  2179 		iSrcLastHdr->iNext = NULL;
       
  2180 		iDstLastHdr->iNext = NULL;
       
  2181 		iChannel.DoUnlink(*iSrcLastHdr);
       
  2182 		iChannel.DoUnlink(*iDstLastHdr);
       
  2183 		}
       
  2184 	else
       
  2185 		{
       
  2186 		iLastHdr->iNext = NULL;
       
  2187 		iChannel.DoUnlink(*iLastHdr);
       
  2188 		}
       
  2189 	}
       
  2190 
       
  2191 
       
  2192 #ifdef _DEBUG
       
  2193 void DDmaRequest::Invariant()
       
  2194 	{
       
  2195 	// This invariant may be called either with,
       
  2196 	// or without the channel lock already held
       
   2197 	TBool channelLockAcquired = EFalse;
       
   2198 	if (!iChannel.iLock.HeldByCurrentThread())
       
  2199 		{
       
  2200 		iChannel.Wait();
       
   2201 		channelLockAcquired = ETrue;
       
  2202 		}
       
  2203 
       
  2204 	__DMA_ASSERTD(LOGICAL_XOR(iCb, iDmaCb));
       
  2205 	if (iChannel.iDmacCaps->iAsymHwDescriptors)
       
  2206 		{
       
  2207 		__DMA_ASSERTD((0 <= iSrcDesCount) && (iSrcDesCount <= iChannel.iMaxDesCount) &&
       
  2208 					  (0 <= iDstDesCount) && (iDstDesCount <= iChannel.iMaxDesCount));
       
  2209 		if (iSrcDesCount == 0)
       
  2210 			{
       
  2211 			// Not fragmented yet
       
  2212 			__DMA_ASSERTD(iDstDesCount == 0);
       
  2213 			__DMA_ASSERTD(!iQueued);
       
  2214 			__DMA_ASSERTD(!iSrcFirstHdr && !iSrcLastHdr &&
       
  2215 						  !iDstFirstHdr && !iDstLastHdr);
       
  2216 			}
       
  2217 		else if (iDstDesCount == 0)
       
  2218 			{
       
  2219 			// Src side only fragmented yet
       
  2220 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcFirstHdr));
       
  2221 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcLastHdr));
       
  2222 			}
       
  2223 		else
       
  2224 			{
       
  2225 			// Src & Dst sides fragmented
       
  2226 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcFirstHdr));
       
  2227 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcLastHdr));
       
  2228 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstFirstHdr));
       
  2229 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstLastHdr));
       
  2230 			}
       
  2231 		}
       
  2232 	else
       
  2233 		{
       
  2234 		__DMA_ASSERTD((0 <= iDesCount) && (iDesCount <= iChannel.iMaxDesCount));
       
  2235 		if (iDesCount == 0)
       
  2236 			{
       
  2237 			__DMA_ASSERTD(!iQueued);
       
  2238 			__DMA_ASSERTD(!iFirstHdr && !iLastHdr);
       
  2239 			}
       
  2240 		else
       
  2241 			{
       
  2242 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iFirstHdr));
       
  2243 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iLastHdr));
       
  2244 			}
       
  2245 		}
       
  2246 
       
   2247 	if (channelLockAcquired)

   2248 		{

   2249 		iChannel.Signal();

   2250 		}
       
  2251 	}
       
  2252 #endif
       
  2253 
       
  2254 
       
  2255 //////////////////////////////////////////////////////////////////////////////
       
  2256 // TDmaChannel
       
  2257 
       
  2258 TDmaChannel::TDmaChannel()
       
  2259 	: iController(NULL),
       
  2260 	  iDmacCaps(NULL),
       
  2261 	  iPslId(0),
       
  2262 	  iDynChannel(EFalse),
       
  2263 	  iPriority(KDmaPriorityNone),
       
  2264 	  iCurHdr(NULL),
       
  2265 	  iNullPtr(&iCurHdr),
       
  2266 	  iDfc(Dfc, NULL, 0),
       
  2267 	  iMaxDesCount(0),
       
  2268 	  iAvailDesCount(0),
       
  2269 	  iIsrDfc(0),
       
  2270 	  iReqQ(),
       
  2271 	  iReqCount(0),
       
  2272 	  iQueuedRequests(0),
       
  2273 	  iCallQueuedRequestFn(ETrue),
       
  2274 	  iCancelInfo(NULL),
       
  2275 	  iRedoRequest(EFalse),
       
  2276 	  iIsrCbRequest(EFalse)
       
  2277 	{
       
  2278 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::TDmaChannel =0x%08X", this));
       
  2279 	__DMA_INVARIANT();
       
  2280 	}
       
  2281 
       
  2282 
       
  2283 //
       
  2284 // static member function
       
  2285 //
       
  2286 EXPORT_C TInt TDmaChannel::Open(const SCreateInfo& aInfo, TDmaChannel*& aChannel)
       
  2287 	{
       
  2288 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Open thread %O", &Kern::CurrentThread()));
       
  2289 
       
  2290 	if (aInfo.iDesCount < 1)
       
  2291 		{
       
   2292 		__KTRACE_OPT(KPANIC, Kern::Printf("DMA channel failed to open: iDesCount < 1"));
       
  2293 		return KErrArgument;
       
  2294 		}
       
  2295 
       
  2296 	__DMA_ASSERTD(aInfo.iPriority <= KDmaPriority8);
       
  2297 	__DMA_ASSERTD(aInfo.iDfcQ != NULL);
       
  2298 	__DMA_ASSERTD(aInfo.iDfcPriority < KNumDfcPriorities);
       
  2299 
       
  2300 	aChannel = NULL;
       
  2301 
       
  2302 	DmaChannelMgr::Wait();
       
  2303 	TDmaChannel* pC = DmaChannelMgr::Open(aInfo.iCookie, aInfo.iDynChannel, aInfo.iPriority);
       
  2304 	DmaChannelMgr::Signal();
       
  2305 	if (!pC)
       
  2306 		{
       
  2307 		return KErrInUse;
       
  2308 		}
       
  2309 	__DMA_ASSERTD(pC->iController != NULL);
       
  2310 	__DMA_ASSERTD(pC->iDmacCaps != NULL);
       
  2311 	__DMA_ASSERTD(pC->iController->iCapsHwDes == pC->DmacCaps().iHwDescriptors);
       
  2312 	// PSL needs to set iDynChannel if and only if dynamic channel was requested
       
  2313 	__DMA_ASSERTD(!LOGICAL_XOR(aInfo.iDynChannel, pC->iDynChannel));
       
  2314 
       
  2315 	const TInt r = pC->iController->ReserveSetOfDes(aInfo.iDesCount);
       
  2316 	if (r != KErrNone)
       
  2317 		{
       
  2318 		pC->Close();
       
  2319 		return r;
       
  2320 		}
       
  2321 	pC->iAvailDesCount = pC->iMaxDesCount = aInfo.iDesCount;
       
  2322 
       
  2323 	new (&pC->iDfc) TDfc(&Dfc, pC, aInfo.iDfcQ, aInfo.iDfcPriority);
       
  2324 
       
  2325 	aChannel = pC;
       
  2326 
       
  2327 #ifdef _DEBUG
       
  2328 	pC->Invariant();
       
  2329 #endif
       
  2330 	__KTRACE_OPT(KDMA, Kern::Printf("opened channel %d", pC->iPslId));
       
  2331 	return KErrNone;
       
  2332 	}
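        // A minimal client-side usage sketch (illustrative only; the channel
        // cookie, DFC queue, callback and buffer addresses below are assumptions
        // not defined in this file, and the constructor/argument forms are shown
        // schematically):
        //
        //	TDmaChannel::SCreateInfo info;
        //	info.iCookie      = KMyPslChannelId;	// PSL-specific channel id
        //	info.iDesCount    = 8;					// descriptors to reserve
        //	info.iDfcQ        = myDfcQ;
        //	info.iDfcPriority = 3;
        //	TDmaChannel* channel = NULL;
        //	TInt r = TDmaChannel::Open(info, channel);
        //	if (r == KErrNone)
        //		{
        //		DDmaRequest* req = new DDmaRequest(*channel, MyDmaCallback);
        //		TDmaTransferArgs args;
        //		args.iSrcConfig.iAddr  = srcPhysAddr;
        //		args.iSrcConfig.iFlags = KDmaMemAddr;
        //		args.iDstConfig.iAddr  = dstPhysAddr;
        //		args.iDstConfig.iFlags = KDmaMemAddr;
        //		args.iTransferCount    = KMyTransferBytes;
        //		r = req->Fragment(args);
        //		if (r == KErrNone)
        //			{
        //			r = req->Queue();				// completion via MyDmaCallback
        //			}
        //		}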
       
  2333 
       
  2334 
       
  2335 EXPORT_C void TDmaChannel::Close()
       
  2336 	{
       
  2337 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Close %d iReqCount=%d", iPslId, iReqCount));
       
  2338 	__DMA_ASSERTD(IsQueueEmpty());
       
  2339 	__DMA_ASSERTD(iReqCount == 0);
       
  2340 
       
  2341 	__DMA_ASSERTD(iQueuedRequests == 0);
       
  2342 
       
  2343 	// Descriptor leak? -> bug in request code
       
  2344 	__DMA_ASSERTD(iAvailDesCount == iMaxDesCount);
       
  2345 
       
  2346 	__DMA_ASSERTD(!iRedoRequest);
       
  2347 	__DMA_ASSERTD(!iIsrCbRequest);
       
  2348 
       
  2349 	iController->ReleaseSetOfDes(iMaxDesCount);
       
  2350 	iAvailDesCount = iMaxDesCount = 0;
       
  2351 
       
  2352 	DmaChannelMgr::Wait();
       
  2353 	DmaChannelMgr::Close(this);
       
  2354 	// The following assignment will be removed once IsOpened() has been
       
  2355 	// removed. That's because 'this' shouldn't be touched any more once
       
  2356 	// Close() has returned from the PSL.
       
  2357 	iController = NULL;
       
  2358 	DmaChannelMgr::Signal();
       
  2359 	}
       
  2360 
       
  2361 
       
  2362 EXPORT_C TInt TDmaChannel::LinkToChannel(TDmaChannel* aChannel)
       
  2363 	{
       
  2364 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::LinkToChannel thread %O",
       
  2365 									&Kern::CurrentThread()));
       
  2366 	if (aChannel)
       
  2367 		{
       
  2368 		return iController->LinkChannels(*this, *aChannel);
       
  2369 		}
       
  2370 	else
       
  2371 		{
       
  2372 		return iController->UnlinkChannel(*this);
       
  2373 		}
       
  2374 	}
       
  2375 
       
  2376 
       
  2377 EXPORT_C TInt TDmaChannel::Pause()
       
  2378 	{
       
  2379 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Pause thread %O",
       
  2380 									&Kern::CurrentThread()));
       
  2381 	return iController->PauseTransfer(*this);
       
  2382 	}
       
  2383 
       
  2384 
       
  2385 EXPORT_C TInt TDmaChannel::Resume()
       
  2386 	{
       
  2387 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Resume thread %O",
       
  2388 									&Kern::CurrentThread()));
       
  2389 	return iController->ResumeTransfer(*this);
       
  2390 	}
       
  2391 
       
  2392 
       
  2393 EXPORT_C void TDmaChannel::CancelAll()
       
  2394 	{
       
  2395 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::CancelAll thread %O channel - %d",
       
  2396 									&Kern::CurrentThread(), iPslId));
       
  2397 	NThread* const nt = NKern::CurrentThread();
       
  2398 	TBool wait = EFalse;
       
  2399 	TDmaCancelInfo cancelinfo;
       
  2400 	TDmaCancelInfo* waiters = NULL;
       
  2401 
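        	// Overall strategy: flag the channel as cancelling (KCancelFlagMask) so
        	// that further ISRs won't queue the DFC, stop the DMAC and flush the
        	// request queue, then make sure the channel DFC has either been
        	// cancelled or has run before returning, so that no client callback can
        	// fire after CancelAll() completes.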
       
  2402 	NKern::ThreadEnterCS();
       
  2403 	Wait();
       
  2404 	const TUint32 req_count_before = iQueuedRequests;
       
  2405 	NThreadBase* const dfc_nt = iDfc.Thread();
       
  2406 	// Shouldn't be NULL (i.e. an IDFC)
       
  2407 	__DMA_ASSERTD(dfc_nt);
       
  2408 
       
  2409 	__e32_atomic_store_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
       
  2410 	// ISRs after this point will not post a DFC, however a DFC may already be
       
  2411 	// queued or running or both.
       
  2412 	if (!IsQueueEmpty())
       
  2413 		{
       
  2414 		// There is a transfer in progress. It may complete before the DMAC
       
  2415 		// has stopped, but the resulting ISR will not post a DFC.
       
  2416 		// ISR should not happen after this function returns.
       
  2417 		iController->StopTransfer(*this);
       
  2418 
       
  2419 		DoCancelAll();
       
  2420 		ResetNullPtr();
       
  2421 
       
  2422 		// Clean-up the request queue.
       
  2423 		SDblQueLink* pL;
       
  2424 		while ((pL = iReqQ.GetFirst()) != NULL)
       
  2425 			{
       
  2426 			iQueuedRequests--;
       
  2427 			DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
       
  2428 			pR->OnDeque();
       
  2429 			}
       
  2430 		}
       
  2431 	if (dfc_nt == nt)
       
  2432 		{
       
  2433 		// DFC runs in this thread, so just cancel it and we're finished
       
  2434 		iDfc.Cancel();
       
  2435 
       
  2436 		// If other calls to CancelAll() are waiting for the DFC, release them here
       
  2437 		waiters = iCancelInfo;
       
  2438 		iCancelInfo = NULL;
       
  2439 
       
  2440 		// Reset the ISR count
       
  2441 		__e32_atomic_store_rel32(&iIsrDfc, 0);
       
  2442 		}
       
  2443 	else
       
  2444 		{
       
  2445 		// DFC runs in another thread. Make sure it's queued and then wait for it to run.
       
  2446 		if (iCancelInfo)
       
  2447 			{
       
  2448 			// Insert cancelinfo into the list so that it precedes iCancelInfo
       
  2449 			cancelinfo.InsertBefore(iCancelInfo);
       
  2450 			}
       
  2451 		else
       
  2452 			{
       
  2453 			iCancelInfo = &cancelinfo;
       
  2454 			}
       
  2455 		wait = ETrue;
       
  2456 		iDfc.Enque();
       
  2457 		}
       
  2458 
       
  2459 	const TUint32 req_count_after = iQueuedRequests;
       
  2460 
       
  2461 	Signal();
       
  2462 
       
  2463 	if (waiters)
       
  2464 		{
       
  2465 		waiters->Signal();
       
  2466 		}
       
  2467 	else if (wait)
       
  2468 		{
       
  2469 		NKern::FSWait(&cancelinfo.iSem);
       
  2470 		}
       
  2471 
       
   2472 	NKern::ThreadLeaveCS();
       
  2473 
       
  2474 	// Only call PSL if there were requests queued when we entered AND there
       
  2475 	// are now no requests left on the queue.
       
  2476 	if (iCallQueuedRequestFn)
       
  2477 		{
       
  2478 		if ((req_count_before != 0) && (req_count_after == 0))
       
  2479 			{
       
  2480 			QueuedRequestCountChanged();
       
  2481 			}
       
  2482 		}
       
  2483 
       
  2484 	__DMA_INVARIANT();
       
  2485 	}
       
  2486 
       
  2487 
       
  2488 EXPORT_C TInt TDmaChannel::IsrRedoRequest(TUint32 aSrcAddr, TUint32 aDstAddr,
       
  2489 										  TUint aTransferCount,
       
  2490 										  TUint32 aPslRequestInfo,
       
  2491 										  TBool aIsrCb)
       
  2492 	{
       
  2493 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::IsrRedoRequest src=0x%08X, "
       
  2494 									"dst=0x%08X, count=%d, pslInfo=0x%08X, isrCb=%d",
       
  2495 									aSrcAddr, aDstAddr, aTransferCount, aPslRequestInfo,
       
  2496 									aIsrCb));
       
  2497 	// Function needs to be called in ISR context.
       
  2498 	__DMA_ASSERTD(NKern::CurrentContext() == NKern::EInterrupt);
       
  2499 
       
  2500 	__DMA_ASSERTD(!iReqQ.IsEmpty());
       
  2501 	__DMA_ASSERTD(iIsrCbRequest);
       
  2502 
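        	// Typical use: the client's ISR callback, running for a request that
        	// was queued with an ISR callback, calls this to patch the just
        	// completed (single-descriptor) request with new parameters and to
        	// restart it without going through the normal Fragment()/Queue() path.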
       
  2503 #ifdef _DEBUG
       
  2504 	if ((aSrcAddr != KPhysAddrInvalid) && (aSrcAddr == aDstAddr))
       
  2505 		{
       
  2506 		__KTRACE_OPT(KPANIC,
       
  2507 					 Kern::Printf("Error: Updating src & dst to same address: 0x%08X",
       
  2508 								  aSrcAddr));
       
  2509 		return KErrArgument;
       
  2510 		}
       
  2511 #endif
       
  2512 
       
  2513 	// We assume here that the just completed request is the first one in the
       
  2514 	// queue, i.e. that even if there is more than one request in the queue,
       
  2515 	// their respective last and first (hw) descriptors are *not* linked.
       
  2516 	// (Although that's what apparently happens in TDmaSgChannel::DoQueue() /
       
  2517 	// TDmac::AppendHwDes() @@@).
       
  2518 	DDmaRequest* const pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);
       
  2519 	TInt r;
       
  2520 
       
  2521 	if (iDmacCaps->iAsymHwDescriptors)
       
  2522 		{
       
  2523 		// We don't allow multiple-descriptor chains to be updated here.
       
   2524 		// Panicking here (instead of returning an error), and doing so only

   2525 		// in the UDEB case (instead of always), is not ideal, but is done in

   2526 		// the interest of performance.
       
  2527 		__DMA_ASSERTD((pCurReq->iSrcDesCount == 1) && (pCurReq->iDstDesCount == 1));
       
  2528 
       
  2529 		// Adjust parameters if necessary (asymmetrical s/g variety)
       
  2530 		const SDmaDesHdr* const pSrcFirstHdr = pCurReq->iSrcFirstHdr;
       
  2531 		if ((aSrcAddr != KPhysAddrInvalid) || aTransferCount || aPslRequestInfo)
       
  2532 			{
       
  2533 			r = iController->UpdateSrcHwDes(*pSrcFirstHdr, aSrcAddr,
       
  2534 											aTransferCount, aPslRequestInfo);
       
  2535 			if (r != KErrNone)
       
  2536 				{
       
  2537 				__KTRACE_OPT(KPANIC, Kern::Printf("Src descriptor updating failed in PSL"));
       
  2538 				return r;
       
  2539 				}
       
  2540 			}
       
  2541 		const SDmaDesHdr* const pDstFirstHdr = pCurReq->iDstFirstHdr;
       
  2542 		if ((aDstAddr != KPhysAddrInvalid) || aTransferCount || aPslRequestInfo)
       
  2543 			{
       
   2544 			r = iController->UpdateDstHwDes(*pDstFirstHdr, aDstAddr,
       
  2545 											aTransferCount, aPslRequestInfo);
       
  2546 			if (r != KErrNone)
       
  2547 				{
       
  2548 				__KTRACE_OPT(KPANIC, Kern::Printf("Dst descriptor updating failed in PSL"));
       
  2549 				return r;
       
  2550 				}
       
  2551 			}
       
  2552 		// Reschedule the request
       
  2553 		iController->Transfer(*this, *pSrcFirstHdr, *pDstFirstHdr);
       
  2554 		}
       
  2555 	else
       
  2556 		{
       
  2557 		// We don't allow a multiple-descriptor chain to be updated here.
       
   2558 		// Panicking here (instead of returning an error), and doing so only

   2559 		// in the UDEB case (instead of always), is not ideal, but is done in

   2560 		// the interest of performance.
       
  2561 		__DMA_ASSERTD(pCurReq->iDesCount == 1);
       
  2562 
       
  2563 		// Adjust parameters if necessary (symmetrical s/g and non-s/g variety)
       
  2564 		const SDmaDesHdr* const pFirstHdr = pCurReq->iFirstHdr;
       
  2565 		if ((aSrcAddr != KPhysAddrInvalid) || (aDstAddr != KPhysAddrInvalid) ||
       
  2566 			aTransferCount || aPslRequestInfo)
       
  2567 			{
       
  2568 			r = iController->UpdateDes(*pFirstHdr, aSrcAddr, aDstAddr,
       
  2569 									   aTransferCount, aPslRequestInfo);
       
  2570 			if (r != KErrNone)
       
  2571 				{
       
  2572 				__KTRACE_OPT(KPANIC, Kern::Printf("Descriptor updating failed"));
       
  2573 				return r;
       
  2574 				}
       
  2575 			}
       
  2576 		// Reschedule the request
       
  2577 		iController->Transfer(*this, *pFirstHdr);
       
  2578 		}
       
  2579 
       
  2580 	if (!aIsrCb)
       
  2581 		{
       
  2582 		// Not another ISR callback please
       
  2583 		pCurReq->iIsrCb = aIsrCb;
       
  2584 		}
       
  2585 	iRedoRequest = ETrue;
       
  2586 
       
  2587 	return KErrNone;
       
  2588 	}
       
  2589 
       
  2590 
       
  2591 EXPORT_C TInt TDmaChannel::FailNext(TInt /*aFragmentCount*/)
       
  2592 	{
       
  2593 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::FailNext"));
       
  2594 	return iController->FailNext(*this);
       
  2595 	}
       
  2596 
       
  2597 
       
  2598 EXPORT_C TInt TDmaChannel::MissNextInterrupts(TInt aInterruptCount)
       
  2599 	{
       
  2600 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::MissNextInterrupts"));
       
  2601 	return iController->MissNextInterrupts(*this, aInterruptCount);
       
  2602 	}
       
  2603 
       
  2604 
       
  2605 EXPORT_C TInt TDmaChannel::Extension(TInt aCmd, TAny* aArg)
       
  2606 	{
       
  2607 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Extension"));
       
  2608 	return iController->Extension(*this, aCmd, aArg);
       
  2609 	}
       
  2610 
       
  2611 
       
  2612 //
       
  2613 // static member function
       
  2614 //
       
  2615 EXPORT_C TInt TDmaChannel::StaticExtension(TInt aCmd, TAny* aArg)
       
  2616 	{
       
  2617 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::StaticExtension"));
       
  2618 	return DmaChannelMgr::StaticExtension(aCmd, aArg);
       
  2619 	}
       
  2620 
       
  2621 
       
  2622 EXPORT_C TUint TDmaChannel::MaxTransferLength(TUint aSrcFlags, TUint aDstFlags,
       
  2623 											  TUint32 aPslInfo)
       
  2624 	{
       
  2625 	return iController->MaxTransferLength(*this, aSrcFlags, aDstFlags, aPslInfo);
       
  2626 	}
       
  2627 
       
  2628 
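        // Returns the PSL's address alignment requirement for one side of a
        // transfer as a bit mask: for example a returned mask of 0x3 means that
        // addresses on that side must be aligned to a 4-byte boundary.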
       
  2629 EXPORT_C TUint TDmaChannel::AddressAlignMask(TUint aTargetFlags, TUint aElementSize,
       
  2630 											 TUint32 aPslInfo)
       
  2631 	{
       
  2632 	return iController->AddressAlignMask(*this, aTargetFlags, aElementSize, aPslInfo);
       
  2633 	}
       
  2634 
       
  2635 
       
  2636 EXPORT_C const SDmacCaps& TDmaChannel::DmacCaps()
       
  2637 	{
       
  2638 	return *iDmacCaps;
       
  2639 	}
       
  2640 
       
  2641 
       
  2642 //
       
  2643 // DFC callback function (static member).
       
  2644 //
       
  2645 void TDmaChannel::Dfc(TAny* aArg)
       
  2646 	{
       
  2647 	static_cast<TDmaChannel*>(aArg)->DoDfc();
       
  2648 	}
       
  2649 
       
  2650 
       
  2651 //
       
  2652 // This function is long because it drains every completion signalled by the
  2653 // ISR, runs the client callbacks and handles cancellation in a single pass.
       
  2653 //
       
  2654 void TDmaChannel::DoDfc()
       
  2655 	{
       
  2656 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::DoDfc thread %O channel - %d",
       
  2657 									&Kern::CurrentThread(), iPslId));
       
  2658 	Wait();
       
  2659 
       
  2660 	// Atomically fetch and reset the number of DFCs queued by the ISR and the
       
  2661 	// error flag. Leave the cancel flag alone for now.
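       	// Layout of iIsrDfc as used below (inferred from the masks applied in
       	// this function, not from a spec): the bits covered by KDfcCountMask
       	// hold the number of completions the ISR has signalled since the last
       	// DFC run, while KErrorFlagMask and KCancelFlagMask are single flag
       	// bits. The atomic AND clears the count and the error flag in one step
       	// but deliberately preserves the cancel flag, which is re-examined at
       	// the end of each loop iteration below.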
       
  2662 	const TUint32 w = __e32_atomic_and_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
       
  2663 	TUint32 count = w & KDfcCountMask;
       
  2664 	const TBool error = w & (TUint32)KErrorFlagMask;
       
  2665 	TBool stop = w & (TUint32)KCancelFlagMask;
       
  2666 	const TUint32 req_count_before = iQueuedRequests;
       
  2667 	TUint32 req_count_after = 0;
       
  2668 
       
  2669 	__DMA_ASSERTD((count > 0) || stop);
       
  2670 	__DMA_ASSERTD(!iRedoRequest); // We shouldn't be here if this is true
       
  2671 
       
  2672 	while (count && !stop)
       
  2673 		{
       
  2674 		--count;
       
  2675 
       
  2676 		__DMA_ASSERTA(!iReqQ.IsEmpty());
       
  2677 
       
  2678 		// If an error occurred it must have been reported on the last
       
  2679 		// interrupt since transfers are suspended after an error.
       
  2680 		DDmaRequest::TResult const res = (count == 0 && error) ?
       
  2681 			DDmaRequest::EError : DDmaRequest::EOk;
       
  2682 		DDmaRequest* pCompletedReq = NULL;
       
  2683 		DDmaRequest* const pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);
       
  2684 
       
  2685 		if (res == DDmaRequest::EOk)
       
  2686 			{
       
  2687 			// Update state machine, current fragment, completed fragment and
       
  2688 			// tell the DMAC to transfer the next fragment if necessary.
       
  2689 			TBool complete;
       
  2690 			if (iDmacCaps->iAsymHwDescriptors)
       
  2691 				{
       
  2692 				SDmaDesHdr* pCompletedSrcHdr = NULL;
       
  2693 				SDmaDesHdr* pCompletedDstHdr = NULL;
       
  2694 				DoDfc(*pCurReq, pCompletedSrcHdr, pCompletedDstHdr);
       
  2695 				// We don't support asymmetrical ISR notifications and request
       
  2696 				// completions yet, which is why the following assert holds;
       
  2697 				// 'complete' can therefore be determined from either the SRC
       
  2698 				// or the DST side.
       
  2699 				__DMA_ASSERTD(!LOGICAL_XOR((pCompletedSrcHdr == pCurReq->iSrcLastHdr),
       
  2700 										   (pCompletedDstHdr == pCurReq->iDstLastHdr)));
       
  2701 				complete = (pCompletedDstHdr == pCurReq->iDstLastHdr);
       
  2702 				}
       
  2703 			else
       
  2704 				{
       
  2705 				SDmaDesHdr* pCompletedHdr = NULL;
       
  2706 				DoDfc(*pCurReq, pCompletedHdr);
       
  2707 				complete = (pCompletedHdr == pCurReq->iLastHdr);
       
  2708 				}
       
  2709 			// If just completed last fragment from current request, switch to
       
  2710 			// next request (if any).
       
  2711 			if (complete)
       
  2712 				{
       
  2713 				pCompletedReq = pCurReq;
       
  2714 				pCurReq->iLink.Deque();
       
  2715 				iQueuedRequests--;
       
  2716 				if (iReqQ.IsEmpty())
       
  2717 					ResetNullPtr();
       
  2718 				pCompletedReq->OnDeque();
       
  2719 				}
       
  2720 			}
       
  2721 		else
       
  2722 			{
       
  2723 			pCompletedReq = pCurReq;
       
  2724 			}
       
  2725 
       
  2726 		if (pCompletedReq && !pCompletedReq->iIsrCb)
       
  2727 			{
       
  2728 			// Don't execute ISR callbacks here (they have already been called)
       
  2729 			DDmaRequest::TCallback const cb = pCompletedReq->iCb;
       
  2730 			if (cb)
       
  2731 				{
       
  2732 				// Old style callback
       
  2733 				TAny* const arg = pCompletedReq->iCbArg;
       
  2734 				Signal();
       
  2735 				__KTRACE_OPT(KDMA, Kern::Printf("Client CB res=%d", res));
       
  2736 				(*cb)(res, arg);
       
  2737 				Wait();
       
  2738 				}
       
  2739 			else
       
  2740 				{
       
  2741 				// New style callback
       
  2742 				TDmaCallback const ncb = pCompletedReq->iDmaCb;
       
  2743 				if (ncb)
       
  2744 					{
       
  2745 					TAny* const arg = pCompletedReq->iDmaCbArg;
       
  2746 					TDmaResult const result = (res == DDmaRequest::EOk) ?
       
  2747 						EDmaResultOK : EDmaResultError;
       
  2748 					Signal();
       
  2749 					__KTRACE_OPT(KDMA, Kern::Printf("Client CB result=%d", result));
       
  2750 					(*ncb)(EDmaCallbackRequestCompletion, result, arg, NULL);
       
  2751 					Wait();
       
  2752 					}
       
  2753 				}
       
  2754 			}
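       		// A hedged sketch of what matching client callbacks could look like
       		// (function names are hypothetical; the old-style signature follows
       		// DDmaRequest::TCallback, the new-style parameter types are inferred
       		// from the call above and may differ from the real typedef):
       		//
       		//   void XferDone(DDmaRequest::TResult aResult, TAny* aArg)
       		//       {/* aResult is DDmaRequest::EOk or DDmaRequest::EError */}
       		//
       		//   void XferDone2(TDmaCallbackType aType, TDmaResult aResult,
       		//                  TAny* aArg, TAny* aHdr)
       		//       {/* aResult is EDmaResultOK or EDmaResultError */}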
       
  2755 		// Allow another thread in, in case it is trying to cancel
       
  2756 		if (pCompletedReq || Flash())
       
  2757 			{
       
  2758 			stop = __e32_atomic_load_acq32(&iIsrDfc) & (TUint32)KCancelFlagMask;
       
  2759 			}
       
  2760 		}
       
  2761 
       
  2762 	if (stop)
       
  2763 		{
       
  2764 		// If another thread set the cancel flag, it should have
       
  2765 		// cleaned up the request queue
       
  2766 		__DMA_ASSERTD(IsQueueEmpty());
       
  2767 
       
  2768 		TDmaCancelInfo* const waiters = iCancelInfo;
       
  2769 		iCancelInfo = NULL;
       
  2770 
       
  2771 		// make sure DFC doesn't run again until a new request completes
       
  2772 		iDfc.Cancel();
       
  2773 
       
  2774 		// reset the ISR count - new requests can now be processed
       
  2775 		__e32_atomic_store_rel32(&iIsrDfc, 0);
       
  2776 
       
  2777 		req_count_after = iQueuedRequests;
       
  2778 		Signal();
       
  2779 
       
  2780 		// release threads doing CancelAll()
       
  2781 		waiters->Signal();
       
  2782 		}
       
  2783 	else
       
  2784 		{
       
  2785 		req_count_after = iQueuedRequests;
       
  2786 		Signal();
       
  2787 		}
       
  2788 
       
  2789 	// Only call PSL if there were requests queued when we entered AND there
       
  2790 	// are now no requests left on the queue (after also having executed all
       
  2791 	// client callbacks).
       
  2792 	if (iCallQueuedRequestFn)
       
  2793 		{
       
  2794 		if ((req_count_before != 0) && (req_count_after == 0))
       
  2795 			{
       
  2796 			QueuedRequestCountChanged();
       
  2797 			}
       
  2798 		}
       
  2799 
       
  2800 	__DMA_INVARIANT();
       
  2801 	}
       
  2802 
       
  2803 
       
  2804 void TDmaChannel::DoQueue(const DDmaRequest& /*aReq*/)
       
  2805 	{
       
  2806 	// Must be overridden
       
  2807 	__DMA_UNREACHABLE_DEFAULT();
       
  2808 	}
       
  2809 
       
  2810 
       
  2811 //
       
  2812 // Unlink the last item of an LLI chain from the next chain.
       
  2813 // Default implementation does nothing. This is overridden by scatter-gather
       
  2814 // channels.
       
  2815 //
       
  2816 void TDmaChannel::DoUnlink(SDmaDesHdr& /*aHdr*/)
       
  2817 	{
       
  2818 	}
       
  2819 
       
  2820 
       
  2821 void TDmaChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& /*aCompletedHdr*/)
       
  2822 	{
       
  2823 	// This check makes sure that this variant of the function is never
       
  2824 	// called for channel types for which it isn't appropriate (and which
       
  2825 	// therefore don't override it).
       
  2826 	__DMA_UNREACHABLE_DEFAULT();
       
  2827 	}
       
  2828 
       
  2829 
       
  2830 void TDmaChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& /*aSrcCompletedHdr*/,
       
  2831 						SDmaDesHdr*& /*aDstCompletedHdr*/)
       
  2832 	{
       
  2833 	// This check makes sure that this variant of the function is never
       
  2834 	// called for channel types for which it isn't appropriate (and which
       
  2835 	// therefore don't override it).
       
  2836 	__DMA_UNREACHABLE_DEFAULT();
       
  2837 	}
       
  2838 
       
  2839 
       
  2840 void TDmaChannel::SetNullPtr(const DDmaRequest& aReq)
       
  2841 	{
       
  2842 	// iNullPtr points to iCurHdr for an empty queue
       
  2843 	*iNullPtr = aReq.iFirstHdr;
       
  2844 	iNullPtr = &(aReq.iLastHdr->iNext);
       
  2845 	}
       
  2846 
       
  2847 
       
  2848 void TDmaChannel::ResetNullPtr()
       
  2849 	{
       
  2850 	iCurHdr = NULL;
       
  2851 	iNullPtr = &iCurHdr;
       
  2852 	}
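       // SetNullPtr()/ResetNullPtr() implement a pointer-to-pointer append:
       // iNullPtr always addresses the location (iCurHdr itself when the queue is
       // empty, otherwise the iNext field of the last queued header) that
       // currently terminates the fragment chain with NULL, so appending a
       // request is two assignments with no list traversal. A minimal sketch of
       // the same idiom with simplified types:
       //
       //   struct Hdr { Hdr* iNext; };
       //   static Hdr*  head = NULL;
       //   static Hdr** tailNext = &head;        // ResetNullPtr(): empty list
       //
       //   static void Append(Hdr* aFirst, Hdr* aLast)
       //       {
       //       *tailNext = aFirst;               // link the new chain in
       //       tailNext  = &aLast->iNext;        // remember the new tail slot
       //       }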
       
  2853 
       
  2854 
       
  2855 /** PSL may override */
       
  2856 void TDmaChannel::QueuedRequestCountChanged()
       
  2857 	{
       
  2858 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::QueuedRequestCountChanged(): "
       
  2859 									"disabling further calls"));
       
  2860 	Wait();
       
  2861 	iCallQueuedRequestFn = EFalse;
       
  2862 	Signal();
       
  2863 	}
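       // The default implementation above simply switches the notification off,
       // so a PSL that wants to be told when the queued request count changes
       // must override it. A hedged sketch (the derived class name and the
       // clock-gating helper are hypothetical):
       //
       //   void TFooDmaChannel::QueuedRequestCountChanged()
       //       {
       //       // Invoked by the PIL e.g. after the request queue drains (see the
       //       // call site in DoDfc() above); gate the DMAC clock or adjust a
       //       // power resource as appropriate for this PSL.
       //       UpdateDmacClockGating();
       //       }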
       
  2864 
       
  2865 
       
  2866 #ifdef _DEBUG
       
  2867 void TDmaChannel::Invariant()
       
  2868 	{
       
  2869 	Wait();
       
  2870 
       
  2871 	__DMA_ASSERTD(iReqCount >= 0);
       
  2872 
       
  2873 	__DMA_ASSERTD(iCurHdr == NULL || iController->IsValidHdr(iCurHdr));
       
  2874 
       
  2875 	// iNullPtr should always point to the NULL pointer that terminates the fragment queue
       
  2876 	__DMA_ASSERTD(*iNullPtr == NULL);
       
  2877 
       
  2878 	__DMA_ASSERTD((0 <= iAvailDesCount) && (iAvailDesCount <= iMaxDesCount));
       
  2879 
       
  2880 	__DMA_ASSERTD(LOGICAL_XOR(iCurHdr, IsQueueEmpty()));
       
  2881 	if (iCurHdr == NULL)
       
  2882 		{
       
  2883 		__DMA_ASSERTD(iNullPtr == &iCurHdr);
       
  2884 		}
       
  2885 
       
  2886 	Signal();
       
  2887 	}
       
  2888 #endif
       
  2889 
       
  2890 
       
  2891 //////////////////////////////////////////////////////////////////////////////
       
  2892 // TDmaSbChannel
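       // Summary (inferred from DoQueue()/DoDfc() below): a single-buffer channel
       // programs exactly one fragment into the controller at a time. DoQueue()
       // starts a transfer only when the channel is idle; DoDfc() programs the
       // next fragment, if any, each time one completes and returns the channel
       // to EIdle when the fragment chain is exhausted.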
       
  2893 
       
  2894 void TDmaSbChannel::DoQueue(const DDmaRequest& /*aReq*/)
       
  2895 	{
       
  2896 	if (iState != ETransferring)
       
  2897 		{
       
  2898 		iController->Transfer(*this, *iCurHdr);
       
  2899 		iState = ETransferring;
       
  2900 		}
       
  2901 	}
       
  2902 
       
  2903 
       
  2904 void TDmaSbChannel::DoCancelAll()
       
  2905 	{
       
  2906 	__DMA_ASSERTD(iState == ETransferring);
       
  2907 	iState = EIdle;
       
  2908 	}
       
  2909 
       
  2910 
       
  2911 void TDmaSbChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
       
  2912 	{
       
  2913 	__DMA_ASSERTD(iState == ETransferring);
       
  2914 	aCompletedHdr = iCurHdr;
       
  2915 	iCurHdr = iCurHdr->iNext;
       
  2916 	if (iCurHdr != NULL)
       
  2917 		{
       
  2918 		iController->Transfer(*this, *iCurHdr);
       
  2919 		}
       
  2920 	else
       
  2921 		{
       
  2922 		iState = EIdle;
       
  2923 		}
       
  2924 	}
       
  2925 
       
  2926 
       
  2927 //////////////////////////////////////////////////////////////////////////////
       
  2928 // TDmaDbChannel
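       // Summary (inferred from DoQueue()/DoDfc() below): a double-buffer channel
       // keeps at most two fragments programmed into the controller.
       //   EIdle             - nothing programmed
       //   ETransferringLast - only the final remaining fragment is programmed
       //   ETransferring     - the current fragment and the one after it are
       //                       both programmed
       // DoQueue() tops the hardware up to two outstanding fragments and DoDfc()
       // queues the next-but-one fragment (if any) as each fragment completes.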
       
  2929 
       
  2930 void TDmaDbChannel::DoQueue(const DDmaRequest& aReq)
       
  2931 	{
       
  2932 	switch (iState)
       
  2933 		{
       
  2934 	case EIdle:
       
  2935 		iController->Transfer(*this, *iCurHdr);
       
  2936 		if (iCurHdr->iNext)
       
  2937 			{
       
  2938 			iController->Transfer(*this, *(iCurHdr->iNext));
       
  2939 			iState = ETransferring;
       
  2940 			}
       
  2941 		else
       
  2942 			iState = ETransferringLast;
       
  2943 		break;
       
  2944 	case ETransferring:
       
  2945 		// nothing to do
       
  2946 		break;
       
  2947 	case ETransferringLast:
       
  2948 		iController->Transfer(*this, *(aReq.iFirstHdr));
       
  2949 		iState = ETransferring;
       
  2950 		break;
       
  2951 	default:
       
  2952 		__DMA_CANT_HAPPEN();
       
  2953 		}
       
  2954 	}
       
  2955 
       
  2956 
       
  2957 void TDmaDbChannel::DoCancelAll()
       
  2958 	{
       
  2959 	iState = EIdle;
       
  2960 	}
       
  2961 
       
  2962 
       
  2963 void TDmaDbChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
       
  2964 	{
       
  2965 	aCompletedHdr = iCurHdr;
       
  2966 	iCurHdr = iCurHdr->iNext;
       
  2967 	switch (iState)
       
  2968 		{
       
  2969 	case ETransferringLast:
       
  2970 		iState = EIdle;
       
  2971 		break;
       
  2972 	case ETransferring:
       
  2973 		if (iCurHdr->iNext == NULL)
       
  2974 			iState = ETransferringLast;
       
  2975 		else
       
  2976 			iController->Transfer(*this, *(iCurHdr->iNext));
       
  2977 		break;
       
  2978 	default:
       
  2979 		__DMA_CANT_HAPPEN();
       
  2980 		}
       
  2981 	}
       
  2982 
       
  2983 
       
  2984 //////////////////////////////////////////////////////////////////////////////
       
  2985 // TDmaSgChannel
       
  2986 
       
  2987 void TDmaSgChannel::DoQueue(const DDmaRequest& aReq)
       
  2988 	{
       
  2989 	if (iState == ETransferring)
       
  2990 		{
       
  2991 		__DMA_ASSERTD(!aReq.iLink.Alone());
       
  2992 		DDmaRequest* pReqPrev = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
       
  2993 		iController->AppendHwDes(*this, *(pReqPrev->iLastHdr), *(aReq.iFirstHdr));
       
  2994 		}
       
  2995 	else
       
  2996 		{
       
  2997 		iController->Transfer(*this, *(aReq.iFirstHdr));
       
  2998 		iState = ETransferring;
       
  2999 		}
       
  3000 	}
       
  3001 
       
  3002 
       
  3003 void TDmaSgChannel::DoCancelAll()
       
  3004 	{
       
  3005 	__DMA_ASSERTD(iState == ETransferring);
       
  3006 	iState = EIdle;
       
  3007 	}
       
  3008 
       
  3009 
       
  3010 void TDmaSgChannel::DoUnlink(SDmaDesHdr& aHdr)
       
  3011 	{
       
  3012 	iController->UnlinkHwDes(*this, aHdr);
       
  3013 	}
       
  3014 
       
  3015 
       
  3016 void TDmaSgChannel::DoDfc(const DDmaRequest& aCurReq, SDmaDesHdr*& aCompletedHdr)
       
  3017 	{
       
  3018 	__DMA_ASSERTD(iState == ETransferring);
       
  3019 	aCompletedHdr = aCurReq.iLastHdr;
       
  3020 	iCurHdr = aCompletedHdr->iNext;
       
  3021 	iState = (iCurHdr != NULL) ? ETransferring : EIdle;
       
  3022 	}
       
  3023 
       
  3024 
       
  3025 //////////////////////////////////////////////////////////////////////////////
       
  3026 // TDmaAsymSgChannel
       
  3027 
       
  3028 TDmaAsymSgChannel::TDmaAsymSgChannel()
       
  3029 	: iSrcCurHdr(NULL),
       
  3030 	  iSrcNullPtr(&iSrcCurHdr),
       
  3031 	  iDstCurHdr(NULL),
       
  3032 	  iDstNullPtr(&iDstCurHdr)
       
  3033 	{
       
  3034 	__DMA_INVARIANT();
       
  3035 	}
       
  3036 
       
  3037 
       
  3038 void TDmaAsymSgChannel::SetNullPtr(const DDmaRequest& aReq)
       
  3039 	{
       
  3040 	// i{Src|Dst}NullPtr points to i{Src|Dst}CurHdr for an empty queue
       
  3041 	*iSrcNullPtr = aReq.iSrcFirstHdr;
       
  3042 	*iDstNullPtr = aReq.iDstFirstHdr;
       
  3043 	iSrcNullPtr = &(aReq.iSrcLastHdr->iNext);
       
  3044 	iDstNullPtr = &(aReq.iDstLastHdr->iNext);
       
  3045 	}
       
  3046 
       
  3047 
       
  3048 void TDmaAsymSgChannel::ResetNullPtr()
       
  3049 	{
       
  3050 	iSrcCurHdr = NULL;
       
  3051 	iSrcNullPtr = &iSrcCurHdr;
       
  3052 	iDstCurHdr = NULL;
       
  3053 	iDstNullPtr = &iDstCurHdr;
       
  3054 	}
       
  3055 
       
  3056 
       
  3057 void TDmaAsymSgChannel::DoQueue(const DDmaRequest& aReq)
       
  3058 	{
       
  3059 	if (iState == ETransferring)
       
  3060 		{
       
  3061 		__DMA_ASSERTD(!aReq.iLink.Alone());
       
  3062 		DDmaRequest* pReqPrev = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
       
  3063 		iController->AppendHwDes(*this,
       
  3064 								 *(pReqPrev->iSrcLastHdr), *(aReq.iSrcFirstHdr),
       
  3065 								 *(pReqPrev->iDstLastHdr), *(aReq.iDstFirstHdr));
       
  3066 		}
       
  3067 	else
       
  3068 		{
       
  3069 		iController->Transfer(*this, *(aReq.iSrcFirstHdr), *(aReq.iDstFirstHdr));
       
  3070 		iState = ETransferring;
       
  3071 		}
       
  3072 	}
       
  3073 
       
  3074 
       
  3075 void TDmaAsymSgChannel::DoCancelAll()
       
  3076 	{
       
  3077 	__DMA_ASSERTD(iState == ETransferring);
       
  3078 	iState = EIdle;
       
  3079 	}
       
  3080 
       
  3081 
       
  3082 void TDmaAsymSgChannel::DoUnlink(SDmaDesHdr& aHdr)
       
  3083 	{
       
  3084 	iController->UnlinkHwDes(*this, aHdr);
       
  3085 	}
       
  3086 
       
  3087 
       
  3088 void TDmaAsymSgChannel::DoDfc(const DDmaRequest& aCurReq, SDmaDesHdr*& aSrcCompletedHdr,
       
  3089 							  SDmaDesHdr*& aDstCompletedHdr)
       
  3090 	{
       
  3091 	__DMA_ASSERTD(iState == ETransferring);
       
  3092 	aSrcCompletedHdr = aCurReq.iSrcLastHdr;
       
  3093 	iSrcCurHdr = aSrcCompletedHdr->iNext;
       
  3094 	aDstCompletedHdr = aCurReq.iDstLastHdr;
       
  3095 	iDstCurHdr = aDstCompletedHdr->iNext;
       
  3096 	// Either both must be NULL or neither of them.
       
  3097 	__DMA_ASSERTD(!LOGICAL_XOR(iSrcCurHdr, iDstCurHdr));
       
  3098 	iState = (iSrcCurHdr != NULL) ? ETransferring : EIdle;
       
  3099 	}
       
  3100 
       
  3101 
       
  3102 #ifdef _DEBUG
       
  3103 void TDmaAsymSgChannel::Invariant()
       
  3104 	{
       
  3105 	Wait();
       
  3106 
       
  3107 	__DMA_ASSERTD(iReqCount >= 0);
       
  3108 
       
  3109 	__DMA_ASSERTD(iSrcCurHdr == NULL || iController->IsValidHdr(iSrcCurHdr));
       
  3110 	__DMA_ASSERTD(iDstCurHdr == NULL || iController->IsValidHdr(iDstCurHdr));
       
  3111 
       
  3112 	// i{Src|Dst}NullPtr should always point to the NULL pointer that terminates its fragment queue
       
  3113 	__DMA_ASSERTD(*iSrcNullPtr == NULL);
       
  3114 	__DMA_ASSERTD(*iDstNullPtr == NULL);
       
  3115 
       
  3116 	__DMA_ASSERTD((0 <= iAvailDesCount) && (iAvailDesCount <= iMaxDesCount));
       
  3117 
       
  3118 	__DMA_ASSERTD((iSrcCurHdr && iDstCurHdr && !IsQueueEmpty()) ||
       
  3119 				  (!iSrcCurHdr && !iDstCurHdr && IsQueueEmpty()));
       
  3120 	if (iSrcCurHdr == NULL)
       
  3121 		{
       
  3122 		__DMA_ASSERTD(iSrcNullPtr == &iSrcCurHdr);
       
  3123 		}
       
  3124 	if (iDstCurHdr == NULL)
       
  3125 		{
       
  3126 		__DMA_ASSERTD(iDstNullPtr == &iDstCurHdr);
       
  3127 		}
       
  3128 
       
  3129 	Signal();
       
  3130 	}
       
  3131 #endif