kernel/eka/drivers/dma/dma2_pil.cpp
changeset 247 d8d70de2bd36
parent 139 95f71bcdcdb7
--- a/kernel/eka/drivers/dma/dma2_pil.cpp	Tue Jul 06 15:50:07 2010 +0300
+++ b/kernel/eka/drivers/dma/dma2_pil.cpp	Wed Aug 18 11:08:29 2010 +0300
@@ -22,23 +22,21 @@
 #include <kernel/kern_priv.h>
 
 
-// Symbian Min() & Max() are broken, so we have to define them ourselves
-inline TUint Min(TUint aLeft, TUint aRight)
+// Symbian's own Min() & Max() are broken, so we define our own (renamed to avoid clashes)
+inline TUint _Min(TUint aLeft, TUint aRight)
 	{return(aLeft < aRight ? aLeft : aRight);}
-inline TUint Max(TUint aLeft, TUint aRight)
+inline TUint _Max(TUint aLeft, TUint aRight)
 	{return(aLeft > aRight ? aLeft : aRight);}
 
 
-// Uncomment the following #define only when freezing the DMA2 export library.
-//#define __FREEZE_DMA2_LIB
-#ifdef __FREEZE_DMA2_LIB
+// Uncomment the following section only when freezing the DMA2 export library
+/*
 TInt DmaChannelMgr::StaticExtension(TInt, TAny*) {return 0;}
 TDmaChannel* DmaChannelMgr::Open(TUint32, TBool, TUint) {return 0;}
 void DmaChannelMgr::Close(TDmaChannel*) {}
 EXPORT_C const TDmaTestInfo& DmaTestInfo() {static TDmaTestInfo a; return a;}
 EXPORT_C const TDmaV2TestInfo& DmaTestInfoV2() {static TDmaV2TestInfo a; return a;}
-#endif	// #ifdef __FREEZE_DMA2_LIB
-
+*/
 
 static const char KDmaPanicCat[] = "DMA " __FILE__;
 
@@ -202,7 +200,7 @@
 	{
 	// TDmac needs to override this function if it has reported the channel
 	// type for which the PIL calls it.
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	}
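
The rename from __DMA_CANT_HAPPEN() to __DMA_UNREACHABLE_DEFAULT() makes it
explicit that these bodies are unreachable *defaults* which a concrete
controller must override. The macro's definition is not part of this hunk; a
plausible expansion, assuming it faults the kernel like similar PIL debug
macros (this definition is an assumption, shown only to convey intent):

	// Assumed definition; the real macro lives in the PIL's header.
	// Faults the kernel with this file's panic category and line number.
	#define __DMA_UNREACHABLE_DEFAULT()	Kern::Fault(KDmaPanicCat, __LINE__)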
 
 
@@ -211,7 +209,7 @@
 	{
 	// TDmac needs to override this function if it has reported the channel
 	// type for which the PIL calls it.
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	}
 
 
@@ -265,7 +263,7 @@
 		}
 	else
 		{
-		iDesPool = new TDmaTransferArgs[iMaxDesCount];
+		iDesPool = Kern::Alloc(iMaxDesCount * sizeof(TDmaTransferArgs));
 		r = iDesPool ? KErrNone : KErrNoMemory;
 		}
 	return r;
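
Using Kern::Alloc() instead of array-new for the pseudo descriptor pool keeps
the allocation symmetric with a Kern::Free()-based cleanup path and avoids
mixing array-new (which stores an element-count cookie) with a plain kernel
free. Kern::Alloc() zero-fills the block and runs no constructors, which is
acceptable here since the TDmaTransferArgs elements are only ever assigned
into. A minimal sketch of the matching release, assuming iDesPool is a TAny*
member and that hardware descriptor pools are freed by the PSL (the destructor
shown is illustrative, not this changeset's code):

	TDmac::~TDmac()
		{
		if (!iCapsHwDes)
			{
			// Pool came from Kern::Alloc(), so release it with Kern::Free();
			// no destructors need to run for the TDmaTransferArgs elements.
			Kern::Free(iDesPool);
			}
		}
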
@@ -450,7 +448,7 @@
 TInt TDmac::InitHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
 	{
 	// concrete controller must override if SDmacCaps::iHwDescriptors set
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	return KErrGeneral;
 	}
 
@@ -458,7 +456,7 @@
 TInt TDmac::InitSrcHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
 	{
 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	return KErrGeneral;
 	}
 
@@ -466,7 +464,7 @@
 TInt TDmac::InitDstHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
 	{
 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	return KErrGeneral;
 	}
 
@@ -502,7 +500,7 @@
 						TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
 	{
 	// concrete controller must override if SDmacCaps::iHwDescriptors set
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	return KErrGeneral;
 	}
 
@@ -511,7 +509,7 @@
 						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
 	{
 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	return KErrGeneral;
 	}
 
@@ -520,7 +518,7 @@
 						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
 	{
 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	return KErrGeneral;
 	}
 
@@ -528,7 +526,7 @@
 void TDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHdr*/)
 	{
 	// concrete controller must override if SDmacCaps::iHwDescriptors set
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	}
 
 
@@ -536,7 +534,7 @@
 						const SDmaDesHdr& /*aNewHdr*/)
 	{
  	// concrete controller must override if SDmacCaps::iHwDescriptors set
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	}
 
 
@@ -545,14 +543,14 @@
 						const SDmaDesHdr& /*aDstLastHdr*/, const SDmaDesHdr& /*aDstNewHdr*/)
 	{
 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	}
 
 
 void TDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
 	{
  	// concrete controller must override if SDmacCaps::iHwDescriptors set
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	}
 
 
@@ -601,7 +599,7 @@
 TUint32 TDmac::HwDesNumDstElementsTransferred(const SDmaDesHdr& /*aHdr*/)
 	{
  	// Concrete controller must override if SDmacCaps::iHwDescriptors set.
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	return 0;
 	}
 
@@ -609,7 +607,7 @@
 TUint32 TDmac::HwDesNumSrcElementsTransferred(const SDmaDesHdr& /*aHdr*/)
 	{
  	// Concrete controller must override if SDmacCaps::iHwDescriptors set.
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	return 0;
 	}
 
@@ -856,7 +854,7 @@
 	}
 
 
-TInt DDmaRequest::CheckMemFlags(const TDmaTransferConfig& aTarget, TUint aCount) const
+TInt DDmaRequest::CheckMemFlags(const TDmaTransferConfig& aTarget) const
 	{
 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::CheckMemFlags"));
 
@@ -892,7 +890,7 @@
 	TUint rem = 0;
 	TInt r = KErrNone;
 
-	while (1)
+	FOREVER
 		{
 		// If an element size is defined, make sure the fragment size is
 		// greater or equal.
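
FOREVER is the e32 idiom for an unconditional loop, so this substitution is
purely stylistic. For reference, e32def.h defines it as:

	#define FOREVER for(;;)
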
@@ -1101,13 +1099,13 @@
 	aTransferArgs.iChannelCookie = iChannel.PslId();
 
 	// Client shouldn't specify contradictory or invalid things
-	TInt r = CheckMemFlags(aTransferArgs.iSrcConfig, count);
+	TInt r = CheckMemFlags(aTransferArgs.iSrcConfig);
 	if (r != KErrNone)
 		{
 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckMemFlags(src)"));
 		return r;
 		}
+	r = CheckMemFlags(aTransferArgs.iDstConfig);
+	r =  CheckMemFlags(aTransferArgs.iDstConfig);
 	if (r != KErrNone)
 		{
 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckMemFlags(dst)"));
@@ -1160,7 +1158,7 @@
 	// Max aligned length is used to make sure the beginnings of subtransfers
 	// (i.e. fragments) are correctly aligned.
 	const TUint max_aligned_len = (aMaxTransferLen &
-								   ~(Max(align_mask_src, align_mask_dst)));
+								   ~(_Max(align_mask_src, align_mask_dst)));
 	__KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len));
 	// Client and PSL sane?
 	__DMA_ASSERTD(max_aligned_len > 0);
@@ -1235,8 +1233,8 @@
 			break;
 			}
 		// Compute fragment size
-		TUint c = Min(aMaxTransferLen, aCount);
-		__KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c));
+		TUint c = _Min(aMaxTransferLen, aCount);
+		__KTRACE_OPT(KDMA, Kern::Printf("c = _Min(aMaxTransferLen, aCount) = %d", c));
 
 		// SRC
 		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
@@ -1422,8 +1420,8 @@
 			break;
 			}
 		// Compute fragment size
-		TUint c = Min(aMaxTransferLen, aCount);
-		__KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c));
+		TUint c = _Min(aMaxTransferLen, aCount);
+		__KTRACE_OPT(KDMA, Kern::Printf("c = _Min(aMaxTransferLen, aCount) = %d", c));
 
 		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
 			{
@@ -1536,8 +1534,8 @@
 			break;
 			}
 		// Compute fragment size
-		TUint c = Min(aMaxTransferLen, aCount);
-		__KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c));
+		TUint c = _Min(aMaxTransferLen, aCount);
+		__KTRACE_OPT(KDMA, Kern::Printf("c = _Min(aMaxTransferLen, aCount) = %d", c));
 
 		if (mem_dst && !(dst.iFlags & KDmaMemIsContiguous))
 			{
@@ -1642,7 +1640,7 @@
 	// Max aligned length is used to make sure the beginnings of subtransfers
 	// (i.e. fragments) are correctly aligned.
 	const TUint max_aligned_len = (aMaxTransferLen &
-								   ~(Max(align_mask_src, align_mask_dst)));
+								   ~(_Max(align_mask_src, align_mask_dst)));
 	__KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len));
 	// Client and PSL sane?
 	__DMA_ASSERTD(max_aligned_len > 0);
@@ -1708,6 +1706,7 @@
 	// Revert any previous fragmentation attempt
 	FreeSrcDesList();
 	FreeDstDesList();
+	__DMA_ASSERTD(iSrcDesCount == iDstDesCount);
 	do
 		{
 		// Allocate fragment
@@ -1721,9 +1720,10 @@
 			{
 			break;
 			}
+		__DMA_ASSERTD(iSrcDesCount == iDstDesCount);
 		// Compute fragment size
-		TUint c = Min(aMaxTransferLen, aCount);
-		__KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c));
+		TUint c = _Min(aMaxTransferLen, aCount);
+		__KTRACE_OPT(KDMA, Kern::Printf("c = _Min(aMaxTransferLen, aCount) = %d", c));
 
 		// SRC
 		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
@@ -1851,7 +1851,7 @@
 	// Not configured? Call Fragment() first!
 	if (iChannel.iDmacCaps->iAsymHwDescriptors)
 		{
-		__DMA_ASSERTD((iSrcDesCount < 0) && (iDstDesCount < 0));
+		__DMA_ASSERTD((iSrcDesCount > 0) && (iDstDesCount > 0));
 		}
 	else
 		{
@@ -1863,11 +1863,14 @@
 	iChannel.Wait();
 
 	TUint32 req_count = iChannel.iQueuedRequests++;
-	if (req_count == 0)
+	if (iChannel.iCallQueuedRequestFn)
 		{
-		iChannel.Signal();
-		iChannel.QueuedRequestCountChanged();
-		iChannel.Wait();
+		if (req_count == 0)
+			{
+			iChannel.Signal();
+			iChannel.QueuedRequestCountChanged();
+			iChannel.Wait();
+			}
 		}
 
 	TInt r = KErrGeneral;
@@ -1883,9 +1886,12 @@
 		req_count = --iChannel.iQueuedRequests;
 		__DMA_INVARIANT();
 		iChannel.Signal();
-		if (req_count == 0)
+		if (iChannel.iCallQueuedRequestFn)
 			{
-			iChannel.QueuedRequestCountChanged();
+			if (req_count == 0)
+				{
+				iChannel.QueuedRequestCountChanged();
+				}
 			}
 		}
 	else if (iIsrCb && !iChannel.IsQueueEmpty())
@@ -1899,9 +1905,12 @@
 		req_count = --iChannel.iQueuedRequests;
 		__DMA_INVARIANT();
 		iChannel.Signal();
-		if (req_count == 0)
+		if (iChannel.iCallQueuedRequestFn)
 			{
-			iChannel.QueuedRequestCountChanged();
+			if (req_count == 0)
+				{
+				iChannel.QueuedRequestCountChanged();
+				}
 			}
 		}
 	else if (iChannel.iIsrDfc & (TUint32)TDmaChannel::KCancelFlagMask)
@@ -1911,18 +1920,19 @@
 		req_count = --iChannel.iQueuedRequests;
 		__DMA_INVARIANT();
 		iChannel.Signal();
-		if (req_count == 0)
+		if (iChannel.iCallQueuedRequestFn)
 			{
-			iChannel.QueuedRequestCountChanged();
+			if (req_count == 0)
+				{
+				iChannel.QueuedRequestCountChanged();
+				}
 			}
 		}
 	else
 		{
 		iQueued = ETrue;
 		iChannel.iReqQ.Add(&iLink);
-		// iChannel.iNullPtr points to iChannel.iCurHdr for an empty queue
-		*iChannel.iNullPtr = iFirstHdr;
-		iChannel.iNullPtr = &(iLastHdr->iNext);
+		iChannel.SetNullPtr(*this);
 		if (iIsrCb)
 			{
 			// Since we've made sure that there is no other request in the
@@ -1934,7 +1944,7 @@
 			// possible.
 			__e32_atomic_store_rel32(&iChannel.iIsrCbRequest, ETrue);
 			}
-		iChannel.DoQueue(const_cast<const DDmaRequest&>(*this));
+		iChannel.DoQueue(*this);
 		r = KErrNone;
 		__DMA_INVARIANT();
 		iChannel.Signal();
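
Every exit path from Queue() now tests iCallQueuedRequestFn before notifying
the PSL. The nested ifs repeated at each site are equivalent to the following
hypothetical helper (not present in the source; shown only to summarize the
pattern):

	// Hypothetical helper equivalent to the inlined guards above.
	inline void TDmaChannel::NotifyIfQueueDrained(TUint32 aReqCount)
		{
		// The default QueuedRequestCountChanged() clears iCallQueuedRequestFn,
		// so a channel whose PSL doesn't override the hook takes this branch
		// at most once; an overriding PSL keeps receiving notifications.
		if (iCallQueuedRequestFn && (aReqCount == 0))
			{
			QueuedRequestCountChanged();
			}
		}
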
@@ -2112,21 +2122,6 @@
 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::TotalNumSrcElementsTransferred"));
 
 	// Not yet implemented.
-
-	// So far largely bogus code (just to touch some symbols)...
-	iTotalNumSrcElementsTransferred = 0;
-	TDmac& c = *(iChannel.iController);
-	if (c.iCapsHwDes)
-		{
-		for (const SDmaDesHdr* pH = iFirstHdr; pH != NULL; pH = pH->iNext)
-			{
-			iTotalNumSrcElementsTransferred += c.HwDesNumDstElementsTransferred(*pH);
-			}
-		}
-	else
-		{
-		// Do something different for pseudo descriptors...
-		}
 	return iTotalNumSrcElementsTransferred;
 	}
 
@@ -2213,22 +2208,26 @@
 					  (0 <= iDstDesCount) && (iDstDesCount <= iChannel.iMaxDesCount));
 		if (iSrcDesCount == 0)
 			{
+			// Not fragmented yet
 			__DMA_ASSERTD(iDstDesCount == 0);
 			__DMA_ASSERTD(!iQueued);
 			__DMA_ASSERTD(!iSrcFirstHdr && !iSrcLastHdr &&
 						  !iDstFirstHdr && !iDstLastHdr);
 			}
+		else if (iDstDesCount == 0)
+			{
+			// Only the src side has been fragmented so far
+			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcFirstHdr));
+			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcLastHdr));
+			}
 		else
 			{
+			// Src & Dst sides fragmented
 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcFirstHdr));
 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcLastHdr));
 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstFirstHdr));
 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstLastHdr));
 			}
-		if (iChannel.iDmacCaps->iBalancedAsymSegments)
-			{
-			__DMA_ASSERTD(iSrcDesCount == iDstDesCount);
-			}
 		}
 	else
 		{
@@ -2271,6 +2270,7 @@
 	  iReqQ(),
 	  iReqCount(0),
 	  iQueuedRequests(0),
+	  iCallQueuedRequestFn(ETrue),
 	  iCancelInfo(NULL),
 	  iRedoRequest(EFalse),
 	  iIsrCbRequest(EFalse)
@@ -2416,7 +2416,8 @@
 		// ISR should not happen after this function returns.
 		iController->StopTransfer(*this);
 
-		ResetStateMachine();
+		DoCancelAll();
+		ResetNullPtr();
 
 		// Clean-up the request queue.
 		SDblQueLink* pL;
@@ -2472,9 +2473,12 @@
 
 	// Only call PSL if there were requests queued when we entered AND there
 	// are now no requests left on the queue.
-	if ((req_count_before != 0) && (req_count_after == 0))
+	if (iCallQueuedRequestFn)
 		{
-		QueuedRequestCountChanged();
+		if ((req_count_before != 0) && (req_count_after == 0))
+			{
+			QueuedRequestCountChanged();
+			}
 		}
 
 	__DMA_INVARIANT();
@@ -2687,8 +2691,7 @@
 				{
 				SDmaDesHdr* pCompletedSrcHdr = NULL;
 				SDmaDesHdr* pCompletedDstHdr = NULL;
-				DoDfc(const_cast<const DDmaRequest&>(*pCurReq),
-					  pCompletedSrcHdr, pCompletedDstHdr);
+				DoDfc(*pCurReq, pCompletedSrcHdr, pCompletedDstHdr);
 				// We don't support asymmetrical ISR notifications and request
 				// completions yet, hence we can do the following assert test
 				// here; also 'complete' is determined equally by either the
@@ -2700,7 +2703,7 @@
 			else
 				{
 				SDmaDesHdr* pCompletedHdr = NULL;
-				DoDfc(const_cast<const DDmaRequest&>(*pCurReq), pCompletedHdr);
+				DoDfc(*pCurReq, pCompletedHdr);
 				complete = (pCompletedHdr == pCurReq->iLastHdr);
 				}
 			// If just completed last fragment from current request, switch to
@@ -2711,7 +2714,7 @@
 				pCurReq->iLink.Deque();
 				iQueuedRequests--;
 				if (iReqQ.IsEmpty())
-					iNullPtr = &iCurHdr;
+					ResetNullPtr();
 				pCompletedReq->OnDeque();
 				}
 			}
@@ -2786,30 +2789,22 @@
 	// Only call PSL if there were requests queued when we entered AND there
 	// are now no requests left on the queue (after also having executed all
 	// client callbacks).
-	if ((req_count_before != 0) && (req_count_after == 0))
+	if (iCallQueuedRequestFn)
 		{
-		QueuedRequestCountChanged();
+		if ((req_count_before != 0) && (req_count_after == 0))
+			{
+			QueuedRequestCountChanged();
+			}
 		}
 
 	__DMA_INVARIANT();
 	}
 
 
-//
-// Reset state machine only, request queue is unchanged */
-//
-void TDmaChannel::ResetStateMachine()
-	{
-	DoCancelAll();
-	iCurHdr = NULL;
-	iNullPtr = &iCurHdr;
-	}
-
-
 void TDmaChannel::DoQueue(const DDmaRequest& /*aReq*/)
 	{
 	// Must be overridden
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	}
 
 
@@ -2828,7 +2823,7 @@
 	// To make sure this version of the function isn't called for channels for
 	// which it isn't appropriate (and which therefore don't override it) we
 	// put this check in here.
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
 	}
 
 
@@ -2838,21 +2833,33 @@
 	// To make sure this version of the function isn't called for channels for
 	// which it isn't appropriate (and which therefore don't override it) we
 	// put this check in here.
-	__DMA_CANT_HAPPEN();
+	__DMA_UNREACHABLE_DEFAULT();
+	}
+
+
+void TDmaChannel::SetNullPtr(const DDmaRequest& aReq)
+	{
+	// iNullPtr points to iCurHdr for an empty queue
+	*iNullPtr = aReq.iFirstHdr;
+	iNullPtr = &(aReq.iLastHdr->iNext);
+	}
+
+
+void TDmaChannel::ResetNullPtr()
+	{
+	iCurHdr = NULL;
+	iNullPtr = &iCurHdr;
 	}
 
 
 /** PSL may override */
 void TDmaChannel::QueuedRequestCountChanged()
 	{
-#ifdef _DEBUG
+	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::QueuedRequestCountChanged(): "
+									"disabling further calls"));
 	Wait();
-	__KTRACE_OPT(KDMA,
-				 Kern::Printf("TDmaChannel::QueuedRequestCountChanged() %d",
-							  iQueuedRequests));
-	__DMA_ASSERTA(iQueuedRequests >= 0);
+	iCallQueuedRequestFn = EFalse;
 	Signal();
-#endif
 	}
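
With this self-disabling default, a PSL opts into queue-drain notifications
simply by overriding the virtual and leaving iCallQueuedRequestFn alone. A
hypothetical override (the class name and power calls are placeholders, not
part of this changeset):

	void TFooDmaChannel::QueuedRequestCountChanged()
		{
		Wait();
		const TInt count = iQueuedRequests;
		Signal();
		// Example policy: gate the controller's clocks on pending work.
		if (count == 0)
			{
			PslPowerDown();   // placeholder for a PSL-specific call
			}
		else
			{
			PslPowerUp();     // placeholder for a PSL-specific call
			}
		}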
 
 
@@ -3018,6 +3025,35 @@
 //////////////////////////////////////////////////////////////////////////////
 // TDmaAsymSgChannel
 
+TDmaAsymSgChannel::TDmaAsymSgChannel()
+	: iSrcCurHdr(NULL),
+	  iSrcNullPtr(&iSrcCurHdr),
+	  iDstCurHdr(NULL),
+	  iDstNullPtr(&iDstCurHdr)
+	{
+	__DMA_INVARIANT();
+	}
+
+
+void TDmaAsymSgChannel::SetNullPtr(const DDmaRequest& aReq)
+	{
+	// i{Src|Dst}NullPtr points to i{Src|Dst}CurHdr for an empty queue
+	*iSrcNullPtr = aReq.iSrcFirstHdr;
+	*iDstNullPtr = aReq.iDstFirstHdr;
+	iSrcNullPtr = &(aReq.iSrcLastHdr->iNext);
+	iDstNullPtr = &(aReq.iDstLastHdr->iNext);
+	}
+
+
+void TDmaAsymSgChannel::ResetNullPtr()
+	{
+	iSrcCurHdr = NULL;
+	iSrcNullPtr = &iSrcCurHdr;
+	iDstCurHdr = NULL;
+	iDstNullPtr = &iDstCurHdr;
+	}
+
+
 void TDmaAsymSgChannel::DoQueue(const DDmaRequest& aReq)
 	{
 	if (iState == ETransferring)
@@ -3062,3 +3098,34 @@
 	iState = (iSrcCurHdr != NULL) ? ETransferring : EIdle;
 	}
 
+
+#ifdef _DEBUG
+void TDmaAsymSgChannel::Invariant()
+	{
+	Wait();
+
+	__DMA_ASSERTD(iReqCount >= 0);
+
+	__DMA_ASSERTD(iSrcCurHdr == NULL || iController->IsValidHdr(iSrcCurHdr));
+	__DMA_ASSERTD(iDstCurHdr == NULL || iController->IsValidHdr(iDstCurHdr));
+
+	// Each should always point to the NULL pointer that terminates its fragment queue
+	__DMA_ASSERTD(*iSrcNullPtr == NULL);
+	__DMA_ASSERTD(*iDstNullPtr == NULL);
+
+	__DMA_ASSERTD((0 <= iAvailDesCount) && (iAvailDesCount <= iMaxDesCount));
+
+	__DMA_ASSERTD((iSrcCurHdr && iDstCurHdr && !IsQueueEmpty()) ||
+				  (!iSrcCurHdr && !iDstCurHdr && IsQueueEmpty()));
+	if (iSrcCurHdr == NULL)
+		{
+		__DMA_ASSERTD(iSrcNullPtr == &iSrcCurHdr);
+		}
+	if (iDstCurHdr == NULL)
+		{
+		__DMA_ASSERTD(iDstNullPtr == &iDstCurHdr);
+		}
+
+	Signal();
+	}
+#endif
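
The new TDmaAsymSgChannel::Invariant() is compiled only into debug builds.
Presumably __DMA_INVARIANT() expands along these lines (an assumption; the
actual definition lives in the PIL's header, not in this changeset):

	#ifdef _DEBUG
	#define __DMA_INVARIANT()	Invariant()
	#else
	#define __DMA_INVARIANT()
	#endif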