kernel/eka/drivers/dma/dma2_pil.cpp
changeset  247:d8d70de2bd36
parent     139:95f71bcdcdb7
comparison 201:43365a9b78a3 vs 247:d8d70de2bd36 (rows pair the old line with the new: equal / deleted / inserted / replaced)
    20 #include <drivers/dma_hai.h>
    20 #include <drivers/dma_hai.h>
    21 
    21 
    22 #include <kernel/kern_priv.h>
    22 #include <kernel/kern_priv.h>
    23 
    23 
    24 
    24 
    25 // Symbian Min() & Max() are broken, so we have to define them ourselves
    25 // Symbian Min() & Max() are broken, so we have to define them ourselves
    26 inline TUint Min(TUint aLeft, TUint aRight)
    26 inline TUint _Min(TUint aLeft, TUint aRight)
    27 	{return(aLeft < aRight ? aLeft : aRight);}
    27 	{return(aLeft < aRight ? aLeft : aRight);}
    28 inline TUint Max(TUint aLeft, TUint aRight)
    28 inline TUint _Max(TUint aLeft, TUint aRight)
    29 	{return(aLeft > aRight ? aLeft : aRight);}
    29 	{return(aLeft > aRight ? aLeft : aRight);}
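// Aside (illustrative, not part of this changeset): the usual way a generic
// Min()/Max() goes wrong is mixed signed/unsigned operands, e.g.
//
//   TInt left = -1;
//   TUint right = 1;
//   // (TUint)left == 0xFFFFFFFF, so a Min() instantiated over TUint
//   // returns 1 here rather than -1.
//
// Keeping _Min()/_Max() strictly TUint -> TUint makes any conversion at the
// call site explicit.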
    30 
    30 
    31 
    31 
    32 // Uncomment the following #define only when freezing the DMA2 export library.
    32 // The following section is used only when freezing the DMA2 export library.
    33 //#define __FREEZE_DMA2_LIB
    33 /*
    34 #ifdef __FREEZE_DMA2_LIB
       
    35 TInt DmaChannelMgr::StaticExtension(TInt, TAny*) {return 0;}
    34 TInt DmaChannelMgr::StaticExtension(TInt, TAny*) {return 0;}
    36 TDmaChannel* DmaChannelMgr::Open(TUint32, TBool, TUint) {return 0;}
    35 TDmaChannel* DmaChannelMgr::Open(TUint32, TBool, TUint) {return 0;}
    37 void DmaChannelMgr::Close(TDmaChannel*) {}
    36 void DmaChannelMgr::Close(TDmaChannel*) {}
    38 EXPORT_C const TDmaTestInfo& DmaTestInfo() {static TDmaTestInfo a; return a;}
    37 EXPORT_C const TDmaTestInfo& DmaTestInfo() {static TDmaTestInfo a; return a;}
    39 EXPORT_C const TDmaV2TestInfo& DmaTestInfoV2() {static TDmaV2TestInfo a; return a;}
    38 EXPORT_C const TDmaV2TestInfo& DmaTestInfoV2() {static TDmaV2TestInfo a; return a;}
    40 #endif	// #ifdef __FREEZE_DMA2_LIB
    39 */
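// Freezing workflow sketch (an assumption about the usual Symbian procedure,
// not spelled out in this file): temporarily remove the /* ... */ guards
// above so the stubs compile and the export library links without a real
// PSL, rebuild, run 'abld freeze' to regenerate the frozen .def file, then
// restore the guards before committing.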
    41 
       
    42 
    40 
    43 static const char KDmaPanicCat[] = "DMA " __FILE__;
    41 static const char KDmaPanicCat[] = "DMA " __FILE__;
    44 
    42 
    45 //////////////////////////////////////////////////////////////////////
    43 //////////////////////////////////////////////////////////////////////
    46 // DmaChannelMgr
    44 // DmaChannelMgr
   200 
   198 
   201 void TDmac::Transfer(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aHdr*/)
   199 void TDmac::Transfer(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aHdr*/)
   202 	{
   200 	{
   203 	// TDmac needs to override this function if it has reported the channel
   201 	// TDmac needs to override this function if it has reported the channel
   204 	// type for which the PIL calls it.
   202 	// type for which the PIL calls it.
   205 	__DMA_CANT_HAPPEN();
    203 	__DMA_CANT_HAPPEN();
   206 	}
   204 	}
   207 
   205 
   208 
   206 
   209 void TDmac::Transfer(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aSrcHdr*/,
   207 void TDmac::Transfer(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aSrcHdr*/,
   210 					 const SDmaDesHdr& /*aDstHdr*/)
   208 					 const SDmaDesHdr& /*aDstHdr*/)
   211 	{
   209 	{
   212 	// TDmac needs to override this function if it has reported the channel
   210 	// TDmac needs to override this function if it has reported the channel
   213 	// type for which the PIL calls it.
   211 	// type for which the PIL calls it.
   214 	__DMA_CANT_HAPPEN();
    212 	__DMA_CANT_HAPPEN();
   215 	}
   213 	}
   216 
   214 
   217 
   215 
   218 TInt TDmac::PauseTransfer(const TDmaChannel& /*aChannel*/)
   216 TInt TDmac::PauseTransfer(const TDmaChannel& /*aChannel*/)
   219 	{
   217 	{
   263 			}
   261 			}
   264 #endif
   262 #endif
   265 		}
   263 		}
   266 	else
   264 	else
   267 		{
   265 		{
   268 		iDesPool = new TDmaTransferArgs[iMaxDesCount];
   266 		iDesPool = Kern::Alloc(iMaxDesCount * sizeof(TDmaTransferArgs));
   269 		r = iDesPool ? KErrNone : KErrNoMemory;
   267 		r = iDesPool ? KErrNone : KErrNoMemory;
   270 		}
   268 		}
   271 	return r;
   269 	return r;
   272 	}
   270 	}
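// Unlike operator new[], Kern::Alloc() returns zero-filled storage and runs
// no C++ constructors, so the pool holds the TDmaTransferArgs as plain data.
// The matching cleanup (sketch, assuming the usual pairing elsewhere in the
// PIL) would be:
//
//   void TDmac::FreeDesPool()
//       {
//       // ... hardware-descriptor pools are released differently ...
//       Kern::Free(iDesPool);
//       }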
   273 
   271 
   448 
   446 
   449 
   447 
   450 TInt TDmac::InitHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
   448 TInt TDmac::InitHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
   451 	{
   449 	{
   452 	// concrete controller must override if SDmacCaps::iHwDescriptors set
   450 	// concrete controller must override if SDmacCaps::iHwDescriptors set
   453 	__DMA_CANT_HAPPEN();
    451 	__DMA_CANT_HAPPEN();
   454 	return KErrGeneral;
   452 	return KErrGeneral;
   455 	}
   453 	}
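// How a concrete controller might satisfy this contract (sketch only;
// TMyDmac, SMyHwDes and HwDes() are hypothetical, not part of the PIL):
//
//   TInt TMyDmac::InitHwDes(const SDmaDesHdr& aHdr, const TDmaTransferArgs& aArgs)
//       {
//       SMyHwDes& des = HwDes(aHdr);              // header -> hw descriptor
//       des.iSrcAddr = aArgs.iSrcConfig.iAddr;
//       des.iDstAddr = aArgs.iDstConfig.iAddr;
//       des.iCount   = aArgs.iTransferCount;
//       des.iNext    = 0;                         // terminated until chained
//       return KErrNone;
//       }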
   456 
   454 
   457 
   455 
   458 TInt TDmac::InitSrcHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
   456 TInt TDmac::InitSrcHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
   459 	{
   457 	{
   460 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   458 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   461 	__DMA_CANT_HAPPEN();
    459 	__DMA_CANT_HAPPEN();
   462 	return KErrGeneral;
   460 	return KErrGeneral;
   463 	}
   461 	}
   464 
   462 
   465 
   463 
   466 TInt TDmac::InitDstHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
   464 TInt TDmac::InitDstHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
   467 	{
   465 	{
   468 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   466 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   469 	__DMA_CANT_HAPPEN();
    467 	__DMA_CANT_HAPPEN();
   470 	return KErrGeneral;
   468 	return KErrGeneral;
   471 	}
   469 	}
   472 
   470 
   473 
   471 
   474 TInt TDmac::UpdateDes(const SDmaDesHdr& aHdr, TUint32 aSrcAddr, TUint32 aDstAddr,
   472 TInt TDmac::UpdateDes(const SDmaDesHdr& aHdr, TUint32 aSrcAddr, TUint32 aDstAddr,
   500 
   498 
   501 TInt TDmac::UpdateHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrcAddr*/, TUint32 /*aDstAddr*/,
   499 TInt TDmac::UpdateHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrcAddr*/, TUint32 /*aDstAddr*/,
   502 						TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
   500 						TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
   503 	{
   501 	{
   504 	// concrete controller must override if SDmacCaps::iHwDescriptors set
   502 	// concrete controller must override if SDmacCaps::iHwDescriptors set
   505 	__DMA_CANT_HAPPEN();
    503 	__DMA_CANT_HAPPEN();
   506 	return KErrGeneral;
   504 	return KErrGeneral;
   507 	}
   505 	}
   508 
   506 
   509 
   507 
   510 TInt TDmac::UpdateSrcHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrcAddr*/,
   508 TInt TDmac::UpdateSrcHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrcAddr*/,
   511 						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
   509 						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
   512 	{
   510 	{
   513 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   511 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   514 	__DMA_CANT_HAPPEN();
    512 	__DMA_CANT_HAPPEN();
   515 	return KErrGeneral;
   513 	return KErrGeneral;
   516 	}
   514 	}
   517 
   515 
   518 
   516 
   519 TInt TDmac::UpdateDstHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aDstAddr*/,
   517 TInt TDmac::UpdateDstHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aDstAddr*/,
   520 						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
   518 						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
   521 	{
   519 	{
   522 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   520 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   523 	__DMA_CANT_HAPPEN();
    521 	__DMA_CANT_HAPPEN();
   524 	return KErrGeneral;
   522 	return KErrGeneral;
   525 	}
   523 	}
   526 
   524 
   527 
   525 
   528 void TDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHdr*/)
   526 void TDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHdr*/)
   529 	{
   527 	{
   530 	// concrete controller must override if SDmacCaps::iHwDescriptors set
   528 	// concrete controller must override if SDmacCaps::iHwDescriptors set
   531 	__DMA_CANT_HAPPEN();
    529 	__DMA_CANT_HAPPEN();
   532 	}
   530 	}
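// Continuing the hypothetical controller above, chaining just links the
// hardware 'next' fields (sketch; HwDesPhysAddr() is a hypothetical lookup):
//
//   void TMyDmac::ChainHwDes(const SDmaDesHdr& aHdr, const SDmaDesHdr& aNextHdr)
//       {
//       HwDes(aHdr).iNext = HwDesPhysAddr(aNextHdr);
//       }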
   533 
   531 
   534 
   532 
   535 void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aLastHdr*/,
   533 void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aLastHdr*/,
   536 						const SDmaDesHdr& /*aNewHdr*/)
   534 						const SDmaDesHdr& /*aNewHdr*/)
   537 	{
   535 	{
   538  	// concrete controller must override if SDmacCaps::iHwDescriptors set
   536  	// concrete controller must override if SDmacCaps::iHwDescriptors set
   539 	__DMA_CANT_HAPPEN();
    537 	__DMA_CANT_HAPPEN();
   540 	}
   538 	}
   541 
   539 
   542 
   540 
   543 void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/,
   541 void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/,
   544 						const SDmaDesHdr& /*aSrcLastHdr*/, const SDmaDesHdr& /*aSrcNewHdr*/,
   542 						const SDmaDesHdr& /*aSrcLastHdr*/, const SDmaDesHdr& /*aSrcNewHdr*/,
   545 						const SDmaDesHdr& /*aDstLastHdr*/, const SDmaDesHdr& /*aDstNewHdr*/)
   543 						const SDmaDesHdr& /*aDstLastHdr*/, const SDmaDesHdr& /*aDstNewHdr*/)
   546 	{
   544 	{
   547 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   545 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   548 	__DMA_CANT_HAPPEN();
    546 	__DMA_CANT_HAPPEN();
   549 	}
   547 	}
   550 
   548 
   551 
   549 
   552 void TDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
   550 void TDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
   553 	{
   551 	{
   554  	// concrete controller must override if SDmacCaps::iHwDescriptors set
   552  	// concrete controller must override if SDmacCaps::iHwDescriptors set
   555 	__DMA_CANT_HAPPEN();
    553 	__DMA_CANT_HAPPEN();
   556 	}
   554 	}
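// A PSL's UnlinkHwDes() would do the inverse of ChainHwDes(): clear the
// final descriptor's 'next' field, so a completed chain cannot run on into
// descriptors that have since been recycled for another request.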
   557 
   555 
   558 
   556 
   559 void TDmac::ClearHwDes(const SDmaDesHdr& /*aHdr*/)
   557 void TDmac::ClearHwDes(const SDmaDesHdr& /*aHdr*/)
   560 	{
   558 	{
   599 
   597 
   600 
   598 
   601 TUint32 TDmac::HwDesNumDstElementsTransferred(const SDmaDesHdr& /*aHdr*/)
   599 TUint32 TDmac::HwDesNumDstElementsTransferred(const SDmaDesHdr& /*aHdr*/)
   602 	{
   600 	{
   603  	// Concrete controller must override if SDmacCaps::iHwDescriptors set.
   601  	// Concrete controller must override if SDmacCaps::iHwDescriptors set.
   604 	__DMA_CANT_HAPPEN();
    602 	__DMA_CANT_HAPPEN();
   605 	return 0;
   603 	return 0;
   606 	}
   604 	}
   607 
   605 
   608 
   606 
   609 TUint32 TDmac::HwDesNumSrcElementsTransferred(const SDmaDesHdr& /*aHdr*/)
   607 TUint32 TDmac::HwDesNumSrcElementsTransferred(const SDmaDesHdr& /*aHdr*/)
   610 	{
   608 	{
   611  	// Concrete controller must override if SDmacCaps::iHwDescriptors set.
   609  	// Concrete controller must override if SDmacCaps::iHwDescriptors set.
   612 	__DMA_CANT_HAPPEN();
    610 	__DMA_CANT_HAPPEN();
   613 	return 0;
   611 	return 0;
   614 	}
   612 	}
   615 
   613 
   616 
   614 
   617 #ifdef _DEBUG
   615 #ifdef _DEBUG
   854 		}
   852 		}
   855 	return KErrNone;
   853 	return KErrNone;
   856 	}
   854 	}
   857 
   855 
   858 
   856 
   859 TInt DDmaRequest::CheckMemFlags(const TDmaTransferConfig& aTarget, TUint aCount) const
   857 TInt DDmaRequest::CheckMemFlags(const TDmaTransferConfig& aTarget) const
   860 	{
   858 	{
   861 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::CheckMemFlags"));
   859 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::CheckMemFlags"));
   862 
   860 
   863 	const TBool mem_target = (aTarget.iFlags & KDmaMemAddr);
   861 	const TBool mem_target = (aTarget.iFlags & KDmaMemAddr);
   864 
   862 
   890 									aFragSize, aElementSize, aFrameSize));
   888 									aFragSize, aElementSize, aFrameSize));
   891 
   889 
   892 	TUint rem = 0;
   890 	TUint rem = 0;
   893 	TInt r = KErrNone;
   891 	TInt r = KErrNone;
   894 
   892 
   895 	while (1)
   893 	FOREVER
   896 		{
   894 		{
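		// (FOREVER is the e32 'for (;;)' macro; the loop still exits via its
		// break/return paths below.)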
   897 		// If an element size is defined, make sure the fragment size is
   895 		// If an element size is defined, make sure the fragment size is
   898 		// greater or equal.
   896 		// greater or equal.
   899 		if (aElementSize)
   897 		if (aElementSize)
   900 			{
   898 			{
  1099 
  1097 
  1100 	// Set the channel cookie for the PSL
  1098 	// Set the channel cookie for the PSL
  1101 	aTransferArgs.iChannelCookie = iChannel.PslId();
  1099 	aTransferArgs.iChannelCookie = iChannel.PslId();
  1102 
  1100 
  1103 	// Client shouldn't specify contradictory or invalid things
  1101 	// Client shouldn't specify contradictory or invalid things
  1104 	TInt r = CheckMemFlags(aTransferArgs.iSrcConfig, count);
  1102 	TInt r = CheckMemFlags(aTransferArgs.iSrcConfig);
  1105 	if (r != KErrNone)
  1103 	if (r != KErrNone)
  1106 		{
  1104 		{
  1107 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckMemFlags(src)"));
  1105 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckMemFlags(src)"));
  1108 		return r;
  1106 		return r;
  1109 		}
  1107 		}
  1110 	r =  CheckMemFlags(aTransferArgs.iDstConfig, count);
  1108 	r =  CheckMemFlags(aTransferArgs.iDstConfig);
  1111 	if (r != KErrNone)
  1109 	if (r != KErrNone)
  1112 		{
  1110 		{
  1113 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckMemFlags(dst)"));
  1111 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckMemFlags(dst)"));
  1114 		return r;
  1112 		return r;
  1115 		}
  1113 		}
  1158 	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0));
  1156 	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0));
  1159 
  1157 
  1160 	// Max aligned length is used to make sure the beginnings of subtransfers
  1158 	// Max aligned length is used to make sure the beginnings of subtransfers
  1161 	// (i.e. fragments) are correctly aligned.
  1159 	// (i.e. fragments) are correctly aligned.
  1162 	const TUint max_aligned_len = (aMaxTransferLen &
  1160 	const TUint max_aligned_len = (aMaxTransferLen &
  1163 								   ~(Max(align_mask_src, align_mask_dst)));
  1161 								   ~(_Max(align_mask_src, align_mask_dst)));
  1164 	__KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len));
  1162 	__KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len));
  1165 	// Client and PSL sane?
  1163 	// Client and PSL sane?
  1166 	__DMA_ASSERTD(max_aligned_len > 0);
  1164 	__DMA_ASSERTD(max_aligned_len > 0);
  1167 
  1165 
  1168 	if (mem_src && mem_dst &&
  1166 	if (mem_src && mem_dst &&
  1233 		if (r != KErrNone)
  1231 		if (r != KErrNone)
  1234 			{
  1232 			{
  1235 			break;
  1233 			break;
  1236 			}
  1234 			}
  1237 		// Compute fragment size
  1235 		// Compute fragment size
  1238 		TUint c = Min(aMaxTransferLen, aCount);
  1236 		TUint c = _Min(aMaxTransferLen, aCount);
  1239 		__KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c));
  1237 		__KTRACE_OPT(KDMA, Kern::Printf("c = _Min(aMaxTransferLen, aCount) = %d", c));
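		// Worked example (illustrative numbers): with aMaxTransferLen == 0x10000
		// and aCount == 100000, c starts as _Min(0x10000, 100000) == 65536; the
		// MaxPhysSize() clamp below may then shrink c further, so a fragment
		// never straddles a physically non-contiguous boundary.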
  1240 
  1238 
  1241 		// SRC
  1239 		// SRC
  1242 		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
  1240 		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
  1243 			{
  1241 			{
  1244 			c = MaxPhysSize(src.iAddr, c);
  1242 			c = MaxPhysSize(src.iAddr, c);
  1420 		if (r != KErrNone)
  1418 		if (r != KErrNone)
  1421 			{
  1419 			{
  1422 			break;
  1420 			break;
  1423 			}
  1421 			}
  1424 		// Compute fragment size
  1422 		// Compute fragment size
  1425 		TUint c = Min(aMaxTransferLen, aCount);
  1423 		TUint c = _Min(aMaxTransferLen, aCount);
  1426 		__KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c));
  1424 		__KTRACE_OPT(KDMA, Kern::Printf("c = _Min(aMaxTransferLen, aCount) = %d", c));
  1427 
  1425 
  1428 		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
  1426 		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
  1429 			{
  1427 			{
  1430 			c = MaxPhysSize(src.iAddr, c);
  1428 			c = MaxPhysSize(src.iAddr, c);
  1431 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(src.iAddr, c) = %d", c));
  1429 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(src.iAddr, c) = %d", c));
  1534 		if (r != KErrNone)
  1532 		if (r != KErrNone)
  1535 			{
  1533 			{
  1536 			break;
  1534 			break;
  1537 			}
  1535 			}
  1538 		// Compute fragment size
  1536 		// Compute fragment size
  1539 		TUint c = Min(aMaxTransferLen, aCount);
  1537 		TUint c = _Min(aMaxTransferLen, aCount);
  1540 		__KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c));
  1538 		__KTRACE_OPT(KDMA, Kern::Printf("c = _Min(aMaxTransferLen, aCount) = %d", c));
  1541 
  1539 
  1542 		if (mem_dst && !(dst.iFlags & KDmaMemIsContiguous))
  1540 		if (mem_dst && !(dst.iFlags & KDmaMemIsContiguous))
  1543 			{
  1541 			{
  1544 			c = MaxPhysSize(dst.iAddr, c);
  1542 			c = MaxPhysSize(dst.iAddr, c);
  1545 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(dst.iAddr, c) = %d", c));
  1543 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(dst.iAddr, c) = %d", c));
  1640 	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0));
  1638 	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0));
  1641 
  1639 
  1642 	// Max aligned length is used to make sure the beginnings of subtransfers
  1640 	// Max aligned length is used to make sure the beginnings of subtransfers
  1643 	// (i.e. fragments) are correctly aligned.
  1641 	// (i.e. fragments) are correctly aligned.
  1644 	const TUint max_aligned_len = (aMaxTransferLen &
  1642 	const TUint max_aligned_len = (aMaxTransferLen &
  1645 								   ~(Max(align_mask_src, align_mask_dst)));
  1643 								   ~(_Max(align_mask_src, align_mask_dst)));
  1646 	__KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len));
  1644 	__KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len));
  1647 	// Client and PSL sane?
  1645 	// Client and PSL sane?
  1648 	__DMA_ASSERTD(max_aligned_len > 0);
  1646 	__DMA_ASSERTD(max_aligned_len > 0);
  1649 
  1647 
  1650 	if (mem_src && mem_dst &&
  1648 	if (mem_src && mem_dst &&
  1706 
  1704 
  1707 	TInt r;
  1705 	TInt r;
  1708 	// Revert any previous fragmentation attempt
  1706 	// Revert any previous fragmentation attempt
  1709 	FreeSrcDesList();
  1707 	FreeSrcDesList();
  1710 	FreeDstDesList();
  1708 	FreeDstDesList();
       
  1709 	__DMA_ASSERTD(iSrcDesCount == iDstDesCount);
  1711 	do
  1710 	do
  1712 		{
  1711 		{
  1713 		// Allocate fragment
  1712 		// Allocate fragment
  1714 		r = ExpandSrcDesList(/*1*/);
  1713 		r = ExpandSrcDesList(/*1*/);
  1715 		if (r != KErrNone)
  1714 		if (r != KErrNone)
  1719 		r = ExpandDstDesList(/*1*/);
  1718 		r = ExpandDstDesList(/*1*/);
  1720 		if (r != KErrNone)
  1719 		if (r != KErrNone)
  1721 			{
  1720 			{
  1722 			break;
  1721 			break;
  1723 			}
  1722 			}
       
  1723 		__DMA_ASSERTD(iSrcDesCount == iDstDesCount);
  1724 		// Compute fragment size
  1724 		// Compute fragment size
  1725 		TUint c = Min(aMaxTransferLen, aCount);
  1725 		TUint c = _Min(aMaxTransferLen, aCount);
  1726 		__KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c));
  1726 		__KTRACE_OPT(KDMA, Kern::Printf("c = _Min(aMaxTransferLen, aCount) = %d", c));
  1727 
  1727 
  1728 		// SRC
  1728 		// SRC
  1729 		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
  1729 		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
  1730 			{
  1730 			{
  1731 			c = MaxPhysSize(src.iAddr, c);
  1731 			c = MaxPhysSize(src.iAddr, c);
  1849 	{
  1849 	{
  1850 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread()));
  1850 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread()));
  1851 	// Not configured? Call Fragment() first!
  1851 	// Not configured? Call Fragment() first!
  1852 	if (iChannel.iDmacCaps->iAsymHwDescriptors)
  1852 	if (iChannel.iDmacCaps->iAsymHwDescriptors)
  1853 		{
  1853 		{
  1854 		__DMA_ASSERTD((iSrcDesCount < 0) && (iDstDesCount < 0));
  1854 		__DMA_ASSERTD((iSrcDesCount > 0) && (iDstDesCount > 0));
  1855 		}
  1855 		}
  1856 	else
  1856 	else
  1857 		{
  1857 		{
  1858 		__DMA_ASSERTD(iDesCount > 0);
  1858 		__DMA_ASSERTD(iDesCount > 0);
  1859 		}
  1859 		}
  1861 
  1861 
  1862 	// Append request to queue and link new descriptor list to existing one.
  1862 	// Append request to queue and link new descriptor list to existing one.
  1863 	iChannel.Wait();
  1863 	iChannel.Wait();
  1864 
  1864 
  1865 	TUint32 req_count = iChannel.iQueuedRequests++;
  1865 	TUint32 req_count = iChannel.iQueuedRequests++;
  1866 	if (req_count == 0)
  1866 	if (iChannel.iCallQueuedRequestFn)
  1867 		{
  1867 		{
  1868 		iChannel.Signal();
  1868 		if (req_count == 0)
  1869 		iChannel.QueuedRequestCountChanged();
  1869 			{
  1870 		iChannel.Wait();
  1870 			iChannel.Signal();
       
  1871 			iChannel.QueuedRequestCountChanged();
       
  1872 			iChannel.Wait();
       
  1873 			}
  1871 		}
  1874 		}
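		// Note: the Signal()/Wait() bracket above lets the PSL hook
		// QueuedRequestCountChanged() run without the channel lock held; the
		// count was already incremented under the lock, so a competing Queue()
		// cannot observe zero in the meantime.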
  1872 
  1875 
  1873 	TInt r = KErrGeneral;
  1876 	TInt r = KErrGeneral;
  1874 	const TBool ch_isr_cb = __e32_atomic_load_acq32(&iChannel.iIsrCbRequest);
  1877 	const TBool ch_isr_cb = __e32_atomic_load_acq32(&iChannel.iIsrCbRequest);
  1875 	if (ch_isr_cb)
  1878 	if (ch_isr_cb)
  1881 		__KTRACE_OPT(KPANIC, Kern::Printf("An ISR cb request exists - not queueing"));
  1884 		__KTRACE_OPT(KPANIC, Kern::Printf("An ISR cb request exists - not queueing"));
  1882 		// Undo the request count increment...
  1885 		// Undo the request count increment...
  1883 		req_count = --iChannel.iQueuedRequests;
  1886 		req_count = --iChannel.iQueuedRequests;
  1884 		__DMA_INVARIANT();
  1887 		__DMA_INVARIANT();
  1885 		iChannel.Signal();
  1888 		iChannel.Signal();
  1886 		if (req_count == 0)
  1889 		if (iChannel.iCallQueuedRequestFn)
  1887 			{
  1890 			{
  1888 			iChannel.QueuedRequestCountChanged();
  1891 			if (req_count == 0)
       
  1892 				{
       
  1893 				iChannel.QueuedRequestCountChanged();
       
  1894 				}
  1889 			}
  1895 			}
  1890 		}
  1896 		}
  1891 	else if (iIsrCb && !iChannel.IsQueueEmpty())
  1897 	else if (iIsrCb && !iChannel.IsQueueEmpty())
  1892 		{
  1898 		{
  1893 		// Client mustn't try to queue an ISR callback request whilst any
  1899 		// Client mustn't try to queue an ISR callback request whilst any
  1897 		__KTRACE_OPT(KPANIC, Kern::Printf("Request queue not empty - not queueing"));
  1903 		__KTRACE_OPT(KPANIC, Kern::Printf("Request queue not empty - not queueing"));
  1898 		// Undo the request count increment...
  1904 		// Undo the request count increment...
  1899 		req_count = --iChannel.iQueuedRequests;
  1905 		req_count = --iChannel.iQueuedRequests;
  1900 		__DMA_INVARIANT();
  1906 		__DMA_INVARIANT();
  1901 		iChannel.Signal();
  1907 		iChannel.Signal();
  1902 		if (req_count == 0)
  1908 		if (iChannel.iCallQueuedRequestFn)
  1903 			{
  1909 			{
  1904 			iChannel.QueuedRequestCountChanged();
  1910 			if (req_count == 0)
       
  1911 				{
       
  1912 				iChannel.QueuedRequestCountChanged();
       
  1913 				}
  1905 			}
  1914 			}
  1906 		}
  1915 		}
  1907 	else if (iChannel.iIsrDfc & (TUint32)TDmaChannel::KCancelFlagMask)
  1916 	else if (iChannel.iIsrDfc & (TUint32)TDmaChannel::KCancelFlagMask)
  1908 		{
  1917 		{
  1909 		__KTRACE_OPT(KPANIC, Kern::Printf("Channel requests cancelled - not queueing"));
  1918 		__KTRACE_OPT(KPANIC, Kern::Printf("Channel requests cancelled - not queueing"));
  1910 		// Someone is cancelling all requests - undo the request count increment...
  1919 		// Someone is cancelling all requests - undo the request count increment...
  1911 		req_count = --iChannel.iQueuedRequests;
  1920 		req_count = --iChannel.iQueuedRequests;
  1912 		__DMA_INVARIANT();
  1921 		__DMA_INVARIANT();
  1913 		iChannel.Signal();
  1922 		iChannel.Signal();
  1914 		if (req_count == 0)
  1923 		if (iChannel.iCallQueuedRequestFn)
  1915 			{
  1924 			{
  1916 			iChannel.QueuedRequestCountChanged();
  1925 			if (req_count == 0)
       
  1926 				{
       
  1927 				iChannel.QueuedRequestCountChanged();
       
  1928 				}
  1917 			}
  1929 			}
  1918 		}
  1930 		}
  1919 	else
  1931 	else
  1920 		{
  1932 		{
  1921 		iQueued = ETrue;
  1933 		iQueued = ETrue;
  1922 		iChannel.iReqQ.Add(&iLink);
  1934 		iChannel.iReqQ.Add(&iLink);
  1923 		// iChannel.iNullPtr points to iChannel.iCurHdr for an empty queue
  1935 		iChannel.SetNullPtr(*this);
  1924 		*iChannel.iNullPtr = iFirstHdr;
       
  1925 		iChannel.iNullPtr = &(iLastHdr->iNext);
       
  1926 		if (iIsrCb)
  1936 		if (iIsrCb)
  1927 			{
  1937 			{
  1928 			// Since we've made sure that there is no other request in the
  1938 			// Since we've made sure that there is no other request in the
  1929 			// queue before this, the only thing of relevance is the channel
  1939 			// queue before this, the only thing of relevance is the channel
  1930 			// DFC which might yet have to complete for the previous request,
  1940 			// DFC which might yet have to complete for the previous request,
  1932 			// the client callback. This should be all right though as once
  1942 			// the client callback. This should be all right though as once
  1933 			// we've set the following flag no further Queue()'s will be
  1943 			// we've set the following flag no further Queue()'s will be
  1934 			// possible.
  1944 			// possible.
  1935 			__e32_atomic_store_rel32(&iChannel.iIsrCbRequest, ETrue);
  1945 			__e32_atomic_store_rel32(&iChannel.iIsrCbRequest, ETrue);
  1936 			}
  1946 			}
  1937 		iChannel.DoQueue(const_cast<const DDmaRequest&>(*this));
  1947 		iChannel.DoQueue(*this);
  1938 		r = KErrNone;
  1948 		r = KErrNone;
  1939 		__DMA_INVARIANT();
  1949 		__DMA_INVARIANT();
  1940 		iChannel.Signal();
  1950 		iChannel.Signal();
  1941 		}
  1951 		}
  1942 
  1952 
  2110 EXPORT_C TUint32 DDmaRequest::TotalNumSrcElementsTransferred()
  2120 EXPORT_C TUint32 DDmaRequest::TotalNumSrcElementsTransferred()
  2111 	{
  2121 	{
  2112 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::TotalNumSrcElementsTransferred"));
  2122 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::TotalNumSrcElementsTransferred"));
  2113 
  2123 
  2114 	// Not yet implemented.
  2124 	// Not yet implemented.
  2115 
       
  2116 	// So far largely bogus code (just to touch some symbols)...
       
  2117 	iTotalNumSrcElementsTransferred = 0;
       
  2118 	TDmac& c = *(iChannel.iController);
       
  2119 	if (c.iCapsHwDes)
       
  2120 		{
       
  2121 		for (const SDmaDesHdr* pH = iFirstHdr; pH != NULL; pH = pH->iNext)
       
  2122 			{
       
  2123 			iTotalNumSrcElementsTransferred += c.HwDesNumDstElementsTransferred(*pH);
       
  2124 			}
       
  2125 		}
       
  2126 	else
       
  2127 		{
       
  2128 		// Do something different for pseudo descriptors...
       
  2129 		}
       
  2130 	return iTotalNumSrcElementsTransferred;
  2125 	return iTotalNumSrcElementsTransferred;
  2131 	}
  2126 	}
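// A finished implementation would plausibly sum the per-descriptor counts
// over the request's chain (sketch only, hardware-descriptor case):
//
//   TDmac& c = *(iChannel.iController);
//   for (const SDmaDesHdr* pH = iFirstHdr; pH != NULL; pH = pH->iNext)
//       iTotalNumSrcElementsTransferred += c.HwDesNumSrcElementsTransferred(*pH);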
  2132 
  2127 
  2133 
  2128 
  2134 EXPORT_C TUint32 DDmaRequest::TotalNumDstElementsTransferred()
  2129 EXPORT_C TUint32 DDmaRequest::TotalNumDstElementsTransferred()
  2211 		{
  2206 		{
  2212 		__DMA_ASSERTD((0 <= iSrcDesCount) && (iSrcDesCount <= iChannel.iMaxDesCount) &&
  2207 		__DMA_ASSERTD((0 <= iSrcDesCount) && (iSrcDesCount <= iChannel.iMaxDesCount) &&
  2213 					  (0 <= iDstDesCount) && (iDstDesCount <= iChannel.iMaxDesCount));
  2208 					  (0 <= iDstDesCount) && (iDstDesCount <= iChannel.iMaxDesCount));
  2214 		if (iSrcDesCount == 0)
  2209 		if (iSrcDesCount == 0)
  2215 			{
  2210 			{
       
  2211 			// Not fragmented yet
  2216 			__DMA_ASSERTD(iDstDesCount == 0);
  2212 			__DMA_ASSERTD(iDstDesCount == 0);
  2217 			__DMA_ASSERTD(!iQueued);
  2213 			__DMA_ASSERTD(!iQueued);
  2218 			__DMA_ASSERTD(!iSrcFirstHdr && !iSrcLastHdr &&
  2214 			__DMA_ASSERTD(!iSrcFirstHdr && !iSrcLastHdr &&
  2219 						  !iDstFirstHdr && !iDstLastHdr);
  2215 						  !iDstFirstHdr && !iDstLastHdr);
  2220 			}
  2216 			}
       
  2217 		else if (iDstDesCount == 0)
       
  2218 			{
       
  2219 			// Src side only fragmented yet
       
  2220 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcFirstHdr));
       
  2221 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcLastHdr));
       
  2222 			}
  2221 		else
  2223 		else
  2222 			{
  2224 			{
       
  2225 			// Src & Dst sides fragmented
  2223 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcFirstHdr));
  2226 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcFirstHdr));
  2224 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcLastHdr));
  2227 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcLastHdr));
  2225 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstFirstHdr));
  2228 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstFirstHdr));
  2226 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstLastHdr));
  2229 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstLastHdr));
  2227 			}
       
  2228 		if (iChannel.iDmacCaps->iBalancedAsymSegments)
       
  2229 			{
       
  2230 			__DMA_ASSERTD(iSrcDesCount == iDstDesCount);
       
  2231 			}
  2230 			}
  2232 		}
  2231 		}
  2233 	else
  2232 	else
  2234 		{
  2233 		{
  2235 		__DMA_ASSERTD((0 <= iDesCount) && (iDesCount <= iChannel.iMaxDesCount));
  2234 		__DMA_ASSERTD((0 <= iDesCount) && (iDesCount <= iChannel.iMaxDesCount));
  2269 	  iAvailDesCount(0),
  2268 	  iAvailDesCount(0),
  2270 	  iIsrDfc(0),
  2269 	  iIsrDfc(0),
  2271 	  iReqQ(),
  2270 	  iReqQ(),
  2272 	  iReqCount(0),
  2271 	  iReqCount(0),
  2273 	  iQueuedRequests(0),
  2272 	  iQueuedRequests(0),
       
  2273 	  iCallQueuedRequestFn(ETrue),
  2274 	  iCancelInfo(NULL),
  2274 	  iCancelInfo(NULL),
  2275 	  iRedoRequest(EFalse),
  2275 	  iRedoRequest(EFalse),
  2276 	  iIsrCbRequest(EFalse)
  2276 	  iIsrCbRequest(EFalse)
  2277 	{
  2277 	{
  2278 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::TDmaChannel =0x%08X", this));
  2278 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::TDmaChannel =0x%08X", this));
  2414 		// There is a transfer in progress. It may complete before the DMAC
  2414 		// There is a transfer in progress. It may complete before the DMAC
  2415 		// has stopped, but the resulting ISR will not post a DFC.
  2415 		// has stopped, but the resulting ISR will not post a DFC.
  2416 		// ISR should not happen after this function returns.
  2416 		// ISR should not happen after this function returns.
  2417 		iController->StopTransfer(*this);
  2417 		iController->StopTransfer(*this);
  2418 
  2418 
  2419 		ResetStateMachine();
  2419 		DoCancelAll();
       
  2420 		ResetNullPtr();
  2420 
  2421 
  2421 		// Clean-up the request queue.
  2422 		// Clean-up the request queue.
  2422 		SDblQueLink* pL;
  2423 		SDblQueLink* pL;
  2423 		while ((pL = iReqQ.GetFirst()) != NULL)
  2424 		while ((pL = iReqQ.GetFirst()) != NULL)
  2424 			{
  2425 			{
  2470 
  2471 
  2471  	NKern::ThreadLeaveCS();
  2472  	NKern::ThreadLeaveCS();
  2472 
  2473 
  2473 	// Only call PSL if there were requests queued when we entered AND there
  2474 	// Only call PSL if there were requests queued when we entered AND there
  2474 	// are now no requests left on the queue.
  2475 	// are now no requests left on the queue.
  2475 	if ((req_count_before != 0) && (req_count_after == 0))
  2476 	if (iCallQueuedRequestFn)
  2476 		{
  2477 		{
  2477 		QueuedRequestCountChanged();
  2478 		if ((req_count_before != 0) && (req_count_after == 0))
       
  2479 			{
       
  2480 			QueuedRequestCountChanged();
       
  2481 			}
  2478 		}
  2482 		}
  2479 
  2483 
  2480 	__DMA_INVARIANT();
  2484 	__DMA_INVARIANT();
  2481 	}
  2485 	}
  2482 
  2486 
  2685 			TBool complete;
  2689 			TBool complete;
  2686 			if (iDmacCaps->iAsymHwDescriptors)
  2690 			if (iDmacCaps->iAsymHwDescriptors)
  2687 				{
  2691 				{
  2688 				SDmaDesHdr* pCompletedSrcHdr = NULL;
  2692 				SDmaDesHdr* pCompletedSrcHdr = NULL;
  2689 				SDmaDesHdr* pCompletedDstHdr = NULL;
  2693 				SDmaDesHdr* pCompletedDstHdr = NULL;
  2690 				DoDfc(const_cast<const DDmaRequest&>(*pCurReq),
  2694 				DoDfc(*pCurReq, pCompletedSrcHdr, pCompletedDstHdr);
  2691 					  pCompletedSrcHdr, pCompletedDstHdr);
       
  2692 				// We don't support asymmetrical ISR notifications and request
  2695 				// We don't support asymmetrical ISR notifications and request
  2693 				// completions yet, hence we can do the following assert test
  2696 				// completions yet, hence we can do the following assert test
  2694 				// here; also 'complete' is determined equally by either the
  2697 				// here; also 'complete' is determined equally by either the
  2695 				// SRC or DST side.
  2698 				// SRC or DST side.
  2696 				__DMA_ASSERTD(!LOGICAL_XOR((pCompletedSrcHdr == pCurReq->iSrcLastHdr),
  2699 				__DMA_ASSERTD(!LOGICAL_XOR((pCompletedSrcHdr == pCurReq->iSrcLastHdr),
  2698 				complete = (pCompletedDstHdr == pCurReq->iDstLastHdr);
  2701 				complete = (pCompletedDstHdr == pCurReq->iDstLastHdr);
  2699 				}
  2702 				}
  2700 			else
  2703 			else
  2701 				{
  2704 				{
  2702 				SDmaDesHdr* pCompletedHdr = NULL;
  2705 				SDmaDesHdr* pCompletedHdr = NULL;
  2703 				DoDfc(const_cast<const DDmaRequest&>(*pCurReq), pCompletedHdr);
  2706 				DoDfc(*pCurReq, pCompletedHdr);
  2704 				complete = (pCompletedHdr == pCurReq->iLastHdr);
  2707 				complete = (pCompletedHdr == pCurReq->iLastHdr);
  2705 				}
  2708 				}
  2706 			// If just completed last fragment from current request, switch to
  2709 			// If just completed last fragment from current request, switch to
  2707 			// next request (if any).
  2710 			// next request (if any).
  2708 			if (complete)
  2711 			if (complete)
  2709 				{
  2712 				{
  2710 				pCompletedReq = pCurReq;
  2713 				pCompletedReq = pCurReq;
  2711 				pCurReq->iLink.Deque();
  2714 				pCurReq->iLink.Deque();
  2712 				iQueuedRequests--;
  2715 				iQueuedRequests--;
  2713 				if (iReqQ.IsEmpty())
  2716 				if (iReqQ.IsEmpty())
  2714 					iNullPtr = &iCurHdr;
  2717 					ResetNullPtr();
  2715 				pCompletedReq->OnDeque();
  2718 				pCompletedReq->OnDeque();
  2716 				}
  2719 				}
  2717 			}
  2720 			}
  2718 		else
  2721 		else
  2719 			{
  2722 			{
  2784 		}
  2787 		}
  2785 
  2788 
  2786 	// Only call PSL if there were requests queued when we entered AND there
  2789 	// Only call PSL if there were requests queued when we entered AND there
  2787 	// are now no requests left on the queue (after also having executed all
  2790 	// are now no requests left on the queue (after also having executed all
  2788 	// client callbacks).
  2791 	// client callbacks).
  2789 	if ((req_count_before != 0) && (req_count_after == 0))
  2792 	if (iCallQueuedRequestFn)
  2790 		{
  2793 		{
  2791 		QueuedRequestCountChanged();
  2794 		if ((req_count_before != 0) && (req_count_after == 0))
       
  2795 			{
       
  2796 			QueuedRequestCountChanged();
       
  2797 			}
  2792 		}
  2798 		}
  2793 
  2799 
  2794 	__DMA_INVARIANT();
  2800 	__DMA_INVARIANT();
  2795 	}
  2801 	}
  2796 
  2802 
  2797 
  2803 
  2798 //
       
  2799 // Reset state machine only, request queue is unchanged */
       
  2800 //
       
  2801 void TDmaChannel::ResetStateMachine()
       
  2802 	{
       
  2803 	DoCancelAll();
       
  2804 	iCurHdr = NULL;
       
  2805 	iNullPtr = &iCurHdr;
       
  2806 	}
       
  2807 
       
  2808 
       
  2809 void TDmaChannel::DoQueue(const DDmaRequest& /*aReq*/)
  2804 void TDmaChannel::DoQueue(const DDmaRequest& /*aReq*/)
  2810 	{
  2805 	{
  2811 	// Must be overridden
  2806 	// Must be overridden
  2812 	__DMA_CANT_HAPPEN();
   2807 	__DMA_CANT_HAPPEN();
  2813 	}
  2808 	}
  2814 
  2809 
  2815 
  2810 
  2816 //
  2811 //
  2817 // Unlink the last item of a LLI chain from the next chain.
  2812 // Unlink the last item of a LLI chain from the next chain.
  2826 void TDmaChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& /*aCompletedHdr*/)
  2821 void TDmaChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& /*aCompletedHdr*/)
  2827 	{
  2822 	{
  2828 	// To make sure this version of the function isn't called for channels for
  2823 	// To make sure this version of the function isn't called for channels for
  2829 	// which it isn't appropriate (and which therefore don't override it) we
  2824 	// which it isn't appropriate (and which therefore don't override it) we
  2830 	// put this check in here.
  2825 	// put this check in here.
  2831 	__DMA_CANT_HAPPEN();
   2826 	__DMA_CANT_HAPPEN();
  2832 	}
  2827 	}
  2833 
  2828 
  2834 
  2829 
  2835 void TDmaChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& /*aSrcCompletedHdr*/,
  2830 void TDmaChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& /*aSrcCompletedHdr*/,
  2836 						SDmaDesHdr*& /*aDstCompletedHdr*/)
  2831 						SDmaDesHdr*& /*aDstCompletedHdr*/)
  2837 	{
  2832 	{
  2838 	// To make sure this version of the function isn't called for channels for
  2833 	// To make sure this version of the function isn't called for channels for
  2839 	// which it isn't appropriate (and which therefore don't override it) we
  2834 	// which it isn't appropriate (and which therefore don't override it) we
  2840 	// put this check in here.
  2835 	// put this check in here.
  2841 	__DMA_CANT_HAPPEN();
   2836 	__DMA_CANT_HAPPEN();
       
  2837 	}
       
  2838 
       
  2839 
       
  2840 void TDmaChannel::SetNullPtr(const DDmaRequest& aReq)
       
  2841 	{
       
  2842 	// iNullPtr points to iCurHdr for an empty queue
       
  2843 	*iNullPtr = aReq.iFirstHdr;
       
  2844 	iNullPtr = &(aReq.iLastHdr->iNext);
       
  2845 	}
       
  2846 
       
  2847 
       
  2848 void TDmaChannel::ResetNullPtr()
       
  2849 	{
       
  2850 	iCurHdr = NULL;
       
  2851 	iNullPtr = &iCurHdr;
  2842 	}
  2852 	}
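// The iNullPtr scheme in pictures: iNullPtr always addresses the terminating
// NULL link of the channel's fragment list (&iCurHdr while the list is
// empty), so SetNullPtr() splices a request's whole descriptor chain onto
// the end in O(1):
//
//   before:  iCurHdr -> ... -> lastHdr -> NULL      (iNullPtr == &lastHdr->iNext)
//   after :  iCurHdr -> ... -> lastHdr -> aReq.iFirstHdr -> ... -> aReq.iLastHdr -> NULL
//
// leaving iNullPtr at &aReq.iLastHdr->iNext.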
  2843 
  2853 
  2844 
  2854 
  2845 /** PSL may override */
  2855 /** PSL may override */
  2846 void TDmaChannel::QueuedRequestCountChanged()
  2856 void TDmaChannel::QueuedRequestCountChanged()
  2847 	{
  2857 	{
  2848 #ifdef _DEBUG
  2858 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::QueuedRequestCountChanged(): "
       
  2859 									"disabling further calls"));
  2849 	Wait();
  2860 	Wait();
  2850 	__KTRACE_OPT(KDMA,
  2861 	iCallQueuedRequestFn = EFalse;
  2851 				 Kern::Printf("TDmaChannel::QueuedRequestCountChanged() %d",
       
  2852 							  iQueuedRequests));
       
  2853 	__DMA_ASSERTA(iQueuedRequests >= 0);
       
  2854 	Signal();
  2862 	Signal();
  2855 #endif
       
  2856 	}
  2863 	}
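// A PSL that wants these notifications (for instance to gate a per-channel
// power resource) keeps them flowing simply by overriding the hook, since
// only this default body clears iCallQueuedRequestFn. Sketch, with a
// hypothetical TMyDmaChannel and power API:
//
//   void TMyDmaChannel::QueuedRequestCountChanged()
//       {
//       Wait();
//       const TInt count = iQueuedRequests;
//       Signal();
//       if (count == 0)
//           MyPowerController::ReleaseChannelClock(PslId());   // hypothetical
//       else
//           MyPowerController::RequestChannelClock(PslId());   // hypothetical
//       }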
  2857 
  2864 
  2858 
  2865 
  2859 #ifdef _DEBUG
  2866 #ifdef _DEBUG
  2860 void TDmaChannel::Invariant()
  2867 void TDmaChannel::Invariant()
  3016 
  3023 
  3017 
  3024 
  3018 //////////////////////////////////////////////////////////////////////////////
  3025 //////////////////////////////////////////////////////////////////////////////
  3019 // TDmaAsymSgChannel
  3026 // TDmaAsymSgChannel
  3020 
  3027 
       
  3028 TDmaAsymSgChannel::TDmaAsymSgChannel()
       
  3029 	: iSrcCurHdr(NULL),
       
  3030 	  iSrcNullPtr(&iSrcCurHdr),
       
  3031 	  iDstCurHdr(NULL),
       
  3032 	  iDstNullPtr(&iDstCurHdr)
       
  3033 	{
       
  3034 	__DMA_INVARIANT();
       
  3035 	}
       
  3036 
       
  3037 
       
  3038 void TDmaAsymSgChannel::SetNullPtr(const DDmaRequest& aReq)
       
  3039 	{
       
  3040 	// i{Src|Dst}NullPtr points to i{Src|Dst}CurHdr for an empty queue
       
  3041 	*iSrcNullPtr = aReq.iSrcFirstHdr;
       
  3042 	*iDstNullPtr = aReq.iDstFirstHdr;
       
  3043 	iSrcNullPtr = &(aReq.iSrcLastHdr->iNext);
       
  3044 	iDstNullPtr = &(aReq.iDstLastHdr->iNext);
       
  3045 	}
       
  3046 
       
  3047 
       
  3048 void TDmaAsymSgChannel::ResetNullPtr()
       
  3049 	{
       
  3050 	iSrcCurHdr = NULL;
       
  3051 	iSrcNullPtr = &iSrcCurHdr;
       
  3052 	iDstCurHdr = NULL;
       
  3053 	iDstNullPtr = &iDstCurHdr;
       
  3054 	}
       
  3055 
       
  3056 
  3021 void TDmaAsymSgChannel::DoQueue(const DDmaRequest& aReq)
  3057 void TDmaAsymSgChannel::DoQueue(const DDmaRequest& aReq)
  3022 	{
  3058 	{
  3023 	if (iState == ETransferring)
  3059 	if (iState == ETransferring)
  3024 		{
  3060 		{
  3025 		__DMA_ASSERTD(!aReq.iLink.Alone());
  3061 		__DMA_ASSERTD(!aReq.iLink.Alone());
  3060 	// Must be either both NULL or none of them.
  3096 	// Must be either both NULL or none of them.
  3061 	__DMA_ASSERTD(!LOGICAL_XOR(iSrcCurHdr, iDstCurHdr));
  3097 	__DMA_ASSERTD(!LOGICAL_XOR(iSrcCurHdr, iDstCurHdr));
  3062 	iState = (iSrcCurHdr != NULL) ? ETransferring : EIdle;
  3098 	iState = (iSrcCurHdr != NULL) ? ETransferring : EIdle;
  3063 	}
  3099 	}
  3064 
  3100 
       
  3101 
       
  3102 #ifdef _DEBUG
       
  3103 void TDmaAsymSgChannel::Invariant()
       
  3104 	{
       
  3105 	Wait();
       
  3106 
       
  3107 	__DMA_ASSERTD(iReqCount >= 0);
       
  3108 
       
  3109 	__DMA_ASSERTD(iSrcCurHdr == NULL || iController->IsValidHdr(iSrcCurHdr));
       
  3110 	__DMA_ASSERTD(iDstCurHdr == NULL || iController->IsValidHdr(iDstCurHdr));
       
  3111 
       
  3112 	// should always point to NULL pointer ending fragment queue
       
  3113 	__DMA_ASSERTD(*iSrcNullPtr == NULL);
       
  3114 	__DMA_ASSERTD(*iDstNullPtr == NULL);
       
  3115 
       
  3116 	__DMA_ASSERTD((0 <= iAvailDesCount) && (iAvailDesCount <= iMaxDesCount));
       
  3117 
       
  3118 	__DMA_ASSERTD((iSrcCurHdr && iDstCurHdr && !IsQueueEmpty()) ||
       
  3119 				  (!iSrcCurHdr && !iDstCurHdr && IsQueueEmpty()));
       
  3120 	if (iSrcCurHdr == NULL)
       
  3121 		{
       
  3122 		__DMA_ASSERTD(iSrcNullPtr == &iSrcCurHdr);
       
  3123 		}
       
  3124 	if (iDstCurHdr == NULL)
       
  3125 		{
       
  3126 		__DMA_ASSERTD(iDstNullPtr == &iDstCurHdr);
       
  3127 		}
       
  3128 
       
  3129 	Signal();
       
  3130 	}
       
  3131 #endif