kernel/eka/drivers/dma/dma2_pil.cpp
changeset 139 95f71bcdcdb7
parent 109 b3a1d9898418
child 189 a5496987b1da
child 247 d8d70de2bd36
109:b3a1d9898418 139:95f71bcdcdb7
   116 
   116 
   117 //
   117 //
   118 // Return minimum of aMaxSize and size of largest physically contiguous block
   118 // Return minimum of aMaxSize and size of largest physically contiguous block
   119 // starting at aLinAddr.
   119 // starting at aLinAddr.
   120 //
   120 //
   121 static TInt MaxPhysSize(TLinAddr aLinAddr, const TInt aMaxSize)
   121 static TUint MaxPhysSize(TLinAddr aLinAddr, const TUint aMaxSize)
   122 	{
   122 	{
   123 	const TPhysAddr physBase = LinToPhys(aLinAddr);
   123 	const TPhysAddr physBase = LinToPhys(aLinAddr);
       
   124 	__DMA_ASSERTD(physBase != KPhysAddrInvalid);
   124 	TLinAddr lin = aLinAddr;
   125 	TLinAddr lin = aLinAddr;
   125 	TInt size = 0;
   126 	TUint size = 0;
   126 	for (;;)
   127 	for (;;)
   127 		{
   128 		{
   128 		// Round up the linear address to the next MMU page boundary
   129 		// Round up the linear address to the next MMU page boundary
   129 		const TLinAddr linBoundary = Kern::RoundToPageSize(lin + 1);
   130 		const TLinAddr linBoundary = Kern::RoundToPageSize(lin + 1);
   130 		size += linBoundary - lin;
   131 		size += linBoundary - lin;
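
// [Editor's sketch - not part of this changeset.] The diff context cuts
// MaxPhysSize() off after the size accumulation. A minimal sketch of how
// such a page walk typically completes, with the termination tests inferred
// from the visible prologue rather than copied from the file:
static TUint MaxPhysSizeSketch(TLinAddr aLinAddr, const TUint aMaxSize)
	{
	const TPhysAddr physBase = LinToPhys(aLinAddr);
	TLinAddr lin = aLinAddr;
	TUint size = 0;
	for (;;)
		{
		// Round up the linear address to the next MMU page boundary
		const TLinAddr linBoundary = Kern::RoundToPageSize(lin + 1);
		size += linBoundary - lin;
		if (size >= aMaxSize)
			{
			return aMaxSize;                        // caller's cap reached
			}
		if (LinToPhys(linBoundary) != physBase + size)
			{
			return size;         // physical contiguity ends at this boundary
			}
		lin = linBoundary;
		}
	}
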
   150 	  iDesPool(NULL),
   151 	  iDesPool(NULL),
   151 	  iDesSize(aInfo.iDesSize),
   152 	  iDesSize(aInfo.iDesSize),
   152 	  iCapsHwDes(aInfo.iCapsHwDes),
   153 	  iCapsHwDes(aInfo.iCapsHwDes),
   153 	  iFreeHdr(NULL)
   154 	  iFreeHdr(NULL)
   154 	{
   155 	{
       
   156 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::TDmac"));
   155 	__DMA_ASSERTD(iMaxDesCount > 0);
   157 	__DMA_ASSERTD(iMaxDesCount > 0);
   156 	__DMA_ASSERTD(iDesSize > 0);
   158 	__DMA_ASSERTD(iDesSize > 0);
   157 	}
   159 	}
   158 
   160 
   159 
   161 
   160 //
   162 //
   161 // Second-phase c'tor
   163 // Second-phase c'tor
   162 //
   164 //
   163 TInt TDmac::Create(const SCreateInfo& aInfo)
   165 TInt TDmac::Create(const SCreateInfo& aInfo)
   164 	{
   166 	{
       
   167 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::Create"));
   165 	iHdrPool = new SDmaDesHdr[iMaxDesCount];
   168 	iHdrPool = new SDmaDesHdr[iMaxDesCount];
   166 	if (iHdrPool == NULL)
   169 	if (iHdrPool == NULL)
   167 		{
   170 		{
   168 		return KErrNoMemory;
   171 		return KErrNoMemory;
   169 		}
   172 		}
   185 	}
   188 	}
   186 
   189 
   187 
   190 
   188 TDmac::~TDmac()
   191 TDmac::~TDmac()
   189 	{
   192 	{
       
   193 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::~TDmac"));
   190 	__DMA_INVARIANT();
   194 	__DMA_INVARIANT();
   191 
   195 
   192 	FreeDesPool();
   196 	FreeDesPool();
   193 	delete[] iHdrPool;
   197 	delete[] iHdrPool;
   194 	}
   198 	}
   227 	}
   231 	}
   228 
   232 
   229 
   233 
   230 TInt TDmac::AllocDesPool(TUint aAttribs)
   234 TInt TDmac::AllocDesPool(TUint aAttribs)
   231 	{
   235 	{
       
   236 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::AllocDesPool"));
   232 	// Calling thread must be in CS
   237 	// Calling thread must be in CS
   233 	__ASSERT_CRITICAL;
   238 	__ASSERT_CRITICAL;
   234 	TInt r;
   239 	TInt r;
   235 	if (iCapsHwDes)
   240 	if (iCapsHwDes)
   236 		{
   241 		{
   267 	}
   272 	}
   268 
   273 
   269 
   274 
   270 void TDmac::FreeDesPool()
   275 void TDmac::FreeDesPool()
   271 	{
   276 	{
       
   277 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::FreeDesPool"));
   272 	// Calling thread must be in CS
   278 	// Calling thread must be in CS
   273 	__ASSERT_CRITICAL;
   279 	__ASSERT_CRITICAL;
   274 	if (iCapsHwDes)
   280 	if (iCapsHwDes)
   275 		{
   281 		{
   276 #ifdef __WINS__
   282 #ifdef __WINS__
   315 //
   321 //
   316 // Return the given number of preallocated descriptors to the free pool.
   322 // Return the given number of preallocated descriptors to the free pool.
   317 //
   323 //
   318 void TDmac::ReleaseSetOfDes(TInt aCount)
   324 void TDmac::ReleaseSetOfDes(TInt aCount)
   319 	{
   325 	{
       
   326 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::ReleaseSetOfDes count=%d", aCount));
   320 	__DMA_ASSERTD(aCount >= 0);
   327 	__DMA_ASSERTD(aCount >= 0);
   321 	Wait();
   328 	Wait();
   322 	iAvailDesCount += aCount;
   329 	iAvailDesCount += aCount;
   323 	Signal();
   330 	Signal();
   324 	__DMA_INVARIANT();
   331 	__DMA_INVARIANT();
   384 			// Not redoing or being cancelled means we've been calling the
   391 			// Not redoing or being cancelled means we've been calling the
   385 			// request's ISR callback for the last time. We're going to
   392 			// request's ISR callback for the last time. We're going to
   386 			// complete the request via the DFC in the usual way.
   393 			// complete the request via the DFC in the usual way.
   387 			}
   394 			}
   388 		}
   395 		}
       
   396 	else
       
   397 		{
       
    398 		// The PIL doesn't yet support any completion types other than
       
   399 		// EDmaCallbackRequestCompletion.
       
   400 		__DMA_CANT_HAPPEN();
       
   401 		}
   389 
   402 
   390 	// Now queue a DFC if necessary. The possible scenarios are:
   403 	// Now queue a DFC if necessary. The possible scenarios are:
   391 	// a) DFC not queued (orig == 0)              -> update iIsrDfc + queue DFC
   404 	// a) DFC not queued (orig == 0)              -> update iIsrDfc + queue DFC
   392 	// b) DFC queued, not running yet (orig != 0) -> just update iIsrDfc
   405 	// b) DFC queued, not running yet (orig != 0) -> just update iIsrDfc
   393 	// c) DFC running / iIsrDfc not reset yet (orig != 0) -> just update iIsrDfc
   406 	// c) DFC running / iIsrDfc not reset yet (orig != 0) -> just update iIsrDfc
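
// [Editor's sketch - not part of this changeset.] The three scenarios above
// reduce to one atomic exchange plus a conditional queue. Assuming iIsrDfc
// is swapped atomically and iDfc is the channel's TDfc ('newIsrDfcValue' is
// a hypothetical placeholder for the encoded completion state):
const TUint32 orig = __e32_atomic_swp_ord32(&iIsrDfc, newIsrDfcValue);
if (orig == 0)
	{
	// a) No DFC queued yet - queue it now
	iDfc.Add();
	}
// b), c) A DFC is already pending or running and hasn't reset iIsrDfc yet,
// so the value swapped in above will be observed without re-queuing.
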
   605 
   618 
   606 void TDmac::Invariant()
   619 void TDmac::Invariant()
   607 	{
   620 	{
   608 	Wait();
   621 	Wait();
   609 	__DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);
   622 	__DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);
   610 	__DMA_ASSERTD(! iFreeHdr || IsValidHdr(iFreeHdr));
   623 	__DMA_ASSERTD(!iFreeHdr || IsValidHdr(iFreeHdr));
   611 	for (TInt i = 0; i < iMaxDesCount; i++)
   624 	for (TInt i = 0; i < iMaxDesCount; i++)
   612 		__DMA_ASSERTD(iHdrPool[i].iNext == NULL || IsValidHdr(iHdrPool[i].iNext));
   625 		__DMA_ASSERTD(iHdrPool[i].iNext == NULL || IsValidHdr(iHdrPool[i].iNext));
   613 	Signal();
   626 	Signal();
   614 	}
   627 	}
   615 
   628 
   618 	{
   631 	{
   619 	return (iHdrPool <= aHdr) && (aHdr < iHdrPool + iMaxDesCount);
   632 	return (iHdrPool <= aHdr) && (aHdr < iHdrPool + iMaxDesCount);
   620 	}
   633 	}
   621 
   634 
   622 #endif
   635 #endif
   623 
       
   624 
       
   625 
   636 
   626 
   637 
   627 //
   638 //
   628 // Internal compat version, used by legacy Fragment()
   639 // Internal compat version, used by legacy Fragment()
   629 //
   640 //
   642 	  iPslTargetInfo(0),
   653 	  iPslTargetInfo(0),
   643 	  iRepeatCount(0),
   654 	  iRepeatCount(0),
   644 	  iDelta(~0u),
   655 	  iDelta(~0u),
   645 	  iReserved(0)
   656 	  iReserved(0)
   646 	{
   657 	{
   647 	}
   658 	__KTRACE_OPT(KDMA,
   648 
   659 				 Kern::Printf("TDmaTransferConfig::TDmaTransferConfig "
       
   660 							  "aAddr=0x%08X aFlags=0x%08X aAddrInc=%d",
       
   661 							  aAddr, aFlags, aAddrInc));
       
   662 	}
   649 
   663 
   650 
   664 
   651 //
   665 //
   652 // Internal compat version, used by legacy Fragment()
   666 // Internal compat version, used by legacy Fragment()
   653 //
   667 //
   659 	  iGraphicsOps(KDmaGraphicsOpNone),
   673 	  iGraphicsOps(KDmaGraphicsOpNone),
   660 	  iColour(0),
   674 	  iColour(0),
   661 	  iFlags(0),
   675 	  iFlags(0),
   662 	  iChannelPriority(KDmaPriorityNone),
   676 	  iChannelPriority(KDmaPriorityNone),
   663 	  iPslRequestInfo(aPslInfo),
   677 	  iPslRequestInfo(aPslInfo),
       
   678 	  iChannelCookie(0),
   664 	  iDelta(~0u),
   679 	  iDelta(~0u),
   665 	  iReserved1(0),
   680 	  iReserved1(0),
   666 	  iChannelCookie(0),
       
   667 	  iReserved2(0)
   681 	  iReserved2(0)
   668 	{
   682 	{
       
   683 	__KTRACE_OPT(KDMA,
       
   684 				 Kern::Printf("TDmaTransferArgs::TDmaTransferArgs"));
       
   685 	__KTRACE_OPT(KDMA,
       
   686 				 Kern::Printf("  aSrc=0x%08X aDest=0x%08X aCount=%d aFlags=0x%08X aPslInfo=0x%08X",
       
   687 							  aSrc, aDest, aCount, aFlags, aPslInfo));
   669 	}
   688 	}
   670 
   689 
   671 
   690 
   672 //
   691 //
   673 // As DDmaRequest is derived from DBase, the initializations with zero aren't
   692 // As DDmaRequest is derived from DBase, the initializations with zero aren't
   693 	  iQueued(EFalse),
   712 	  iQueued(EFalse),
   694 	  iMaxTransferSize(aMaxTransferSize),
   713 	  iMaxTransferSize(aMaxTransferSize),
   695 	  iTotalNumSrcElementsTransferred(0),
   714 	  iTotalNumSrcElementsTransferred(0),
   696 	  iTotalNumDstElementsTransferred(0)
   715 	  iTotalNumDstElementsTransferred(0)
   697 	{
   716 	{
       
   717 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::DDmaRequest =0x%08X (old style)", this));
   698 	iChannel.iReqCount++;
   718 	iChannel.iReqCount++;
   699 	__DMA_ASSERTD(0 <= aMaxTransferSize);
   719 	__DMA_ASSERTD(0 <= aMaxTransferSize);
   700 	__DMA_INVARIANT();
   720 	__DMA_INVARIANT();
   701 	}
   721 	}
   702 
   722 
   725 	  iQueued(EFalse),
   745 	  iQueued(EFalse),
   726 	  iMaxTransferSize(aMaxTransferSize),
   746 	  iMaxTransferSize(aMaxTransferSize),
   727 	  iTotalNumSrcElementsTransferred(0),
   747 	  iTotalNumSrcElementsTransferred(0),
   728 	  iTotalNumDstElementsTransferred(0)
   748 	  iTotalNumDstElementsTransferred(0)
   729 	{
   749 	{
       
   750 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::DDmaRequest =0x%08X (new style)", this));
   730 	__e32_atomic_add_ord32(&iChannel.iReqCount, 1);
   751 	__e32_atomic_add_ord32(&iChannel.iReqCount, 1);
   731 	__DMA_INVARIANT();
   752 	__DMA_INVARIANT();
   732 	}
   753 	}
   733 
   754 
   734 
   755 
   735 EXPORT_C DDmaRequest::~DDmaRequest()
   756 EXPORT_C DDmaRequest::~DDmaRequest()
   736 	{
   757 	{
       
   758 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::~DDmaRequest"));
   737 	__DMA_ASSERTD(!iQueued);
   759 	__DMA_ASSERTD(!iQueued);
   738 	__DMA_INVARIANT();
   760 	__DMA_INVARIANT();
   739 	FreeDesList();
   761 	if (iChannel.iDmacCaps->iAsymHwDescriptors)
       
   762 		{
       
   763 		FreeSrcDesList();
       
   764 		FreeDstDesList();
       
   765 		}
       
   766 	else
       
   767 		{
       
   768 		FreeDesList();
       
   769 		}
   740 	__e32_atomic_add_ord32(&iChannel.iReqCount, TUint32(-1));
   770 	__e32_atomic_add_ord32(&iChannel.iReqCount, TUint32(-1));
   741 	}
   771 	}
   742 
   772 
   743 
   773 
   744 EXPORT_C TInt DDmaRequest::Fragment(TUint32 aSrc, TUint32 aDest, TInt aCount,
   774 EXPORT_C TInt DDmaRequest::Fragment(TUint32 aSrc, TUint32 aDest, TInt aCount,
   745 									TUint aFlags, TUint32 aPslInfo)
   775 									TUint aFlags, TUint32 aPslInfo)
   746 	{
   776 	{
   747 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O "
   777 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O (old style)",
   748 									"src=0x%08X dest=0x%08X count=%d flags=0x%X psl=0x%08X",
   778 									&Kern::CurrentThread()));
   749 									&Kern::CurrentThread(), aSrc, aDest, aCount, aFlags, aPslInfo));
   779 
   750 	__DMA_ASSERTD(aCount > 0);
   780 	__DMA_ASSERTD(aCount > 0);
   751 
   781 
   752 	TDmaTransferArgs args(aSrc, aDest, aCount, aFlags, aPslInfo);
   782 	TDmaTransferArgs args(aSrc, aDest, aCount, aFlags, aPslInfo);
   753 
   783 
   754 	return Frag(args);
   784 	return Frag(args);
   755 	}
   785 	}
   756 
   786 
   757 
   787 
   758 EXPORT_C TInt DDmaRequest::Fragment(const TDmaTransferArgs& aTransferArgs)
   788 EXPORT_C TInt DDmaRequest::Fragment(const TDmaTransferArgs& aTransferArgs)
   759 	{
   789 	{
   760 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O", &Kern::CurrentThread()));
   790 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O (new style)",
       
   791 									&Kern::CurrentThread()));
   761 
   792 
   762 	// Writable temporary working copy of the transfer arguments.
   793 	// Writable temporary working copy of the transfer arguments.
   763 	// We need this because we may have to modify some fields before passing it
   794 	// We need this because we may have to modify some fields before passing it
   764 	// to the PSL (for example iChannelCookie, iTransferCount,
   795 	// to the PSL (for example iChannelCookie, iTransferCount,
   765 	// iDstConfig::iAddr, and iSrcConfig::iAddr).
   796 	// iDstConfig::iAddr, and iSrcConfig::iAddr).
   767 
   798 
   768 	return Frag(args);
   799 	return Frag(args);
   769 	}
   800 	}
   770 
   801 
   771 
   802 
   772 TUint DDmaRequest::GetTransferCount(const TDmaTransferArgs& aTransferArgs)
   803 TInt DDmaRequest::CheckTransferConfig(const TDmaTransferConfig& aTarget, TUint aCount) const
   773 	{
   804 	{
       
   805 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::CheckTransferConfig"));
       
   806 
       
   807 	if (aTarget.iElementSize != 0)
       
   808 		{
       
   809 		if ((aCount % aTarget.iElementSize) != 0)
       
   810 			{
       
   811 			// 2, 7 (These strange numbers refer to some test cases documented
       
   812 			// elsewhere - they will be removed eventually.)
       
   813 			__KTRACE_OPT(KPANIC,
       
   814 						 Kern::Printf("Error: ((aCount %% iElementSize) != 0)"));
       
   815 			return KErrArgument;
       
   816 			}
       
   817 		if (aTarget.iElementsPerFrame != 0)
       
   818 			{
       
   819 			if ((aTarget.iElementSize * aTarget.iElementsPerFrame *
       
   820 				 aTarget.iFramesPerTransfer) != aCount)
       
   821 				{
       
   822 				// 3, 8
       
   823 				__KTRACE_OPT(KPANIC,
       
   824 							 Kern::Printf("Error: ((iElementSize * "
       
   825 										  "iElementsPerFrame * "
       
   826 										  "iFramesPerTransfer) != aCount)"));
       
   827 				return KErrArgument;
       
   828 				}
       
   829 			}
       
   830 		}
       
   831 	else
       
   832 		{
       
   833 		if (aTarget.iElementsPerFrame != 0)
       
   834 			{
       
   835 			// 4, 9
       
   836 			__KTRACE_OPT(KPANIC,
       
   837 						 Kern::Printf("Error: (iElementsPerFrame != 0)"));
       
   838 			return KErrArgument;
       
   839 			}
       
   840 		if (aTarget.iFramesPerTransfer != 0)
       
   841 			{
       
   842 			// 5, 10
       
   843 			__KTRACE_OPT(KPANIC,
       
   844 						 Kern::Printf("Error: (iFramesPerTransfer != 0)"));
       
   845 			return KErrArgument;
       
   846 			}
       
   847 		if (aTarget.iElementsPerPacket != 0)
       
   848 			{
       
   849 			// 6, 11
       
   850 			__KTRACE_OPT(KPANIC,
       
   851 						 Kern::Printf("Error: (iElementsPerPacket != 0)"));
       
   852 			return KErrArgument;
       
   853 			}
       
   854 		}
       
   855 	return KErrNone;
       
   856 	}
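
// [Editor's note.] An illustrative configuration that satisfies both
// branches of the check above (all values invented for the example):
const TUint elementSize       = 4;             // bytes per element
const TUint elementsPerFrame  = 8;
const TUint framesPerTransfer = 16;
const TUint count = elementSize * elementsPerFrame * framesPerTransfer; // 512
// Passes: (count % elementSize) == 0, and the triple product equals count.
// A count of 510 would fail both tests (cases 2 and 3) and the function
// would return KErrArgument.
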
       
   857 
       
   858 
       
   859 TInt DDmaRequest::CheckMemFlags(const TDmaTransferConfig& aTarget, TUint aCount) const
       
   860 	{
       
   861 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::CheckMemFlags"));
       
   862 
       
   863 	const TBool mem_target = (aTarget.iFlags & KDmaMemAddr);
       
   864 
       
   865 	if (mem_target && (aTarget.iFlags & KDmaPhysAddr) && !(aTarget.iFlags & KDmaMemIsContiguous))
       
   866 		{
       
   867 		// Physical memory address implies contiguous range
       
   868 		// 13, 15
       
   869 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: mem_target && KDmaPhysAddr && !KDmaMemIsContiguous"));
       
   870 		return KErrArgument;
       
   871 		}
       
   872 	else if ((aTarget.iFlags & KDmaMemIsContiguous) && !mem_target)
       
   873 		{
       
   874 		// Contiguous range implies memory address
       
   875 		// 14, 16
       
   876 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: KDmaMemIsContiguous && !mem_target"));
       
   877 		return KErrArgument;
       
   878 		}
       
   879 	return KErrNone;
       
   880 	}
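
// [Editor's note.] The two rules above with illustrative flag combinations
// (the KDma* constants are the ones already used in this file):
const TUint32 accepted1 = KDmaMemAddr;                      // plain memory
const TUint32 accepted2 = KDmaMemAddr | KDmaPhysAddr | KDmaMemIsContiguous;
const TUint32 rejected1 = KDmaMemAddr | KDmaPhysAddr;       // phys addr w/o
                                                            // contiguity: 13, 15
const TUint32 rejected2 = KDmaMemIsContiguous;              // contiguity w/o
                                                            // KDmaMemAddr: 14, 16
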
       
   881 
       
   882 
       
   883 // Makes sure an element or frame never straddles two DMA subtransfer
       
   884 // fragments. This would be a fragmentation error by the PIL.
       
   885 //
       
   886 TInt DDmaRequest::AdjustFragmentSize(TUint& aFragSize, TUint aElementSize,
       
   887 									 TUint aFrameSize)
       
   888 	{
       
   889 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::AdjustFragmentSize FragSize=%d ES=%d FS=%d",
       
   890 									aFragSize, aElementSize, aFrameSize));
       
   891 
       
   892 	TUint rem = 0;
       
   893 	TInt r = KErrNone;
       
   894 
       
   895 	while (1)
       
   896 		{
       
   897 		// If an element size is defined, make sure the fragment size is
       
   898 		// greater or equal.
       
   899 		if (aElementSize)
       
   900 			{
       
   901 			if (aFragSize < aElementSize)
       
   902 				{
       
   903 				__KTRACE_OPT(KPANIC, Kern::Printf("Error: aFragSize < aElementSize"));
       
   904 				r = KErrArgument;
       
   905 				break;
       
   906 				}
       
   907 			}
       
   908 		// If a frame size is defined, make sure the fragment size is greater
       
   909 		// or equal.
       
   910 		if (aFrameSize)
       
   911 			{
       
   912 			if (aFragSize < aFrameSize)
       
   913 				{
       
   914 				__KTRACE_OPT(KPANIC, Kern::Printf("Error: aFragSize < aFrameSize"));
       
   915 				r = KErrArgument;
       
   916 				break;
       
   917 				}
       
   918 			}
       
   919 		// If a frame size is defined, make sure the fragment ends on a frame
       
   920 		// boundary.
       
   921 		if (aFrameSize)
       
   922 			{
       
   923 			rem = aFragSize % aFrameSize;
       
   924 			if (rem != 0)
       
   925 				{
       
   926 				aFragSize -= rem;
       
   927 				// 20, 22
       
   928 				__KTRACE_OPT(KDMA, Kern::Printf("aFragSize %% aFrameSize != 0 --> aFragSize = %d",
       
   929 												aFragSize));
       
   930 				// aFragSize has changed, so we have to do all the checks
       
   931 				// again.
       
   932 				continue;
       
   933 				}
       
   934 			}
       
   935 		// If an element size is defined, make sure the fragment ends on an
       
   936 		// element boundary.
       
   937 		if (aElementSize)
       
   938 			{
       
   939 			rem = aFragSize % aElementSize;
       
   940 			if (rem != 0)
       
   941 				{
       
   942 				aFragSize -= rem;
       
   943 				// 21, 23
       
   944 				__KTRACE_OPT(KDMA, Kern::Printf("aFragSize %% aElementSize != 0 --> aFragSize = %d",
       
   945 												aFragSize));
       
   946 				// aFragSize has changed, so we have to do all the checks
       
   947 				// again.
       
   948 				continue;
       
   949 				}
       
   950 			}
       
   951 		// Done - all checks passed. Let's get out.
       
   952 		break;
       
   953 		}
       
   954 
       
   955 	return r;
       
   956 	}
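
// [Editor's note.] A worked pass through the loop above, with invented
// numbers:
TUint fragSize = 1000;
TInt res = AdjustFragmentSize(fragSize, 16, 48);  // element 16, frame 48
// Frame pass:   1000 % 48 == 40 -> fragSize = 960, loop restarts
// Element pass:  960 % 16 == 0  -> no change
// The next iteration changes nothing, so the loop exits with
// res == KErrNone and fragSize == 960 (exactly 20 frames, 60 elements).
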
       
   957 
       
   958 
       
   959 TUint DDmaRequest::GetTransferCount(const TDmaTransferArgs& aTransferArgs) const
       
   960 	{
       
   961 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::GetTransferCount"));
       
   962 
   774 	const TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
   963 	const TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
       
   964 #ifdef _DEBUG
   775 	const TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
   965 	const TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
       
   966 #endif	// #ifdef _DEBUG
   776 
   967 
   777 	TUint count = aTransferArgs.iTransferCount;
   968 	TUint count = aTransferArgs.iTransferCount;
   778 	if (count == 0)
   969 	if (count == 0)
   779 		{
   970 		{
   780 		__KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == 0"));
   971 		__KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == 0"));
   781 		count = src.iElementSize * src.iElementsPerFrame *
   972 		count = src.iElementSize * src.iElementsPerFrame *
   782 			src.iFramesPerTransfer;
   973 			src.iFramesPerTransfer;
       
   974 #ifdef _DEBUG
   783 		const TUint dst_cnt = dst.iElementSize * dst.iElementsPerFrame *
   975 		const TUint dst_cnt = dst.iElementSize * dst.iElementsPerFrame *
   784 			dst.iFramesPerTransfer;
   976 			dst.iFramesPerTransfer;
   785 		if (count != dst_cnt)
   977 		if (count != dst_cnt)
   786 			{
   978 			{
       
   979 			// 1
   787 			__KTRACE_OPT(KPANIC, Kern::Printf("Error: (count != dst_cnt)"));
   980 			__KTRACE_OPT(KPANIC, Kern::Printf("Error: (count != dst_cnt)"));
   788 			return 0;
   981 			return 0;
   789 			}
   982 			}
       
   983 #endif	// #ifdef _DEBUG
   790 		}
   984 		}
   791 	else
   985 	else
   792 		{
   986 		{
   793 		__KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == %d", count));
   987 		__KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == %d", count));
       
   988 #ifdef _DEBUG
   794 		// Client shouldn't specify contradictory or incomplete things
   989 		// Client shouldn't specify contradictory or incomplete things
   795 		if (src.iElementSize != 0)
   990 		if (CheckTransferConfig(src, count) != KErrNone)
   796 			{
   991 			{
   797 			if ((count % src.iElementSize) != 0)
   992 			__KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckTransferConfig(src)"));
   798 				{
   993 			return 0;
   799 				__KTRACE_OPT(KPANIC,
   994 			}
   800 							 Kern::Printf("Error: ((count %% src.iElementSize) != 0)"));
   995 		if (CheckTransferConfig(dst, count) != KErrNone)
   801 				return 0;
   996 			{
   802 				}
   997 			__KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckTransferConfig(dst)"));
   803 			if (src.iElementsPerFrame != 0)
   998 			return 0;
   804 				{
   999 			}
   805 				if ((src.iElementSize * src.iElementsPerFrame * src.iFramesPerTransfer) != count)
  1000 #endif	// #ifdef _DEBUG
   806 					{
       
   807 					__KTRACE_OPT(KPANIC,
       
   808 								 Kern::Printf("Error: ((src.iElementSize * "
       
   809 											  "src.iElementsPerFrame * "
       
   810 											  "src.iFramesPerTransfer) != count)"));
       
   811 					return 0;
       
   812 					}
       
   813 				}
       
   814 			}
       
   815 		else
       
   816 			{
       
   817 			if (src.iElementsPerFrame != 0)
       
   818 				{
       
   819 				__KTRACE_OPT(KPANIC,
       
   820 							 Kern::Printf("Error: (src.iElementsPerFrame != 0)"));
       
   821 				return 0;
       
   822 				}
       
   823 			if (src.iFramesPerTransfer != 0)
       
   824 				{
       
   825 				__KTRACE_OPT(KPANIC,
       
   826 							 Kern::Printf("Error: (src.iFramesPerTransfer != 0)"));
       
   827 				return 0;
       
   828 				}
       
   829 			if (src.iElementsPerPacket != 0)
       
   830 				{
       
   831 				__KTRACE_OPT(KPANIC,
       
   832 							 Kern::Printf("Error: (src.iElementsPerPacket != 0)"));
       
   833 				return 0;
       
   834 				}
       
   835 			}
       
   836 		if (dst.iElementSize != 0)
       
   837 			{
       
   838 			if ((count % dst.iElementSize) != 0)
       
   839 				{
       
   840 				__KTRACE_OPT(KPANIC,
       
   841 							 Kern::Printf("Error: ((count %% dst.iElementSize) != 0)"));
       
   842 				return 0;
       
   843 				}
       
   844 			if (dst.iElementsPerFrame != 0)
       
   845 				{
       
   846 				if ((dst.iElementSize * dst.iElementsPerFrame * dst.iFramesPerTransfer) != count)
       
   847 					{
       
   848 					__KTRACE_OPT(KPANIC,
       
   849 								 Kern::Printf("Error: ((dst.iElementSize * "
       
   850 											  "dst.iElementsPerFrame * "
       
   851 											  "dst.iFramesPerTransfer) != count)"));
       
   852 					return 0;
       
   853 					}
       
   854 				}
       
   855 			}
       
   856 		else
       
   857 			{
       
   858 			if (dst.iElementsPerFrame != 0)
       
   859 				{
       
   860 				__KTRACE_OPT(KPANIC,
       
   861 							 Kern::Printf("Error: (dst.iElementsPerFrame != 0)"));
       
   862 				return 0;
       
   863 				}
       
   864 			if (dst.iFramesPerTransfer != 0)
       
   865 				{
       
   866 				__KTRACE_OPT(KPANIC,
       
   867 							 Kern::Printf("Error: (dst.iFramesPerTransfer != 0)"));
       
   868 				return 0;
       
   869 				}
       
   870 			if (dst.iElementsPerPacket != 0)
       
   871 				{
       
   872 				__KTRACE_OPT(KPANIC,
       
   873 							 Kern::Printf("Error: (dst.iElementsPerPacket != 0)"));
       
   874 				return 0;
       
   875 				}
       
   876 			}
       
   877 		}
  1001 		}
   878 	return count;
  1002 	return count;
   879 	}
  1003 	}
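
// [Editor's note.] Illustrative use of the derivation above, assuming a
// default-constructed TDmaTransferArgs: leave iTransferCount at zero and
// let GetTransferCount() compute it from the element/frame products (field
// values invented; both sides kept identical so the debug cross-check
// passes):
TDmaTransferArgs args;
args.iTransferCount                = 0;    // derive from the products below
args.iSrcConfig.iElementSize       = 4;
args.iSrcConfig.iElementsPerFrame  = 8;
args.iSrcConfig.iFramesPerTransfer = 16;
args.iDstConfig = args.iSrcConfig;         // dst product must match src
// GetTransferCount(args) returns 4 * 8 * 16 = 512.
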
   880 
  1004 
   881 
  1005 
   882 TInt DDmaRequest::Frag(TDmaTransferArgs& aTransferArgs)
  1006 TUint DDmaRequest::GetMaxTransferlength(const TDmaTransferArgs& aTransferArgs, TUint aCount) const
   883 	{
  1007 	{
   884 	__DMA_ASSERTD(!iQueued);
  1008 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::GetMaxTransferlength"));
   885 
       
   886 	// Transfer count checks
       
   887 	const TUint count = GetTransferCount(aTransferArgs);
       
   888 	if (count == 0)
       
   889 		{
       
   890 		return KErrArgument;
       
   891 		}
       
   892 
  1009 
   893 	const TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
  1010 	const TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
   894 	const TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
  1011 	const TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
   895 
  1012 
   896 	// Ask the PSL what the maximum length possible for this transfer is
  1013 	// Ask the PSL what the maximum length is for a single transfer
   897 	TUint max_xfer_len = iChannel.MaxTransferLength(src.iFlags, dst.iFlags,
  1014 	TUint max_xfer_len = iChannel.MaxTransferLength(src.iFlags, dst.iFlags,
   898 													aTransferArgs.iPslRequestInfo);
  1015 													aTransferArgs.iPslRequestInfo);
   899 	if (iMaxTransferSize)
  1016 	if (iMaxTransferSize)
   900 		{
  1017 		{
   901 		// User has set a size cap
  1018 		// (User has set a transfer size cap)
   902 		__KTRACE_OPT(KDMA, Kern::Printf("iMaxTransferSize != 0"));
  1019 		__KTRACE_OPT(KDMA, Kern::Printf("iMaxTransferSize: %d", iMaxTransferSize));
   903 		__DMA_ASSERTA((iMaxTransferSize <= max_xfer_len) || (max_xfer_len == 0));
  1020 		if ((max_xfer_len != 0) && (iMaxTransferSize > max_xfer_len))
       
  1021 			{
       
  1022 			// Not really an error, but still...
       
  1023 			__KTRACE_OPT(KPANIC, Kern::Printf("Warning: iMaxTransferSize > max_xfer_len"));
       
  1024 			}
   904 		max_xfer_len = iMaxTransferSize;
  1025 		max_xfer_len = iMaxTransferSize;
   905 		}
  1026 		}
   906 	else
  1027 	else
   907 		{
  1028 		{
   908 		// User doesn't care about max size
  1029 		// (User doesn't care about max transfer size)
   909 		if (max_xfer_len == 0)
  1030 		if (max_xfer_len == 0)
   910 			{
  1031 			{
   911 			// No maximum imposed by controller
  1032 			// '0' = no maximum imposed by controller
   912 			max_xfer_len = count;
  1033 			max_xfer_len = aCount;
   913 			}
  1034 			}
       
  1035 		}
       
  1036 	__KTRACE_OPT(KDMA, Kern::Printf("max_xfer_len: %d", max_xfer_len));
       
  1037 
       
  1038 	// Some sanity checks
       
  1039 #ifdef _DEBUG
       
  1040 	if ((max_xfer_len < src.iElementSize) || (max_xfer_len < dst.iElementSize))
       
  1041 		{
       
  1042 		// 18
       
  1043 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: max_xfer_len < iElementSize"));
       
  1044 		return 0;
       
  1045 		}
       
  1046 	if ((max_xfer_len < (src.iElementSize * src.iElementsPerFrame)) ||
       
  1047 		(max_xfer_len < (dst.iElementSize * dst.iElementsPerFrame)))
       
  1048 		{
       
  1049 		// 19
       
  1050 		__KTRACE_OPT(KPANIC,
       
  1051 					 Kern::Printf("Error: max_xfer_len < (iElementSize * iElementsPerFrame)"));
       
  1052 		return 0;
       
  1053 		}
       
  1054 #endif	// #ifdef _DEBUG
       
  1055 
       
  1056 	return max_xfer_len;
       
  1057 	}
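
// [Editor's sketch - not part of this changeset.] The precedence implemented
// above, restated as a standalone helper (names invented):
static TUint EffectiveMaxLen(TUint aPslMax, TUint aUserCap, TUint aCount)
	{
	if (aUserCap != 0)
		{
		return aUserCap;       // the client's cap wins, even over the PSL
		}
	return (aPslMax != 0) ? aPslMax : aCount;  // 0 from the PSL = 'no limit'
	}
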
       
  1058 
       
  1059 
       
  1060 // Unified internal fragmentation routine, called by both the old and new
       
  1061 // exported Fragment() functions.
       
  1062 //
       
  1063 // Depending on whether the DMAC uses a single or two separate descriptor
       
  1064 // chains, this function branches into either FragSym() or FragAsym(), and the
       
  1065 // latter function further into either FragAsymSrc()/FragAsymDst() or
       
  1066 // FragBalancedAsym().
       
  1067 //
       
  1068 TInt DDmaRequest::Frag(TDmaTransferArgs& aTransferArgs)
       
  1069 	{
       
  1070 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Frag"));
       
  1071 	__DMA_ASSERTD(!iQueued);
       
  1072 
       
  1073 	// Transfer count + checks
       
  1074 	const TUint count = GetTransferCount(aTransferArgs);
       
  1075 	if (count == 0)
       
  1076 		{
       
  1077 		return KErrArgument;
       
  1078 		}
       
  1079 
       
  1080 	// Max transfer length + checks
       
  1081 	const TUint max_xfer_len = GetMaxTransferlength(aTransferArgs, count);
       
  1082 	if (max_xfer_len == 0)
       
  1083 		{
       
  1084 		return KErrArgument;
   914 		}
  1085 		}
   915 
  1086 
   916 	// ISR callback requested?
  1087 	// ISR callback requested?
   917 	const TBool isr_cb = (aTransferArgs.iFlags & KDmaRequestCallbackFromIsr);
  1088 	const TBool isr_cb = (aTransferArgs.iFlags & KDmaRequestCallbackFromIsr);
   918 	if (isr_cb)
  1089 	if (isr_cb)
   919 		{
  1090 		{
   920 		// Requesting an ISR callback w/o supplying one?
  1091 		// Requesting an ISR callback w/o supplying one?
   921 		if (!iDmaCb)
  1092 		if (!iDmaCb)
   922 			{
  1093 			{
       
  1094 			// 12
       
  1095 			__KTRACE_OPT(KPANIC, Kern::Printf("Error: !iDmaCb"));
   923 			return KErrArgument;
  1096 			return KErrArgument;
   924 			}
  1097 			}
   925 		}
  1098 		}
   926 
  1099 
   927 	// Set the channel cookie for the PSL
  1100 	// Set the channel cookie for the PSL
   928 	aTransferArgs.iChannelCookie = iChannel.PslId();
  1101 	aTransferArgs.iChannelCookie = iChannel.PslId();
   929 
  1102 
       
  1103 	// Client shouldn't specify contradictory or invalid things
       
  1104 	TInt r = CheckMemFlags(aTransferArgs.iSrcConfig, count);
       
  1105 	if (r != KErrNone)
       
  1106 		{
       
  1107 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckMemFlags(src)"));
       
  1108 		return r;
       
  1109 		}
       
  1110 	r =  CheckMemFlags(aTransferArgs.iDstConfig, count);
       
  1111 	if (r != KErrNone)
       
  1112 		{
       
  1113 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckMemFlags(dst)"));
       
  1114 		return r;
       
  1115 		}
       
  1116 
   930 	// Now the actual fragmentation
  1117 	// Now the actual fragmentation
   931 	TInt r;
       
   932 	if (iChannel.iDmacCaps->iAsymHwDescriptors)
  1118 	if (iChannel.iDmacCaps->iAsymHwDescriptors)
   933 		{
  1119 		{
   934 		r = FragAsym(aTransferArgs, count, max_xfer_len);
  1120 		r = FragAsym(aTransferArgs, count, max_xfer_len);
   935 		}
  1121 		}
   936 	else
  1122 	else
   949 
  1135 
   950 
  1136 
   951 TInt DDmaRequest::FragSym(TDmaTransferArgs& aTransferArgs, TUint aCount,
  1137 TInt DDmaRequest::FragSym(TDmaTransferArgs& aTransferArgs, TUint aCount,
   952 						  TUint aMaxTransferLen)
  1138 						  TUint aMaxTransferLen)
   953 	{
  1139 	{
       
  1140 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragSym"));
       
  1141 
   954 	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
  1142 	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
   955 	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
  1143 	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
   956 
       
   957 	const TBool mem_src = (src.iFlags & KDmaMemAddr);
  1144 	const TBool mem_src = (src.iFlags & KDmaMemAddr);
   958 	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);
  1145 	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);
   959 
  1146 
   960 	const TUint align_mask_src = iChannel.AddressAlignMask(src.iFlags,
  1147 	const TUint align_mask_src = iChannel.AddressAlignMask(src.iFlags,
   961 														   src.iElementSize,
  1148 														   src.iElementSize,
   962 														   aTransferArgs.iPslRequestInfo);
  1149 														   aTransferArgs.iPslRequestInfo);
       
  1150 	__KTRACE_OPT(KDMA, Kern::Printf("align_mask_src: 0x%x", align_mask_src));
   963 	const TUint align_mask_dst = iChannel.AddressAlignMask(dst.iFlags,
  1151 	const TUint align_mask_dst = iChannel.AddressAlignMask(dst.iFlags,
   964 														   dst.iElementSize,
  1152 														   dst.iElementSize,
   965 														   aTransferArgs.iPslRequestInfo);
  1153 														   aTransferArgs.iPslRequestInfo);
       
  1154 	__KTRACE_OPT(KDMA, Kern::Printf("align_mask_dst: 0x%x", align_mask_dst));
       
  1155 
   966 	// Memory buffers must satisfy alignment constraint
  1156 	// Memory buffers must satisfy alignment constraint
   967 	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask_src) == 0));
  1157 	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask_src) == 0));
   968 	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0));
  1158 	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0));
   969 
  1159 
       
  1160 	// Max aligned length is used to make sure the beginnings of subtransfers
       
  1161 	// (i.e. fragments) are correctly aligned.
   970 	const TUint max_aligned_len = (aMaxTransferLen &
  1162 	const TUint max_aligned_len = (aMaxTransferLen &
   971 								   ~(Max(align_mask_src, align_mask_dst)));
  1163 								   ~(Max(align_mask_src, align_mask_dst)));
       
  1164 	__KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len));
   972 	// Client and PSL sane?
  1165 	// Client and PSL sane?
   973 	__DMA_ASSERTD(max_aligned_len > 0);
  1166 	__DMA_ASSERTD(max_aligned_len > 0);
   974 
  1167 
   975 	FreeDesList();			   // revert any previous fragmentation attempt
  1168 	if (mem_src && mem_dst &&
       
  1169 		align_mask_src && align_mask_dst &&
       
  1170 		(align_mask_src != align_mask_dst) &&
       
  1171 		(!(src.iFlags & KDmaMemIsContiguous) || !(dst.iFlags & KDmaMemIsContiguous)))
       
  1172 		{
       
  1173 		// We don't support transfers which satisfy ALL of the following conditions:
       
  1174 		// 1) from memory to memory,
       
  1175 		// 2) both sides have address alignment requirements,
       
  1176 		// 3) those alignment requirements are not the same,
       
  1177 		// 4) the memory is non-contiguous on at least one end.
       
  1178 		//
       
  1179 		// [A 5th condition is that the channel doesn't support fully
       
  1180 		// asymmetric h/w descriptor lists,
       
  1181 		// i.e. TDmaChannel::DmacCaps::iAsymHwDescriptors is reported as EFalse
       
  1182 		// or iBalancedAsymSegments as ETrue. Hence this check is done in
       
  1183 		// FragSym() and FragBalancedAsym() but not in FragAsym().]
       
  1184 		//
       
  1185 		// The reason for this is that fragmentation could be impossible. The
       
  1186 		// memory layout (page break) on the side with the less stringent
       
  1187 		// alignment requirement can result in a misaligned target address on
       
  1188 		// the other side.
       
  1189 		//
       
  1190 		// Here is an example:
       
  1191 		//
       
  1192 		// src.iAddr =  3964 (0x0F7C), non-contiguous,
       
  1193 		// align_mask_src = 1 (alignment = 2 bytes)
       
  1194 		// dst.iAddr = 16384 (0x4000), contiguous,
       
  1195 		// align_mask_dst = 7 (alignment = 8 bytes)
       
  1196 		// count = max_xfer_len = 135 bytes
       
  1197 		// => max_aligned_len = 128 bytes
       
  1198 		//
       
  1199 		// Now, suppose MaxPhysSize() returns 132 bytes because src has 132
       
  1200 		// contiguous bytes to the end of its current mem page.
       
  1201 		// Trying to fragment this leads to:
       
  1202 		//
       
  1203 		// frag_1 = 128 bytes: src reads from 3964 (0x0F7C),
       
  1204 		//                     dst writes to 16384 (0x4000).
       
  1205 		// (Fragment 1 uses the max_aligned_len instead of 132 bytes because
       
  1206 		// otherwise the next fragment would start for the destination at
       
  1207 		// dst.iAddr + 132 = 16516 (0x4084), which is not 8-byte aligned.)
       
  1208 		//
       
  1209 		// frag_2 = 4 bytes: src reads from 4092 (0x0FFC),
       
  1210 		//                   dst writes to 16512 (0x4080).
       
  1211 		// (Fragment 2 uses just 4 bytes instead of the remaining 7 bytes
       
  1212 		// because there is a memory page break on the source side after 4 bytes.)
       
  1213 		//
       
  1214 		// frag_3 = 3 bytes: src reads from 4096 (0x1000),
       
  1215 		//                   dst writes to 16516 (0x4084).
       
  1216 		//
       
  1217 		// And there's the problem: the start address of frag_3 is going to be
       
  1218 		// misaligned for the destination side - it's not 8-byte aligned!
       
  1219 		//
       
  1220 		// 17
       
  1221 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: Different alignments for src & dst"
       
  1222 										  " + non-contiguous target(s)"));
       
  1223 		return KErrArgument;
       
  1224 		}
       
  1225 
   976 	TInt r;
  1226 	TInt r;
       
  1227 	// Revert any previous fragmentation attempt
       
  1228 	FreeDesList();
   977 	do
  1229 	do
   978 		{
  1230 		{
   979 		// Allocate fragment
  1231 		// Allocate fragment
   980 		r = ExpandDesList(/*1*/);
  1232 		r = ExpandDesList(/*1*/);
   981 		if (r != KErrNone)
  1233 		if (r != KErrNone)
   982 			{
  1234 			{
   983 			FreeDesList();
       
   984 			break;
  1235 			break;
   985 			}
  1236 			}
   986 		// Compute fragment size
  1237 		// Compute fragment size
   987 		TUint c = Min(aMaxTransferLen, aCount);
  1238 		TUint c = Min(aMaxTransferLen, aCount);
   988 		if (mem_src && !(src.iFlags & KDmaPhysAddr))
  1239 		__KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c));
   989 			{
  1240 
   990 			__KTRACE_OPT(KDMA, Kern::Printf("mem_src && !(src.iFlags & KDmaPhysAddr)"));
  1241 		// SRC
   991 			// @@@ Should also take into account (src.iFlags & KDmaMemIsContiguous)!
  1242 		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
       
  1243 			{
   992 			c = MaxPhysSize(src.iAddr, c);
  1244 			c = MaxPhysSize(src.iAddr, c);
   993 			}
  1245 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(src.iAddr, c) = %d", c));
   994 		if (mem_dst && !(dst.iFlags & KDmaPhysAddr))
  1246 			}
   995 			{
  1247 
   996 			__KTRACE_OPT(KDMA, Kern::Printf("mem_dst && !(dst.iFlags & KDmaPhysAddr)"));
  1248 		// DST
   997 			// @@@ Should also take into account (dst.iFlags & KDmaMemIsContiguous)!
  1249 		if (mem_dst && !(dst.iFlags & KDmaMemIsContiguous))
       
  1250 			{
   998 			c = MaxPhysSize(dst.iAddr, c);
  1251 			c = MaxPhysSize(dst.iAddr, c);
   999 			}
  1252 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(dst.iAddr, c) = %d", c));
       
  1253 			}
       
  1254 
       
  1255 		// SRC & DST
  1000 		if ((mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len))
  1256 		if ((mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len))
  1001 			{
  1257 			{
  1002 			// This is not the last fragment of a transfer to/from memory.
  1258 			// This is not the last fragment of a transfer to/from memory.
  1003 			// We must round down the fragment size so the next one is
  1259 			// We must round down the fragment size so the next one is
  1004 			// correctly aligned.
  1260 			// correctly aligned.
  1005 			__KTRACE_OPT(KDMA, Kern::Printf("(mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len)"));
       
  1006 			c = max_aligned_len;
  1261 			c = max_aligned_len;
  1007 			}
  1262 			__KTRACE_OPT(KDMA, Kern::Printf("c = max_aligned_len = %d", c));
  1008 
  1263 			//
  1009 		// TODO: Make sure an element or frame on neither src or dst side
  1264 			// But can this condition actually occur if src and dst are
  1010 		// (which can be of different sizes) never straddles a DMA subtransfer.
  1265 			// properly aligned to start with?
  1011 		// (This would be a fragmentation error by the PIL.)
  1266 			//
       
  1267 			// If we disallow unequal alignment requirements in connection with
       
  1268 			// non-contiguous memory buffers (see the long comment above in
       
  1269 			// this function for why) and if both target addresses are
       
  1270 			// correctly aligned at the beginning of the transfer then it
       
  1271 			// doesn't seem possible to end up with a fragment which is not
       
  1272 			// quite the total remaining size (c < aCount) but still larger
       
  1273 			// than the greatest aligned length (c > max_aligned_len).
       
  1274 			//
       
  1275 			// That's because address alignment values are always a power of
       
  1276 			// two (at least that's what we assume - otherwise
       
  1277 			// AddressAlignMask() doesn't work), and memory page sizes are also
       
  1278 			// always a power of two and hence a multiple of the alignment
       
  1279 			// value (as long as the alignment is not greater than the page
       
  1280 			// size, which seems a reasonable assumption regardless of the
       
  1281 			// actual page size). So if we start properly aligned anywhere in a
       
  1282 			// memory page then the number of bytes to the end of that page is
       
   1283 			// always a multiple of the alignment value - there's no remainder.
       
  1284 			//
       
  1285 			// So let's see if we ever hit this assertion:
       
  1286 			Kern::Printf("Unexpected: (mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len)");
       
  1287 			__DMA_ASSERTA(EFalse);
       
  1288 			}
       
  1289 
       
  1290 		// If this is not the last fragment...
       
  1291 		if (c < aCount)
       
  1292 			{
       
  1293 			const TUint es_src = src.iElementSize;
       
  1294 			const TUint es_dst = dst.iElementSize;
       
  1295 			const TUint fs_src = es_src * src.iElementsPerFrame;
       
  1296 			const TUint fs_dst = es_dst * dst.iElementsPerFrame;
       
  1297 			TUint c_prev;
       
  1298 			do
       
  1299 				{
       
  1300 				c_prev = c;
       
  1301 				// If fs_src is !0 then es_src must be !0 as well (see
       
  1302 				// CheckTransferConfig).
       
  1303 				if (es_src)
       
  1304 					{
       
  1305 					r = AdjustFragmentSize(c, es_src, fs_src);
       
  1306 					if (r != KErrNone)
       
  1307 						{
       
  1308 						break;							// while (c != c_prev);
       
  1309 						}
       
  1310 					}
       
  1311 				// If fs_dst is !0 then es_dst must be !0 as well (see
       
  1312 				// CheckTransferConfig).
       
  1313 				if (es_dst)
       
  1314 					{
       
  1315 					r = AdjustFragmentSize(c, es_dst, fs_dst);
       
  1316 					if (r != KErrNone)
       
  1317 						{
       
  1318 						break;							// while (c != c_prev);
       
  1319 						}
       
  1320 					}
       
  1321 				} while (c != c_prev);
       
  1322 			if (r != KErrNone)
       
  1323 				{
       
  1324 				break;									 // while (aCount > 0);
       
  1325 				}
       
  1326 			}
  1012 
  1327 
  1013 		// Set transfer count for the PSL
  1328 		// Set transfer count for the PSL
  1014 		aTransferArgs.iTransferCount = c;
  1329 		aTransferArgs.iTransferCount = c;
  1015 		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
  1330 		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
  1016 										c, c, aCount, aCount));
  1331 										c, c, aCount, aCount));
  1017 		// Initialise fragment
  1332 		// Initialise fragment
  1018 		r = iChannel.iController->InitDes(*iLastHdr, aTransferArgs);
  1333 		r = iChannel.iController->InitDes(*iLastHdr, aTransferArgs);
  1019 		if (r != KErrNone)
  1334 		if (r != KErrNone)
  1020 			{
  1335 			{
  1021 			FreeDesList();
       
  1022 			break;
  1336 			break;
  1023 			}
  1337 			}
  1024 		// Update for next iteration
  1338 		// Update for next iteration
  1025 		aCount -= c;
  1339 		aCount -= c;
  1026 		if (mem_src)
  1340 		if (mem_src)
       
  1341 			{
  1027 			src.iAddr += c;
  1342 			src.iAddr += c;
       
  1343 			}
  1028 		if (mem_dst)
  1344 		if (mem_dst)
       
  1345 			{
  1029 			dst.iAddr += c;
  1346 			dst.iAddr += c;
  1030 		}
  1347 			}
  1031 	while (aCount > 0);
  1348 		} while (aCount > 0);
  1032 
  1349 
       
  1350 	if (r != KErrNone)
       
  1351 		{
       
  1352 		FreeDesList();
       
  1353 		}
  1033 	return r;
  1354 	return r;
  1034 	}
  1355 	}
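
// [Editor's note.] One illustrative iteration of the FragSym() loop above,
// assuming 4 KB MMU pages, aCount = 10000, aMaxTransferLen =
// max_aligned_len = 4096, no element/frame sizes set, and a non-contiguous
// memory source at src.iAddr = 0x80000F00 (all numbers invented):
TUint c = Min(4096u, 10000u);        // c = 4096
c = MaxPhysSize(0x80000F00u, c);     // the source page ends at 0x80001000,
                                     // so if contiguity breaks there, c = 256
// c < aCount but c <= max_aligned_len, so no alignment rounding is needed:
// this fragment transfers 256 bytes and the next iteration resumes at
// src.iAddr + 256 / dst.iAddr + 256.
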
  1035 
  1356 
  1036 
  1357 
  1037 TInt DDmaRequest::FragAsym(TDmaTransferArgs& aTransferArgs, TUint aCount,
  1358 TInt DDmaRequest::FragAsym(TDmaTransferArgs& aTransferArgs, TUint aCount,
  1038 						   TUint aMaxTransferLen)
  1359 						   TUint aMaxTransferLen)
  1039 	{
  1360 	{
  1040 	TInt r = FragAsymSrc(aTransferArgs, aCount, aMaxTransferLen);
  1361 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragAsym"));
       
  1362 
       
  1363 	TInt r;
       
  1364 	if (iChannel.iDmacCaps->iBalancedAsymSegments)
       
  1365 		{
       
  1366 		r = FragBalancedAsym(aTransferArgs, aCount, aMaxTransferLen);
       
  1367 		if (r != KErrNone)
       
  1368 			{
       
  1369 			FreeSrcDesList();
       
  1370 			FreeDstDesList();
       
  1371 			}
       
  1372 		return r;
       
  1373 		}
       
  1374 	r = FragAsymSrc(aTransferArgs, aCount, aMaxTransferLen);
  1041 	if (r != KErrNone)
  1375 	if (r != KErrNone)
  1042 		{
  1376 		{
  1043 		FreeSrcDesList();
  1377 		FreeSrcDesList();
  1044 		return r;
  1378 		return r;
  1045 		}
  1379 		}
  1054 
  1388 
  1055 
  1389 
  1056 TInt DDmaRequest::FragAsymSrc(TDmaTransferArgs& aTransferArgs, TUint aCount,
  1390 TInt DDmaRequest::FragAsymSrc(TDmaTransferArgs& aTransferArgs, TUint aCount,
  1057 							  TUint aMaxTransferLen)
  1391 							  TUint aMaxTransferLen)
  1058 	{
  1392 	{
       
  1393 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragAsymSrc"));
       
  1394 
  1059 	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
  1395 	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
  1060 
       
  1061 	const TBool mem_src = (src.iFlags & KDmaMemAddr);
  1396 	const TBool mem_src = (src.iFlags & KDmaMemAddr);
  1062 
  1397 
  1063 	const TUint align_mask = iChannel.AddressAlignMask(src.iFlags,
  1398 	const TUint align_mask = iChannel.AddressAlignMask(src.iFlags,
  1064 													   src.iElementSize,
  1399 													   src.iElementSize,
  1065 													   aTransferArgs.iPslRequestInfo);
  1400 													   aTransferArgs.iPslRequestInfo);
       
  1401 	__KTRACE_OPT(KDMA, Kern::Printf("align_mask: 0x%x", align_mask));
       
  1402 
  1066 	// Memory buffers must satisfy alignment constraint
  1403 	// Memory buffers must satisfy alignment constraint
  1067 	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask) == 0));
  1404 	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask) == 0));
  1068 
  1405 
       
  1406 	// Max aligned length is used to make sure the beginnings of subtransfers
       
  1407 	// (i.e. fragments) are correctly aligned.
  1069 	const TUint max_aligned_len = (aMaxTransferLen & ~align_mask);
  1408 	const TUint max_aligned_len = (aMaxTransferLen & ~align_mask);
  1070 	__DMA_ASSERTD(max_aligned_len > 0);				  // bug in PSL if not true
  1409 	__KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len));
  1071 
  1410 	// Client and PSL sane?
       
  1411 	__DMA_ASSERTD(max_aligned_len > 0);
       
  1412 
       
  1413 	TInt r;
       
  1414 	// Revert any previous fragmentation attempt
  1072 	FreeSrcDesList();
  1415 	FreeSrcDesList();
  1073 	TInt r;
       
  1074 	do
  1416 	do
  1075 		{
  1417 		{
  1076 		// Allocate fragment
  1418 		// Allocate fragment
  1077 		r = ExpandSrcDesList(/*1*/);
  1419 		r = ExpandSrcDesList(/*1*/);
  1078 		if (r != KErrNone)
  1420 		if (r != KErrNone)
  1079 			{
  1421 			{
  1080 			break;
  1422 			break;
  1081 			}
  1423 			}
  1082 		// Compute fragment size
  1424 		// Compute fragment size
  1083 		TUint c = Min(aMaxTransferLen, aCount);
  1425 		TUint c = Min(aMaxTransferLen, aCount);
  1084 		if (mem_src && !(src.iFlags & KDmaPhysAddr))
  1426 		__KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c));
  1085 			{
  1427 
  1086 			__KTRACE_OPT(KDMA, Kern::Printf("mem_src && !(src.iFlags & KDmaPhysAddr)"));
  1428 		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
       
  1429 			{
  1087 			c = MaxPhysSize(src.iAddr, c);
  1430 			c = MaxPhysSize(src.iAddr, c);
  1088 			}
  1431 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(src.iAddr, c) = %d", c));
       
  1432 			}
       
  1433 
  1089 		if (mem_src && (c < aCount) && (c > max_aligned_len))
  1434 		if (mem_src && (c < aCount) && (c > max_aligned_len))
  1090 			{
  1435 			{
  1091 			// This is not the last fragment of a transfer from memory.
  1436 			// This is not the last fragment of a transfer from memory.
  1092 			// We must round down the fragment size so the next one is
  1437 			// We must round down the fragment size so the next one is
  1093 			// correctly aligned.
  1438 			// correctly aligned.
  1094 			__KTRACE_OPT(KDMA, Kern::Printf("mem_src && (c < aCount) && (c > max_aligned_len)"));
  1439 			__KTRACE_OPT(KDMA, Kern::Printf("c = max_aligned_len = %d", c));
  1095 			c = max_aligned_len;
  1440 			//
  1096 			}
  1441 			// But can this condition actually occur if src is properly aligned
       
  1442 			// to start with?
       
  1443 			//
       
  1444 			// If the target address is correctly aligned at the beginning of
       
  1445 			// the transfer then it doesn't seem possible to end up with a
       
  1446 			// fragment which is not quite the total remaining size (c <
       
  1447 			// aCount) but still larger than the greatest aligned length (c >
       
  1448 			// max_aligned_len).
       
  1449 			//
       
  1450 			// That's because address alignment values are always a power of
       
  1451 			// two (at least that's what we assume - otherwise
       
  1452 			// AddressAlignMask() doesn't work), and memory page sizes are also
       
  1453 			// always a power of two and hence a multiple of the alignment
       
  1454 			// value (as long as the alignment is not greater than the page
       
  1455 			// size, which seems a reasonable assumption regardless of the
       
  1456 			// actual page size). So if we start properly aligned anywhere in a
       
  1457 			// memory page then the number of bytes to the end of that page is
       
   1458 			// always a multiple of the alignment value - there's no remainder.
       
  1459 			//
       
  1460 			// So let's see if we ever hit this assertion:
       
  1461 			Kern::Printf("Unexpected: mem_src && (c < aCount) && (c > max_aligned_len)");
       
  1462 			__DMA_ASSERTA(EFalse);
       
  1463 			}
       
  1464 
       
  1465 		// If this is not the last fragment...
       
  1466 		if (c < aCount)
       
  1467 			{
       
  1468 			const TUint es = src.iElementSize;
       
  1469 			const TUint fs = es * src.iElementsPerFrame;
       
  1470 			// If fs is !0 then es must be !0 as well (see
       
  1471 			// CheckTransferConfig).
       
  1472 			if (es)
       
  1473 				{
       
  1474 				r = AdjustFragmentSize(c, es, fs);
       
  1475 				if (r != KErrNone)
       
  1476 					{
       
  1477 					break;								 // while (aCount > 0);
       
  1478 					}
       
  1479 				}
       
  1480 			}
       
  1481 
  1097 		// Set transfer count for the PSL
  1482 		// Set transfer count for the PSL
  1098 		aTransferArgs.iTransferCount = c;
  1483 		aTransferArgs.iTransferCount = c;
  1099 		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
  1484 		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
  1100 										c, c, aCount, aCount));
  1485 										c, c, aCount, aCount));
  1101 		// Initialise fragment
  1486 		// Initialise fragment
  1105 			break;
  1490 			break;
  1106 			}
  1491 			}
  1107 		// Update for next iteration
  1492 		// Update for next iteration
  1108 		aCount -= c;
  1493 		aCount -= c;
  1109 		if (mem_src)
  1494 		if (mem_src)
       
  1495 			{
  1110 			src.iAddr += c;
  1496 			src.iAddr += c;
  1111 		}
  1497 			}
  1112 	while (aCount > 0);
  1498 		} while (aCount > 0);
  1113 
  1499 
  1114 	return r;
  1500 	return r;
  1115 	}
  1501 	}
  1116 
  1502 
  1117 
  1503 
  1118 TInt DDmaRequest::FragAsymDst(TDmaTransferArgs& aTransferArgs, TUint aCount,
  1504 TInt DDmaRequest::FragAsymDst(TDmaTransferArgs& aTransferArgs, TUint aCount,
  1119 							  TUint aMaxTransferLen)
  1505 							  TUint aMaxTransferLen)
  1120 	{
  1506 	{
       
  1507 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragAsymDst"));
       
  1508 
  1121 	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
  1509 	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
  1122 
       
  1123 	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);
  1510 	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);
  1124 
  1511 
  1125 	const TUint align_mask = iChannel.AddressAlignMask(dst.iFlags,
  1512 	const TUint align_mask = iChannel.AddressAlignMask(dst.iFlags,
  1126 													   dst.iElementSize,
  1513 													   dst.iElementSize,
  1127 													   aTransferArgs.iPslRequestInfo);
  1514 													   aTransferArgs.iPslRequestInfo);
       
  1515 	__KTRACE_OPT(KDMA, Kern::Printf("align_mask: 0x%x", align_mask));
       
  1516 
  1128 	// Memory buffers must satisfy alignment constraint
  1517 	// Memory buffers must satisfy alignment constraint
  1129 	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask) == 0));
  1518 	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask) == 0));
  1130 
  1519 
       
  1520 	// Max aligned length is used to make sure the beginnings of subtransfers
       
  1521 	// (i.e. fragments) are correctly aligned.
  1131 	const TUint max_aligned_len = (aMaxTransferLen & ~align_mask);
  1522 	const TUint max_aligned_len = (aMaxTransferLen & ~align_mask);
  1132 	__DMA_ASSERTD(max_aligned_len > 0);				  // bug in PSL if not true
  1523 	__KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len));
  1133 
  1524 	// Client and PSL sane?
       
  1525 	__DMA_ASSERTD(max_aligned_len > 0);
       
  1526 
       
  1527 	TInt r;
       
  1528 	// Revert any previous fragmentation attempt
  1134 	FreeDstDesList();
  1529 	FreeDstDesList();
  1135 	TInt r;
       
  1136 	do
  1530 	do
  1137 		{
  1531 		{
  1138 		// Allocate fragment
  1532 		// Allocate fragment
  1139 		r = ExpandDstDesList(/*1*/);
  1533 		r = ExpandDstDesList(/*1*/);
  1140 		if (r != KErrNone)
  1534 		if (r != KErrNone)
  1141 			{
  1535 			{
  1142 			break;
  1536 			break;
  1143 			}
  1537 			}
  1144 		// Compute fragment size
  1538 		// Compute fragment size
  1145 		TUint c = Min(aMaxTransferLen, aCount);
  1539 		TUint c = Min(aMaxTransferLen, aCount);
  1146 		if (mem_dst && !(dst.iFlags & KDmaPhysAddr))
  1540 		__KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c));
  1147 			{
  1541 
  1148 			__KTRACE_OPT(KDMA, Kern::Printf("mem_dst && !(dst.iFlags & KDmaPhysAddr)"));
  1542 		if (mem_dst && !(dst.iFlags & KDmaMemIsContiguous))
       
  1543 			{
  1149 			c = MaxPhysSize(dst.iAddr, c);
  1544 			c = MaxPhysSize(dst.iAddr, c);
  1150 			}
  1545 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(dst.iAddr, c) = %d", c));
       
  1546 			}
       
  1547 
  1151 		if (mem_dst && (c < aCount) && (c > max_aligned_len))
  1548 		if (mem_dst && (c < aCount) && (c > max_aligned_len))
  1152 			{
  1549 			{
  1153 			// This is not the last fragment of a transfer to memory.
  1550 			// This is not the last fragment of a transfer to memory.
  1154 			// We must round down the fragment size so the next one is
  1551 			// We must round down the fragment size so the next one is
  1155 			// correctly aligned.
  1552 			// correctly aligned.
  1156 			__KTRACE_OPT(KDMA, Kern::Printf("mem_dst && (c < aCount) && (c > max_aligned_len)"));
   1553 			c = max_aligned_len;
   1157 			c = max_aligned_len;
   1554 			__KTRACE_OPT(KDMA, Kern::Printf("c = max_aligned_len = %d", c));
  1158 			}
  1555 			// But can this condition actually occur if dst is properly aligned
       
  1556 			// to start with?
       
  1557 			//
       
  1558 			// If the target address is correctly aligned at the beginning of
       
  1559 			// the transfer then it doesn't seem possible to end up with a
       
  1560 			// fragment which is not quite the total remaining size (c <
       
  1561 			// aCount) but still larger than the greatest aligned length (c >
       
  1562 			// max_aligned_len).
       
  1563 			//
       
  1564 			// That's because address alignment values are always a power of
       
  1565 			// two (at least that's what we assume - otherwise
       
  1566 			// AddressAlignMask() doesn't work), and memory page sizes are also
       
  1567 			// always a power of two and hence a multiple of the alignment
       
  1568 			// value (as long as the alignment is not greater than the page
       
  1569 			// size, which seems a reasonable assumption regardless of the
       
  1570 			// actual page size). So if we start properly aligned anywhere in a
       
  1571 			// memory page then the number of bytes to the end of that page is
       
   1572 			// always a multiple of the alignment value - there's no remainder.
       
  1573 			//
       
   1574 			// So let's see if we ever hit this assertion (see the sketch after this function):
       
  1575 			Kern::Printf("Unexpected: mem_dst && (c < aCount) && (c > max_aligned_len)");
       
  1576 			__DMA_ASSERTA(EFalse);
       
  1577 			}
       
  1578 
       
  1579 		// If this is not the last fragment...
       
  1580 		if (c < aCount)
       
  1581 			{
       
  1582 			const TUint es = dst.iElementSize;
       
  1583 			const TUint fs = es * dst.iElementsPerFrame;
       
  1584 			// If fs is !0 then es must be !0 as well (see
       
  1585 			// CheckTransferConfig).
       
  1586 			if (es)
       
  1587 				{
       
  1588 				r = AdjustFragmentSize(c, es, fs);
       
  1589 				if (r != KErrNone)
       
  1590 					{
       
  1591 					break;								 // while (aCount > 0);
       
  1592 					}
       
  1593 				}
       
  1594 			}
       
  1595 
  1159 		// Set transfer count for the PSL
  1596 		// Set transfer count for the PSL
  1160 		aTransferArgs.iTransferCount = c;
  1597 		aTransferArgs.iTransferCount = c;
  1161 		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
  1598 		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
  1162 										c, c, aCount, aCount));
  1599 										c, c, aCount, aCount));
  1163 		// Initialise fragment
  1600 		// Initialise fragment
  1167 			break;
  1604 			break;
  1168 			}
  1605 			}
  1169 		// Update for next iteration
  1606 		// Update for next iteration
  1170 		aCount -= c;
  1607 		aCount -= c;
  1171 		if (mem_dst)
  1608 		if (mem_dst)
       
  1609 			{
  1172 			dst.iAddr += c;
  1610 			dst.iAddr += c;
       
  1611 			}
  1173 		}
  1612 		}
  1174 	while (aCount > 0);
  1613 	while (aCount > 0);
  1175 
  1614 
  1176 	return r;
  1615 	return r;
  1177 	}
  1616 	}
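
// [Editor's note] A standalone sketch (hypothetical, not Symbian code) of
// the power-of-two argument in the comment above: if an address is aligned
// to a power-of-two value and the MMU page size is a multiple of that
// value, then the distance from the address to the next page boundary is
// always a whole number of alignment units. Hence, for a properly aligned
// target, the page-limited fragment size can never be both less than the
// remaining count and larger than max_aligned_len.
#include <cassert>

int main()
	{
	const unsigned pageSize = 4096;					// always a power of two
	for (unsigned align = 1; align <= pageSize; align <<= 1)
		{
		const unsigned mask = align - 1;
		// Walk correctly aligned start addresses across two pages
		for (unsigned addr = 0; addr < 2 * pageSize; addr += align)
			{
			// Bytes from addr to the end of its MMU page...
			const unsigned toBoundary = pageSize - (addr & (pageSize - 1));
			// ...are always a multiple of the alignment - no remainder
			assert((toBoundary & mask) == 0);
			}
		}
	return 0;
	}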
  1178 
  1617 
  1179 
  1618 
       
  1619 TInt DDmaRequest::FragBalancedAsym(TDmaTransferArgs& aTransferArgs, TUint aCount,
       
  1620 								   TUint aMaxTransferLen)
       
  1621 	{
       
  1622 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragBalancedAsym"));
       
  1623 
       
  1624 	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
       
  1625 	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
       
  1626 	const TBool mem_src = (src.iFlags & KDmaMemAddr);
       
  1627 	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);
       
  1628 
       
  1629 	const TUint align_mask_src = iChannel.AddressAlignMask(src.iFlags,
       
  1630 														   src.iElementSize,
       
  1631 														   aTransferArgs.iPslRequestInfo);
       
  1632 	__KTRACE_OPT(KDMA, Kern::Printf("align_mask_src: 0x%x", align_mask_src));
       
  1633 	const TUint align_mask_dst = iChannel.AddressAlignMask(dst.iFlags,
       
  1634 														   dst.iElementSize,
       
  1635 														   aTransferArgs.iPslRequestInfo);
       
  1636 	__KTRACE_OPT(KDMA, Kern::Printf("align_mask_dst: 0x%x", align_mask_dst));
       
  1637 
       
  1638 	// Memory buffers must satisfy alignment constraint
       
  1639 	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask_src) == 0));
       
  1640 	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0));
       
  1641 
       
  1642 	// Max aligned length is used to make sure the beginnings of subtransfers
       
  1643 	// (i.e. fragments) are correctly aligned.
       
  1644 	const TUint max_aligned_len = (aMaxTransferLen &
       
  1645 								   ~(Max(align_mask_src, align_mask_dst)));
       
  1646 	__KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len));
       
  1647 	// Client and PSL sane?
       
  1648 	__DMA_ASSERTD(max_aligned_len > 0);
       
  1649 
       
  1650 	if (mem_src && mem_dst &&
       
  1651 		align_mask_src && align_mask_dst &&
       
  1652 		(align_mask_src != align_mask_dst) &&
       
  1653 		(!(src.iFlags & KDmaMemIsContiguous) || !(dst.iFlags & KDmaMemIsContiguous)))
       
  1654 		{
       
  1655 		// We don't support transfers which satisfy ALL of the following conditions:
       
  1656 		// 1) from memory to memory,
       
  1657 		// 2) both sides have address alignment requirements,
       
  1658 		// 3) those alignment requirements are not the same,
       
  1659 		// 4) the memory is non-contiguous on at least one end.
       
  1660 		//
       
  1661 		// [A 5th condition is that the channel doesn't support fully
       
  1662 		// asymmetric h/w descriptor lists,
       
  1663 		// i.e. TDmaChannel::DmacCaps::iAsymHwDescriptors is reported as EFalse
       
  1664 		// or iBalancedAsymSegments as ETrue. Hence this check is done in
       
  1665 		// FragSym() and FragBalancedAsym() but not in FragAsym().]
       
  1666 		//
       
  1667 		// The reason for this is that fragmentation could be impossible. The
       
  1668 		// memory layout (page break) on the side with the less stringent
       
  1669 		// alignment requirement can result in a misaligned target address on
       
  1670 		// the other side.
       
  1671 		//
       
  1672 		// Here is an example:
       
  1673 		//
       
  1674 		// src.iAddr =  3964 (0x0F7C), non-contiguous,
       
  1675 		// align_mask_src = 1 (alignment = 2 bytes)
       
  1676 		// dst.iAddr = 16384 (0x4000), contiguous,
       
  1677 		// align_mask_dst = 7 (alignment = 8 bytes)
       
  1678 		// count = max_xfer_len = 135 bytes
       
  1679 		// => max_aligned_len = 128 bytes
       
  1680 		//
       
  1681 		// Now, suppose MaxPhysSize() returns 132 bytes because src has 132
       
  1682 		// contiguous bytes to the end of its current mem page.
       
  1683 		// Trying to fragment this leads to:
       
  1684 		//
       
  1685 		// frag_1 = 128 bytes: src reads from 3964 (0x0F7C),
       
  1686 		//                     dst writes to 16384 (0x4000).
       
  1687 		// (Fragment 1 uses the max_aligned_len instead of 132 bytes because
       
  1688 		// otherwise the next fragment would start for the destination at
       
  1689 		// dst.iAddr + 132 = 16516 (0x4084), which is not 8-byte aligned.)
       
  1690 		//
       
  1691 		// frag_2 = 4 bytes: src reads from 4092 (0x0FFC),
       
  1692 		//                   dst writes to 16512 (0x4080).
       
  1693 		// (Fragment 2 uses just 4 bytes instead of the remaining 7 bytes
       
  1694 		// because there is a memory page break on the source side after 4 bytes.)
       
  1695 		//
       
  1696 		// frag_3 = 3 bytes: src reads from 4096 (0x1000),
       
  1697 		//                   dst writes to 16516 (0x4084).
       
  1698 		//
       
  1699 		// And there's the problem: the start address of frag_3 is going to be
       
  1700 		// misaligned for the destination side - it's not 8-byte aligned!
       
   1701 		// (A standalone sketch reproducing these numbers follows this function.)
       
  1702 		__KTRACE_OPT(KPANIC, Kern::Printf("Error: Different alignments for src & dst"
       
  1703 										  " + non-contiguous target(s)"));
       
  1704 		return KErrArgument;
       
  1705 		}
       
  1706 
       
  1707 	TInt r;
       
  1708 	// Revert any previous fragmentation attempt
       
  1709 	FreeSrcDesList();
       
  1710 	FreeDstDesList();
       
  1711 	do
       
  1712 		{
       
  1713 		// Allocate fragment
       
  1714 		r = ExpandSrcDesList(/*1*/);
       
  1715 		if (r != KErrNone)
       
  1716 			{
       
  1717 			break;
       
  1718 			}
       
  1719 		r = ExpandDstDesList(/*1*/);
       
  1720 		if (r != KErrNone)
       
  1721 			{
       
  1722 			break;
       
  1723 			}
       
  1724 		// Compute fragment size
       
  1725 		TUint c = Min(aMaxTransferLen, aCount);
       
  1726 		__KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c));
       
  1727 
       
  1728 		// SRC
       
  1729 		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
       
  1730 			{
       
  1731 			c = MaxPhysSize(src.iAddr, c);
       
  1732 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(src.iAddr, c) = %d", c));
       
  1733 			}
       
  1734 
       
  1735 		// DST
       
  1736 		if (mem_dst && !(dst.iFlags & KDmaMemIsContiguous))
       
  1737 			{
       
  1738 			c = MaxPhysSize(dst.iAddr, c);
       
  1739 			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(dst.iAddr, c) = %d", c));
       
  1740 			}
       
  1741 
       
  1742 		// SRC & DST
       
  1743 		if ((mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len))
       
  1744 			{
       
  1745 			// This is not the last fragment of a transfer to/from memory.
       
  1746 			// We must round down the fragment size so the next one is
       
  1747 			// correctly aligned.
       
  1748 			c = max_aligned_len;
       
  1749 			__KTRACE_OPT(KDMA, Kern::Printf("c = max_aligned_len = %d", c));
       
  1750 			//
       
  1751 			// But can this condition actually occur if src and dst are
       
  1752 			// properly aligned to start with?
       
  1753 			//
       
  1754 			// If we disallow unequal alignment requirements in connection with
       
  1755 			// non-contiguous memory buffers (see the long comment above in
       
  1756 			// this function for why) and if both target addresses are
       
  1757 			// correctly aligned at the beginning of the transfer then it
       
  1758 			// doesn't seem possible to end up with a fragment which is not
       
  1759 			// quite the total remaining size (c < aCount) but still larger
       
  1760 			// than the greatest aligned length (c > max_aligned_len).
       
  1761 			//
       
  1762 			// That's because address alignment values are always a power of
       
  1763 			// two (at least that's what we assume - otherwise
       
  1764 			// AddressAlignMask() doesn't work), and memory page sizes are also
       
  1765 			// always a power of two and hence a multiple of the alignment
       
  1766 			// value (as long as the alignment is not greater than the page
       
  1767 			// size, which seems a reasonable assumption regardless of the
       
  1768 			// actual page size). So if we start properly aligned anywhere in a
       
  1769 			// memory page then the number of bytes to the end of that page is
       
   1770 			// always a multiple of the alignment value - there's no remainder.
       
  1771 			//
       
  1772 			// So let's see if we ever hit this assertion:
       
  1773 			Kern::Printf("Unexpected: (mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len)");
       
  1774 			__DMA_ASSERTA(EFalse);
       
  1775 			}
       
  1776 
       
  1777 		// If this is not the last fragment...
       
  1778 		if (c < aCount)
       
  1779 			{
       
  1780 			const TUint es_src = src.iElementSize;
       
  1781 			const TUint es_dst = dst.iElementSize;
       
  1782 			const TUint fs_src = es_src * src.iElementsPerFrame;
       
  1783 			const TUint fs_dst = es_dst * dst.iElementsPerFrame;
       
  1784 			TUint c_prev;
       
  1785 			do
       
  1786 				{
       
  1787 				c_prev = c;
       
  1788 				// If fs_src is !0 then es_src must be !0 as well (see
       
  1789 				// CheckTransferConfig).
       
  1790 				if (es_src)
       
  1791 					{
       
  1792 					r = AdjustFragmentSize(c, es_src, fs_src);
       
  1793 					if (r != KErrNone)
       
  1794 						{
       
  1795 						break;							// while (c != c_prev);
       
  1796 						}
       
  1797 					}
       
  1798 				// If fs_dst is !0 then es_dst must be !0 as well (see
       
  1799 				// CheckTransferConfig).
       
  1800 				if (es_dst)
       
  1801 					{
       
  1802 					r = AdjustFragmentSize(c, es_dst, fs_dst);
       
  1803 					if (r != KErrNone)
       
  1804 						{
       
  1805 						break;							// while (c != c_prev);
       
  1806 						}
       
  1807 					}
       
  1808 				} while (c != c_prev);
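			// [Editor's note] The do/while above is a fixed-point iteration:
			// shrinking c to a whole number of source elements/frames can
			// break the destination's fit and vice versa, so both adjustments
			// repeat until a full pass leaves c unchanged. Assuming
			// AdjustFragmentSize() rounds down (hypothetical values): with
			// es_src=6, es_dst=8 and c=100, the first pass gives 96 (16 * 6,
			// which is also 12 * 8), and the next pass confirms the fixed
			// point.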
       
  1809 			if (r != KErrNone)
       
  1810 				{
       
  1811 				break;									 // while (aCount > 0);
       
  1812 				}
       
  1813 			}
       
  1814 
       
  1815 		// Set transfer count for the PSL
       
  1816 		aTransferArgs.iTransferCount = c;
       
  1817 		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
       
  1818 										c, c, aCount, aCount));
       
  1819 		// Initialise SRC fragment
       
  1820 		r = iChannel.iController->InitSrcHwDes(*iSrcLastHdr, aTransferArgs);
       
  1821 		if (r != KErrNone)
       
  1822 			{
       
  1823 			break;
       
  1824 			}
       
  1825 		// Initialise DST fragment
       
  1826 		r = iChannel.iController->InitDstHwDes(*iDstLastHdr, aTransferArgs);
       
  1827 		if (r != KErrNone)
       
  1828 			{
       
  1829 			break;
       
  1830 			}
       
  1831 		// Update for next iteration
       
  1832 		aCount -= c;
       
  1833 		if (mem_src)
       
  1834 			{
       
  1835 			src.iAddr += c;
       
  1836 			}
       
  1837 		if (mem_dst)
       
  1838 			{
       
  1839 			dst.iAddr += c;
       
  1840 			}
       
  1841 		}
       
  1842 	while (aCount > 0);
       
  1843 
       
  1844 	return r;
       
  1845 	}
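
// [Editor's note] A standalone sketch (hypothetical, not Symbian code)
// reproducing the numeric example in the comment above: fragmenting 135
// bytes from a non-contiguous, 2-byte-aligned source at 3964 to an
// 8-byte-aligned destination at 16384 necessarily produces a third
// fragment whose destination address is not 8-byte aligned, which is why
// such transfers are rejected with KErrArgument.
#include <cstdio>

// Simulated MaxPhysSize(): the source memory is contiguous only up to
// each 4096-byte page boundary
static unsigned MaxPhysSizeSim(unsigned aAddr, unsigned aMaxSize)
	{
	const unsigned toBoundary = 4096 - (aAddr & 4095);
	return (aMaxSize < toBoundary) ? aMaxSize : toBoundary;
	}

int main()
	{
	unsigned src = 3964, dst = 16384, count = 135;
	const unsigned maxAligned = 135 & ~7u;			// 128 (dst align mask is 7)
	while (count > 0)
		{
		unsigned c = MaxPhysSizeSim(src, count);
		if ((c < count) && (c > maxAligned))
			{
			c = maxAligned;							// keep the next frag aligned
			}
		std::printf("frag: %3u bytes  src=%5u  dst=%5u%s\n", c, src, dst,
					(dst & 7) ? "  <-- dst not 8-byte aligned" : "");
		src += c;
		dst += c;
		count -= c;
		}
	return 0;			// prints fragments of 128, 4 and 3 bytes, as above
	}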
       
  1846 
       
  1847 
  1180 EXPORT_C TInt DDmaRequest::Queue()
  1848 EXPORT_C TInt DDmaRequest::Queue()
  1181 	{
  1849 	{
  1182 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread()));
  1850 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread()));
  1183 	__DMA_ASSERTD(iDesCount > 0);	// Not configured? Call Fragment() first!
  1851 	// Not configured? Call Fragment() first!
       
  1852 	if (iChannel.iDmacCaps->iAsymHwDescriptors)
       
  1853 		{
       
   1854 		__DMA_ASSERTD((iSrcDesCount > 0) && (iDstDesCount > 0));
       
  1855 		}
       
  1856 	else
       
  1857 		{
       
  1858 		__DMA_ASSERTD(iDesCount > 0);
       
  1859 		}
  1184 	__DMA_ASSERTD(!iQueued);
  1860 	__DMA_ASSERTD(!iQueued);
  1185 
  1861 
  1186 	// Append request to queue and link new descriptor list to existing one.
  1862 	// Append request to queue and link new descriptor list to existing one.
  1187 	iChannel.Wait();
  1863 	iChannel.Wait();
  1188 
  1864 
  1268 	}
  1944 	}
  1269 
  1945 
  1270 
  1946 
  1271 EXPORT_C TInt DDmaRequest::ExpandDesList(TInt aCount)
  1947 EXPORT_C TInt DDmaRequest::ExpandDesList(TInt aCount)
  1272 	{
  1948 	{
       
  1949 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::ExpandDesList aCount=%d", aCount));
  1273 	return ExpandDesList(aCount, iDesCount, iFirstHdr, iLastHdr);
  1950 	return ExpandDesList(aCount, iDesCount, iFirstHdr, iLastHdr);
  1274 	}
  1951 	}
  1275 
  1952 
  1276 
  1953 
  1277 EXPORT_C TInt DDmaRequest::ExpandSrcDesList(TInt aCount)
  1954 EXPORT_C TInt DDmaRequest::ExpandSrcDesList(TInt aCount)
  1278 	{
  1955 	{
       
  1956 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::ExpandSrcDesList"));
  1279 	return ExpandDesList(aCount, iSrcDesCount, iSrcFirstHdr, iSrcLastHdr);
  1957 	return ExpandDesList(aCount, iSrcDesCount, iSrcFirstHdr, iSrcLastHdr);
  1280 	}
  1958 	}
  1281 
  1959 
  1282 
  1960 
  1283 EXPORT_C TInt DDmaRequest::ExpandDstDesList(TInt aCount)
  1961 EXPORT_C TInt DDmaRequest::ExpandDstDesList(TInt aCount)
  1284 	{
  1962 	{
       
  1963 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::ExpandDstDesList"));
  1285 	return ExpandDesList(aCount, iDstDesCount, iDstFirstHdr, iDstLastHdr);
  1964 	return ExpandDesList(aCount, iDstDesCount, iDstFirstHdr, iDstLastHdr);
  1286 	}
  1965 	}
  1287 
  1966 
  1288 
  1967 
  1289 TInt DDmaRequest::ExpandDesList(TInt aCount, TInt& aDesCount,
  1968 TInt DDmaRequest::ExpandDesList(TInt aCount, TInt& aDesCount,
  1290 								SDmaDesHdr*& aFirstHdr,
  1969 								SDmaDesHdr*& aFirstHdr,
  1291 								SDmaDesHdr*& aLastHdr)
  1970 								SDmaDesHdr*& aLastHdr)
  1292 	{
  1971 	{
       
  1972 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::ExpandDesList"));
  1293 	__DMA_ASSERTD(!iQueued);
  1973 	__DMA_ASSERTD(!iQueued);
  1294 	__DMA_ASSERTD(aCount > 0);
  1974 	__DMA_ASSERTD(aCount > 0);
  1295 
  1975 
  1296 	if (aCount > iChannel.iAvailDesCount)
  1976 	if (aCount > iChannel.iAvailDesCount)
  1297 		{
  1977 		{
  1338 	}
  2018 	}
  1339 
  2019 
  1340 
  2020 
  1341 EXPORT_C void DDmaRequest::FreeDesList()
  2021 EXPORT_C void DDmaRequest::FreeDesList()
  1342 	{
  2022 	{
       
  2023 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FreeDesList"));
  1343 	FreeDesList(iDesCount, iFirstHdr, iLastHdr);
  2024 	FreeDesList(iDesCount, iFirstHdr, iLastHdr);
  1344 	}
  2025 	}
  1345 
  2026 
  1346 
  2027 
  1347 EXPORT_C void DDmaRequest::FreeSrcDesList()
  2028 EXPORT_C void DDmaRequest::FreeSrcDesList()
  1348 	{
  2029 	{
       
  2030 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FreeSrcDesList"));
  1349 	FreeDesList(iSrcDesCount, iSrcFirstHdr, iSrcLastHdr);
  2031 	FreeDesList(iSrcDesCount, iSrcFirstHdr, iSrcLastHdr);
  1350 	}
  2032 	}
  1351 
  2033 
  1352 
  2034 
  1353 EXPORT_C void DDmaRequest::FreeDstDesList()
  2035 EXPORT_C void DDmaRequest::FreeDstDesList()
  1354 	{
  2036 	{
       
  2037 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FreeDstDesList"));
  1355 	FreeDesList(iDstDesCount, iDstFirstHdr, iDstLastHdr);
  2038 	FreeDesList(iDstDesCount, iDstFirstHdr, iDstLastHdr);
  1356 	}
  2039 	}
  1357 
  2040 
  1358 
  2041 
  1359 void DDmaRequest::FreeDesList(TInt& aDesCount, SDmaDesHdr*& aFirstHdr, SDmaDesHdr*& aLastHdr)
  2042 void DDmaRequest::FreeDesList(TInt& aDesCount, SDmaDesHdr*& aFirstHdr, SDmaDesHdr*& aLastHdr)
  1360 	{
  2043 	{
       
  2044 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FreeDesList count=%d", aDesCount));
  1361 	__DMA_ASSERTD(!iQueued);
  2045 	__DMA_ASSERTD(!iQueued);
  1362 
  2046 
  1363 	if (aDesCount > 0)
  2047 	if (aDesCount > 0)
  1364 		{
  2048 		{
  1365 		iChannel.iAvailDesCount += aDesCount;
  2049 		iChannel.iAvailDesCount += aDesCount;
  1366 		TDmac& c = *(iChannel.iController);
  2050 		TDmac& c = *(iChannel.iController);
  1367 		const SDmaDesHdr* hdr = aFirstHdr;
  2051 		const SDmaDesHdr* hdr = aFirstHdr;
  1368 		while (hdr)
  2052 		while (hdr)
  1369 			{
  2053 			{
       
  2054 			__DMA_ASSERTD(c.IsValidHdr(hdr));
       
  2055 
       
  2056 			// This (potential) PSL call doesn't follow the "overhead
       
  2057 			// principle", and something should be done about this.
  1370 			c.ClearHwDes(*hdr);
  2058 			c.ClearHwDes(*hdr);
  1371 			hdr = hdr->iNext;
  2059 			hdr = hdr->iNext;
   1372 			}
   2060 			}
       
  2061 
  1373 		c.Wait();
  2062 		c.Wait();
       
  2063 		__DMA_ASSERTD(c.IsValidHdr(c.iFreeHdr));
  1374 		aLastHdr->iNext = c.iFreeHdr;
  2064 		aLastHdr->iNext = c.iFreeHdr;
  1375 		c.iFreeHdr = aFirstHdr;
  2065 		c.iFreeHdr = aFirstHdr;
  1376 		c.Signal();
  2066 		c.Signal();
       
  2067 
  1377 		aFirstHdr = aLastHdr = NULL;
  2068 		aFirstHdr = aLastHdr = NULL;
  1378 		aDesCount = 0;
  2069 		aDesCount = 0;
  1379 		}
  2070 		}
  1380 	}
  2071 	}
  1381 
  2072 
  1382 
  2073 
  1383 EXPORT_C void DDmaRequest::EnableSrcElementCounting(TBool /*aResetElementCount*/)
  2074 EXPORT_C void DDmaRequest::EnableSrcElementCounting(TBool /*aResetElementCount*/)
  1384 	{
  2075 	{
       
  2076 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::EnableSrcElementCounting"));
       
  2077 
  1385 	// Not yet implemented.
  2078 	// Not yet implemented.
  1386 	return;
  2079 	return;
  1387 	}
  2080 	}
  1388 
  2081 
  1389 
  2082 
  1390 EXPORT_C void DDmaRequest::EnableDstElementCounting(TBool /*aResetElementCount*/)
  2083 EXPORT_C void DDmaRequest::EnableDstElementCounting(TBool /*aResetElementCount*/)
  1391 	{
  2084 	{
       
  2085 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::EnableDstElementCounting"));
       
  2086 
  1392 	// Not yet implemented.
  2087 	// Not yet implemented.
  1393 	return;
  2088 	return;
  1394 	}
  2089 	}
  1395 
  2090 
  1396 
  2091 
  1397 EXPORT_C void DDmaRequest::DisableSrcElementCounting()
  2092 EXPORT_C void DDmaRequest::DisableSrcElementCounting()
  1398 	{
  2093 	{
       
  2094 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::DisableSrcElementCounting"));
       
  2095 
  1399 	// Not yet implemented.
  2096 	// Not yet implemented.
  1400 	return;
  2097 	return;
  1401 	}
  2098 	}
  1402 
  2099 
  1403 
  2100 
  1404 EXPORT_C void DDmaRequest::DisableDstElementCounting()
  2101 EXPORT_C void DDmaRequest::DisableDstElementCounting()
  1405 	{
  2102 	{
       
  2103 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::DisableDstElementCounting"));
       
  2104 
  1406 	// Not yet implemented.
  2105 	// Not yet implemented.
  1407 	return;
  2106 	return;
  1408 	}
  2107 	}
  1409 
  2108 
  1410 
  2109 
  1411 EXPORT_C TUint32 DDmaRequest::TotalNumSrcElementsTransferred()
  2110 EXPORT_C TUint32 DDmaRequest::TotalNumSrcElementsTransferred()
  1412 	{
  2111 	{
       
  2112 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::TotalNumSrcElementsTransferred"));
       
  2113 
  1413 	// Not yet implemented.
  2114 	// Not yet implemented.
  1414 
  2115 
  1415 	// So far largely bogus code (just to touch some symbols)...
  2116 	// So far largely bogus code (just to touch some symbols)...
  1416 	iTotalNumSrcElementsTransferred = 0;
  2117 	iTotalNumSrcElementsTransferred = 0;
  1417 	TDmac& c = *(iChannel.iController);
  2118 	TDmac& c = *(iChannel.iController);
  1430 	}
  2131 	}
  1431 
  2132 
  1432 
  2133 
  1433 EXPORT_C TUint32 DDmaRequest::TotalNumDstElementsTransferred()
  2134 EXPORT_C TUint32 DDmaRequest::TotalNumDstElementsTransferred()
  1434 	{
  2135 	{
       
  2136 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::TotalNumDstElementsTransferred"));
       
  2137 
  1435 	// Not yet implemented.
  2138 	// Not yet implemented.
  1436 	return iTotalNumDstElementsTransferred;
  2139 	return iTotalNumDstElementsTransferred;
  1437 	}
  2140 	}
  1438 
  2141 
  1439 
  2142 
  1440 EXPORT_C TInt DDmaRequest::FragmentCount()
  2143 EXPORT_C TInt DDmaRequest::FragmentCount()
  1441 	{
  2144 	{
       
  2145 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragmentCount"));
  1442 	return FragmentCount(iFirstHdr);
  2146 	return FragmentCount(iFirstHdr);
  1443 	}
  2147 	}
  1444 
  2148 
  1445 
  2149 
  1446 EXPORT_C TInt DDmaRequest::SrcFragmentCount()
  2150 EXPORT_C TInt DDmaRequest::SrcFragmentCount()
  1447 	{
  2151 	{
       
  2152 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::SrcFragmentCount"));
  1448 	return FragmentCount(iSrcFirstHdr);
  2153 	return FragmentCount(iSrcFirstHdr);
  1449 	}
  2154 	}
  1450 
  2155 
  1451 
  2156 
  1452 EXPORT_C TInt DDmaRequest::DstFragmentCount()
  2157 EXPORT_C TInt DDmaRequest::DstFragmentCount()
  1453 	{
  2158 	{
       
  2159 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::DstFragmentCount"));
  1454 	return FragmentCount(iDstFirstHdr);
  2160 	return FragmentCount(iDstFirstHdr);
  1455 	}
  2161 	}
  1456 
  2162 
  1457 
  2163 
  1458 TInt DDmaRequest::FragmentCount(const SDmaDesHdr* aHdr)
  2164 TInt DDmaRequest::FragmentCount(const SDmaDesHdr* aHdr)
  1459 	{
  2165 	{
       
  2166 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragmentCount aHdr=0x%08x", aHdr));
  1460 	TInt count = 0;
  2167 	TInt count = 0;
  1461 	for (const SDmaDesHdr* pH = aHdr; pH != NULL; pH = pH->iNext)
  2168 	for (const SDmaDesHdr* pH = aHdr; pH != NULL; pH = pH->iNext)
  1462 		{
  2169 		{
  1463 		count++;
  2170 		count++;
  1464 		}
  2171 		}
  1470 // Called when request is removed from request queue in channel
  2177 // Called when request is removed from request queue in channel
  1471 //
  2178 //
  1472 inline void DDmaRequest::OnDeque()
  2179 inline void DDmaRequest::OnDeque()
  1473 	{
  2180 	{
  1474 	iQueued = EFalse;
  2181 	iQueued = EFalse;
  1475 	iLastHdr->iNext = NULL;
  2182 	if (iChannel.iDmacCaps->iAsymHwDescriptors)
  1476 	iChannel.DoUnlink(*iLastHdr);
  2183 		{
       
  2184 		iSrcLastHdr->iNext = NULL;
       
  2185 		iDstLastHdr->iNext = NULL;
       
  2186 		iChannel.DoUnlink(*iSrcLastHdr);
       
  2187 		iChannel.DoUnlink(*iDstLastHdr);
       
  2188 		}
       
  2189 	else
       
  2190 		{
       
  2191 		iLastHdr->iNext = NULL;
       
  2192 		iChannel.DoUnlink(*iLastHdr);
       
  2193 		}
  1477 	}
  2194 	}
  1478 
  2195 
  1479 
  2196 
  1480 #ifdef _DEBUG
  2197 #ifdef _DEBUG
  1481 void DDmaRequest::Invariant()
  2198 void DDmaRequest::Invariant()
  1505 			{
  2222 			{
  1506 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcFirstHdr));
  2223 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcFirstHdr));
  1507 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcLastHdr));
  2224 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcLastHdr));
  1508 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstFirstHdr));
  2225 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstFirstHdr));
  1509 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstLastHdr));
  2226 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstLastHdr));
       
  2227 			}
       
  2228 		if (iChannel.iDmacCaps->iBalancedAsymSegments)
       
  2229 			{
       
  2230 			__DMA_ASSERTD(iSrcDesCount == iDstDesCount);
  1510 			}
  2231 			}
  1511 		}
  2232 		}
  1512 	else
  2233 	else
  1513 		{
  2234 		{
  1514 		__DMA_ASSERTD((0 <= iDesCount) && (iDesCount <= iChannel.iMaxDesCount));
  2235 		__DMA_ASSERTD((0 <= iDesCount) && (iDesCount <= iChannel.iMaxDesCount));
  1552 	  iQueuedRequests(0),
  2273 	  iQueuedRequests(0),
  1553 	  iCancelInfo(NULL),
  2274 	  iCancelInfo(NULL),
  1554 	  iRedoRequest(EFalse),
  2275 	  iRedoRequest(EFalse),
  1555 	  iIsrCbRequest(EFalse)
  2276 	  iIsrCbRequest(EFalse)
  1556 	{
  2277 	{
  1557 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::TDmaChannel"));
  2278 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::TDmaChannel =0x%08X", this));
  1558 	__DMA_INVARIANT();
  2279 	__DMA_INVARIANT();
  1559 	}
  2280 	}
  1560 
  2281 
  1561 
  2282 
  1562 //
  2283 //
  1564 //
  2285 //
  1565 EXPORT_C TInt TDmaChannel::Open(const SCreateInfo& aInfo, TDmaChannel*& aChannel)
  2286 EXPORT_C TInt TDmaChannel::Open(const SCreateInfo& aInfo, TDmaChannel*& aChannel)
  1566 	{
  2287 	{
  1567 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Open thread %O", &Kern::CurrentThread()));
  2288 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Open thread %O", &Kern::CurrentThread()));
  1568 
  2289 
  1569 	__DMA_ASSERTD(aInfo.iDesCount >= 1);
  2290 	if (aInfo.iDesCount < 1)
       
  2291 		{
       
   2292 		__KTRACE_OPT(KPANIC, Kern::Printf("DMA channel failed to open: iDesCount < 1"));
       
  2293 		return KErrArgument;
       
  2294 		}
       
  2295 
  1570 	__DMA_ASSERTD(aInfo.iPriority <= KDmaPriority8);
  2296 	__DMA_ASSERTD(aInfo.iPriority <= KDmaPriority8);
  1571 	__DMA_ASSERTD(aInfo.iDfcQ != NULL);
  2297 	__DMA_ASSERTD(aInfo.iDfcQ != NULL);
  1572 	__DMA_ASSERTD(aInfo.iDfcPriority < KNumDfcPriorities);
  2298 	__DMA_ASSERTD(aInfo.iDfcPriority < KNumDfcPriorities);
  1573 
  2299 
  1574 	aChannel = NULL;
  2300 	aChannel = NULL;
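
// [Editor's note] A hedged, client-side sketch of opening a channel with
// the parameters validated above. Only iDesCount, iPriority, iDfcQ and
// iDfcPriority appear in the checks here; the iCookie field and its value
// are assumptions about the PIL header and the PSL, purely illustrative.
//
//	TDmaChannel::SCreateInfo info;
//	info.iCookie = 0;							// PSL channel id (assumption)
//	info.iDesCount = 16;						// KErrArgument if < 1
//	info.iPriority = KDmaPriority8;				// highest value allowed above
//	info.iDfcQ = Kern::DfcQue0();				// must be non-NULL
//	info.iDfcPriority = 3;						// must be < KNumDfcPriorities
//	TDmaChannel* channel = NULL;
//	TInt r = TDmaChannel::Open(info, channel);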
  1758 EXPORT_C TInt TDmaChannel::IsrRedoRequest(TUint32 aSrcAddr, TUint32 aDstAddr,
  2484 EXPORT_C TInt TDmaChannel::IsrRedoRequest(TUint32 aSrcAddr, TUint32 aDstAddr,
  1759 										  TUint aTransferCount,
  2485 										  TUint aTransferCount,
  1760 										  TUint32 aPslRequestInfo,
  2486 										  TUint32 aPslRequestInfo,
  1761 										  TBool aIsrCb)
  2487 										  TBool aIsrCb)
  1762 	{
  2488 	{
  1763 	__KTRACE_OPT(KDMA,
  2489 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::IsrRedoRequest src=0x%08X, "
  1764 				 Kern::Printf("TDmaChannel::IsrRedoRequest src=0x%08x, "
  2490 									"dst=0x%08X, count=%d, pslInfo=0x%08X, isrCb=%d",
  1765 							  "dst=0x%08x, count=%d, pslInfo=0x%08x, isrCb=%d",
  2491 									aSrcAddr, aDstAddr, aTransferCount, aPslRequestInfo,
  1766 							  aSrcAddr, aDstAddr, aTransferCount, aPslRequestInfo,
  2492 									aIsrCb));
  1767 							  aIsrCb));
       
  1768 	// Function needs to be called in ISR context.
  2493 	// Function needs to be called in ISR context.
  1769 	__DMA_ASSERTD(NKern::CurrentContext() == NKern::EInterrupt);
  2494 	__DMA_ASSERTD(NKern::CurrentContext() == NKern::EInterrupt);
  1770 
  2495 
  1771 	__DMA_ASSERTD(!iReqQ.IsEmpty());
  2496 	__DMA_ASSERTD(!iReqQ.IsEmpty());
  1772 	__DMA_ASSERTD(iIsrCbRequest);
  2497 	__DMA_ASSERTD(iIsrCbRequest);
  1773 
  2498 
  1774 #ifdef _DEBUG
  2499 #ifdef _DEBUG
  1775 	if ((aSrcAddr != KPhysAddrInvalid) && (aSrcAddr == aDstAddr))
  2500 	if ((aSrcAddr != KPhysAddrInvalid) && (aSrcAddr == aDstAddr))
  1776 		{
  2501 		{
  1777 		__KTRACE_OPT(KPANIC,
  2502 		__KTRACE_OPT(KPANIC,
  1778 					 Kern::Printf("Error: Updating src & dst to same address: 0x%08x",
  2503 					 Kern::Printf("Error: Updating src & dst to same address: 0x%08X",
  1779 								  aSrcAddr));
  2504 								  aSrcAddr));
  1780 		return KErrArgument;
  2505 		return KErrArgument;
  1781 		}
  2506 		}
  1782 #endif
  2507 #endif
  1783 
  2508 
  1789 	DDmaRequest* const pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);
  2514 	DDmaRequest* const pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);
  1790 	TInt r;
  2515 	TInt r;
  1791 
  2516 
  1792 	if (iDmacCaps->iAsymHwDescriptors)
  2517 	if (iDmacCaps->iAsymHwDescriptors)
  1793 		{
  2518 		{
  1794 		// We don't allow multiple-descriptor chains to be updated here
  2519 		// We don't allow multiple-descriptor chains to be updated here.
       
   2520 		// That we panic here (instead of returning an error), and do so

   2521 		// only in UDEB builds (instead of always), is not ideal, but this

   2522 		// is done in the interest of performance.
  1795 		__DMA_ASSERTD((pCurReq->iSrcDesCount == 1) && (pCurReq->iDstDesCount == 1));
  2523 		__DMA_ASSERTD((pCurReq->iSrcDesCount == 1) && (pCurReq->iDstDesCount == 1));
       
  2524 
  1796 		// Adjust parameters if necessary (asymmetrical s/g variety)
  2525 		// Adjust parameters if necessary (asymmetrical s/g variety)
  1797 		const SDmaDesHdr* const pSrcFirstHdr = pCurReq->iSrcFirstHdr;
  2526 		const SDmaDesHdr* const pSrcFirstHdr = pCurReq->iSrcFirstHdr;
  1798 		if ((aSrcAddr != KPhysAddrInvalid) || aTransferCount || aPslRequestInfo)
  2527 		if ((aSrcAddr != KPhysAddrInvalid) || aTransferCount || aPslRequestInfo)
  1799 			{
  2528 			{
  1800 			r = iController->UpdateSrcHwDes(*pSrcFirstHdr, aSrcAddr,
  2529 			r = iController->UpdateSrcHwDes(*pSrcFirstHdr, aSrcAddr,
  1819 		// Reschedule the request
  2548 		// Reschedule the request
  1820 		iController->Transfer(*this, *pSrcFirstHdr, *pDstFirstHdr);
  2549 		iController->Transfer(*this, *pSrcFirstHdr, *pDstFirstHdr);
  1821 		}
  2550 		}
  1822 	else
  2551 	else
  1823 		{
  2552 		{
  1824 		// We don't allow multiple-descriptor chains to be updated here
  2553 		// We don't allow a multiple-descriptor chain to be updated here.
       
   2554 		// That we panic here (instead of returning an error), and do so

   2555 		// only in UDEB builds (instead of always), is not ideal, but this

   2556 		// is done in the interest of performance.
  1825 		__DMA_ASSERTD(pCurReq->iDesCount == 1);
  2557 		__DMA_ASSERTD(pCurReq->iDesCount == 1);
       
  2558 
  1826 		// Adjust parameters if necessary (symmetrical s/g and non-s/g variety)
  2559 		// Adjust parameters if necessary (symmetrical s/g and non-s/g variety)
  1827 		const SDmaDesHdr* const pFirstHdr = pCurReq->iFirstHdr;
  2560 		const SDmaDesHdr* const pFirstHdr = pCurReq->iFirstHdr;
  1828 		if ((aSrcAddr != KPhysAddrInvalid) || (aDstAddr != KPhysAddrInvalid) ||
  2561 		if ((aSrcAddr != KPhysAddrInvalid) || (aDstAddr != KPhysAddrInvalid) ||
  1829 			aTransferCount || aPslRequestInfo)
  2562 			aTransferCount || aPslRequestInfo)
  1830 			{
  2563 			{
  1851 	}
  2584 	}
  1852 
  2585 
  1853 
  2586 
  1854 EXPORT_C TInt TDmaChannel::FailNext(TInt /*aFragmentCount*/)
  2587 EXPORT_C TInt TDmaChannel::FailNext(TInt /*aFragmentCount*/)
  1855 	{
  2588 	{
       
  2589 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::FailNext"));
  1856 	return iController->FailNext(*this);
  2590 	return iController->FailNext(*this);
  1857 	}
  2591 	}
  1858 
  2592 
  1859 
  2593 
  1860 EXPORT_C TInt TDmaChannel::MissNextInterrupts(TInt aInterruptCount)
  2594 EXPORT_C TInt TDmaChannel::MissNextInterrupts(TInt aInterruptCount)
  1861 	{
  2595 	{
       
  2596 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::MissNextInterrupts"));
  1862 	return iController->MissNextInterrupts(*this, aInterruptCount);
  2597 	return iController->MissNextInterrupts(*this, aInterruptCount);
  1863 	}
  2598 	}
  1864 
  2599 
  1865 
  2600 
  1866 EXPORT_C TInt TDmaChannel::Extension(TInt aCmd, TAny* aArg)
  2601 EXPORT_C TInt TDmaChannel::Extension(TInt aCmd, TAny* aArg)
  1867 	{
  2602 	{
       
  2603 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Extension"));
  1868 	return iController->Extension(*this, aCmd, aArg);
  2604 	return iController->Extension(*this, aCmd, aArg);
  1869 	}
  2605 	}
  1870 
  2606 
  1871 
  2607 
  1872 //
  2608 //
  1873 // static member function
  2609 // static member function
  1874 //
  2610 //
  1875 EXPORT_C TInt TDmaChannel::StaticExtension(TInt aCmd, TAny* aArg)
  2611 EXPORT_C TInt TDmaChannel::StaticExtension(TInt aCmd, TAny* aArg)
  1876 	{
  2612 	{
       
  2613 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::StaticExtension"));
  1877 	return DmaChannelMgr::StaticExtension(aCmd, aArg);
  2614 	return DmaChannelMgr::StaticExtension(aCmd, aArg);
  1878 	}
  2615 	}
  1879 
  2616 
  1880 
  2617 
  1881 EXPORT_C TUint TDmaChannel::MaxTransferLength(TUint aSrcFlags, TUint aDstFlags,
  2618 EXPORT_C TUint TDmaChannel::MaxTransferLength(TUint aSrcFlags, TUint aDstFlags,
  1930 
  2667 
  1931 	while (count && !stop)
  2668 	while (count && !stop)
  1932 		{
  2669 		{
  1933 		--count;
  2670 		--count;
  1934 
  2671 
  1935 		__DMA_ASSERTD(!iReqQ.IsEmpty());
  2672 		__DMA_ASSERTA(!iReqQ.IsEmpty());
  1936 
  2673 
  1937 		// If an error occurred it must have been reported on the last
  2674 		// If an error occurred it must have been reported on the last
  1938 		// interrupt since transfers are suspended after an error.
  2675 		// interrupt since transfers are suspended after an error.
  1939 		DDmaRequest::TResult const res = (count == 0 && error) ?
  2676 		DDmaRequest::TResult const res = (count == 0 && error) ?
  1940 			DDmaRequest::EError : DDmaRequest::EOk;
  2677 			DDmaRequest::EError : DDmaRequest::EOk;
  1943 
  2680 
  1944 		if (res == DDmaRequest::EOk)
  2681 		if (res == DDmaRequest::EOk)
  1945 			{
  2682 			{
  1946 			// Update state machine, current fragment, completed fragment and
  2683 			// Update state machine, current fragment, completed fragment and
  1947 			// tell the DMAC to transfer the next fragment if necessary.
  2684 			// tell the DMAC to transfer the next fragment if necessary.
  1948 			SDmaDesHdr* pCompletedHdr = NULL;
  2685 			TBool complete;
  1949 			DoDfc(const_cast<const DDmaRequest&>(*pCurReq), pCompletedHdr);
  2686 			if (iDmacCaps->iAsymHwDescriptors)
  1950 
  2687 				{
       
  2688 				SDmaDesHdr* pCompletedSrcHdr = NULL;
       
  2689 				SDmaDesHdr* pCompletedDstHdr = NULL;
       
  2690 				DoDfc(const_cast<const DDmaRequest&>(*pCurReq),
       
  2691 					  pCompletedSrcHdr, pCompletedDstHdr);
       
  2692 				// We don't support asymmetrical ISR notifications and request
       
  2693 				// completions yet, hence we can do the following assert test
       
  2694 				// here; also 'complete' is determined equally by either the
       
  2695 				// SRC or DST side.
       
  2696 				__DMA_ASSERTD(!LOGICAL_XOR((pCompletedSrcHdr == pCurReq->iSrcLastHdr),
       
  2697 										   (pCompletedDstHdr == pCurReq->iDstLastHdr)));
       
  2698 				complete = (pCompletedDstHdr == pCurReq->iDstLastHdr);
       
  2699 				}
       
  2700 			else
       
  2701 				{
       
  2702 				SDmaDesHdr* pCompletedHdr = NULL;
       
  2703 				DoDfc(const_cast<const DDmaRequest&>(*pCurReq), pCompletedHdr);
       
  2704 				complete = (pCompletedHdr == pCurReq->iLastHdr);
       
  2705 				}
  1951 			// If just completed last fragment from current request, switch to
  2706 			// If just completed last fragment from current request, switch to
  1952 			// next request (if any).
  2707 			// next request (if any).
  1953 			if (pCompletedHdr == pCurReq->iLastHdr)
  2708 			if (complete)
  1954 				{
  2709 				{
  1955 				pCompletedReq = pCurReq;
  2710 				pCompletedReq = pCurReq;
  1956 				pCurReq->iLink.Deque();
  2711 				pCurReq->iLink.Deque();
  1957 				iQueuedRequests--;
  2712 				iQueuedRequests--;
  1958 				if (iReqQ.IsEmpty())
  2713 				if (iReqQ.IsEmpty())