725 iQueued(EFalse), |
745 iQueued(EFalse), |
726 iMaxTransferSize(aMaxTransferSize), |
746 iMaxTransferSize(aMaxTransferSize), |
727 iTotalNumSrcElementsTransferred(0), |
747 iTotalNumSrcElementsTransferred(0), |
728 iTotalNumDstElementsTransferred(0) |
748 iTotalNumDstElementsTransferred(0) |
729 { |
749 { |
|
750 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::DDmaRequest =0x%08X (new style)", this)); |
730 __e32_atomic_add_ord32(&iChannel.iReqCount, 1); |
751 __e32_atomic_add_ord32(&iChannel.iReqCount, 1); |
731 __DMA_INVARIANT(); |
752 __DMA_INVARIANT(); |
732 } |
753 } |
733 |
754 |
734 |
755 |
735 EXPORT_C DDmaRequest::~DDmaRequest() |
756 EXPORT_C DDmaRequest::~DDmaRequest() |
736 { |
757 { |
|
758 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::~DDmaRequest")); |
737 __DMA_ASSERTD(!iQueued); |
759 __DMA_ASSERTD(!iQueued); |
738 __DMA_INVARIANT(); |
760 __DMA_INVARIANT(); |
739 FreeDesList(); |
761 if (iChannel.iDmacCaps->iAsymHwDescriptors) |
|
762 { |
|
763 FreeSrcDesList(); |
|
764 FreeDstDesList(); |
|
765 } |
|
766 else |
|
767 { |
|
768 FreeDesList(); |
|
769 } |
740 __e32_atomic_add_ord32(&iChannel.iReqCount, TUint32(-1)); |
770 __e32_atomic_add_ord32(&iChannel.iReqCount, TUint32(-1)); |
741 } |
771 } |
742 |
772 |
743 |
773 |
744 EXPORT_C TInt DDmaRequest::Fragment(TUint32 aSrc, TUint32 aDest, TInt aCount, |
774 EXPORT_C TInt DDmaRequest::Fragment(TUint32 aSrc, TUint32 aDest, TInt aCount, |
745 TUint aFlags, TUint32 aPslInfo) |
775 TUint aFlags, TUint32 aPslInfo) |
746 { |
776 { |
747 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O " |
777 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O (old style)", |
748 "src=0x%08X dest=0x%08X count=%d flags=0x%X psl=0x%08X", |
778 &Kern::CurrentThread())); |
749 &Kern::CurrentThread(), aSrc, aDest, aCount, aFlags, aPslInfo)); |
779 |
750 __DMA_ASSERTD(aCount > 0); |
780 __DMA_ASSERTD(aCount > 0); |
751 |
781 |
752 TDmaTransferArgs args(aSrc, aDest, aCount, aFlags, aPslInfo); |
782 TDmaTransferArgs args(aSrc, aDest, aCount, aFlags, aPslInfo); |
753 |
783 |
754 return Frag(args); |
784 return Frag(args); |
755 } |
785 } |
756 |
786 |
757 |
787 |
758 EXPORT_C TInt DDmaRequest::Fragment(const TDmaTransferArgs& aTransferArgs) |
788 EXPORT_C TInt DDmaRequest::Fragment(const TDmaTransferArgs& aTransferArgs) |
759 { |
789 { |
760 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O", &Kern::CurrentThread())); |
790 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O (new style)", |
|
791 &Kern::CurrentThread())); |
761 |
792 |
762 // Writable temporary working copy of the transfer arguments. |
793 // Writable temporary working copy of the transfer arguments. |
763 // We need this because we may have to modify some fields before passing it |
794 // We need this because we may have to modify some fields before passing it |
764 // to the PSL (for example iChannelCookie, iTransferCount, |
795 // to the PSL (for example iChannelCookie, iTransferCount, |
765 // iDstConfig::iAddr, and iSrcConfig::iAddr). |
796 // iDstConfig::iAddr, and iSrcConfig::iAddr). |
767 |
798 |
768 return Frag(args); |
799 return Frag(args); |
769 } |
800 } |
770 |
801 |
771 |
802 |
772 TUint DDmaRequest::GetTransferCount(const TDmaTransferArgs& aTransferArgs) |
803 TInt DDmaRequest::CheckTransferConfig(const TDmaTransferConfig& aTarget, TUint aCount) const |
773 { |
804 { |
|
805 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::CheckTransferConfig")); |
|
806 |
|
807 if (aTarget.iElementSize != 0) |
|
808 { |
|
809 if ((aCount % aTarget.iElementSize) != 0) |
|
810 { |
|
811 // 2, 7 (These strange numbers refer to some test cases documented |
|
812 // elsewhere - they will be removed eventually.) |
|
813 __KTRACE_OPT(KPANIC, |
|
814 Kern::Printf("Error: ((aCount %% iElementSize) != 0)")); |
|
815 return KErrArgument; |
|
816 } |
|
817 if (aTarget.iElementsPerFrame != 0) |
|
818 { |
|
819 if ((aTarget.iElementSize * aTarget.iElementsPerFrame * |
|
820 aTarget.iFramesPerTransfer) != aCount) |
|
821 { |
|
822 // 3, 8 |
|
823 __KTRACE_OPT(KPANIC, |
|
824 Kern::Printf("Error: ((iElementSize * " |
|
825 "iElementsPerFrame * " |
|
826 "iFramesPerTransfer) != aCount)")); |
|
827 return KErrArgument; |
|
828 } |
|
829 } |
|
830 } |
|
831 else |
|
832 { |
|
833 if (aTarget.iElementsPerFrame != 0) |
|
834 { |
|
835 // 4, 9 |
|
836 __KTRACE_OPT(KPANIC, |
|
837 Kern::Printf("Error: (iElementsPerFrame != 0)")); |
|
838 return KErrArgument; |
|
839 } |
|
840 if (aTarget.iFramesPerTransfer != 0) |
|
841 { |
|
842 // 5, 10 |
|
843 __KTRACE_OPT(KPANIC, |
|
844 Kern::Printf("Error: (iFramesPerTransfer != 0)")); |
|
845 return KErrArgument; |
|
846 } |
|
847 if (aTarget.iElementsPerPacket != 0) |
|
848 { |
|
849 // 6, 11 |
|
850 __KTRACE_OPT(KPANIC, |
|
851 Kern::Printf("Error: (iElementsPerPacket != 0)")); |
|
852 return KErrArgument; |
|
853 } |
|
854 } |
|
855 return KErrNone; |
|
856 } |
|
857 |
|
858 |
|
859 TInt DDmaRequest::CheckMemFlags(const TDmaTransferConfig& aTarget, TUint aCount) const |
|
860 { |
|
861 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::CheckMemFlags")); |
|
862 |
|
863 const TBool mem_target = (aTarget.iFlags & KDmaMemAddr); |
|
864 |
|
865 if (mem_target && (aTarget.iFlags & KDmaPhysAddr) && !(aTarget.iFlags & KDmaMemIsContiguous)) |
|
866 { |
|
867 // Physical memory address implies contiguous range |
|
868 // 13, 15 |
|
869 __KTRACE_OPT(KPANIC, Kern::Printf("Error: mem_target && KDmaPhysAddr && !KDmaMemIsContiguous")); |
|
870 return KErrArgument; |
|
871 } |
|
872 else if ((aTarget.iFlags & KDmaMemIsContiguous) && !mem_target) |
|
873 { |
|
874 // Contiguous range implies memory address |
|
875 // 14, 16 |
|
876 __KTRACE_OPT(KPANIC, Kern::Printf("Error: KDmaMemIsContiguous && !mem_target")); |
|
877 return KErrArgument; |
|
878 } |
|
879 return KErrNone; |
|
880 } |
|
881 |
|
882 |
|
883 // Makes sure an element or frame never straddles two DMA subtransfer |
|
884 // fragments. This would be a fragmentation error by the PIL. |
|
885 // |
|
886 TInt DDmaRequest::AdjustFragmentSize(TUint& aFragSize, TUint aElementSize, |
|
887 TUint aFrameSize) |
|
888 { |
|
889 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::AdjustFragmentSize FragSize=%d ES=%d FS=%d", |
|
890 aFragSize, aElementSize, aFrameSize)); |
|
891 |
|
892 TUint rem = 0; |
|
893 TInt r = KErrNone; |
|
894 |
|
895 while (1) |
|
896 { |
|
897 // If an element size is defined, make sure the fragment size is |
|
898 // greater or equal. |
|
899 if (aElementSize) |
|
900 { |
|
901 if (aFragSize < aElementSize) |
|
902 { |
|
903 __KTRACE_OPT(KPANIC, Kern::Printf("Error: aFragSize < aElementSize")); |
|
904 r = KErrArgument; |
|
905 break; |
|
906 } |
|
907 } |
|
908 // If a frame size is defined, make sure the fragment size is greater |
|
909 // or equal. |
|
910 if (aFrameSize) |
|
911 { |
|
912 if (aFragSize < aFrameSize) |
|
913 { |
|
914 __KTRACE_OPT(KPANIC, Kern::Printf("Error: aFragSize < aFrameSize")); |
|
915 r = KErrArgument; |
|
916 break; |
|
917 } |
|
918 } |
|
919 // If a frame size is defined, make sure the fragment ends on a frame |
|
920 // boundary. |
|
921 if (aFrameSize) |
|
922 { |
|
923 rem = aFragSize % aFrameSize; |
|
924 if (rem != 0) |
|
925 { |
|
926 aFragSize -= rem; |
|
927 // 20, 22 |
|
928 __KTRACE_OPT(KDMA, Kern::Printf("aFragSize %% aFrameSize != 0 --> aFragSize = %d", |
|
929 aFragSize)); |
|
930 // aFragSize has changed, so we have to do all the checks |
|
931 // again. |
|
932 continue; |
|
933 } |
|
934 } |
|
935 // If an element size is defined, make sure the fragment ends on an |
|
936 // element boundary. |
|
937 if (aElementSize) |
|
938 { |
|
939 rem = aFragSize % aElementSize; |
|
940 if (rem != 0) |
|
941 { |
|
942 aFragSize -= rem; |
|
943 // 21, 23 |
|
944 __KTRACE_OPT(KDMA, Kern::Printf("aFragSize %% aElementSize != 0 --> aFragSize = %d", |
|
945 aFragSize)); |
|
946 // aFragSize has changed, so we have to do all the checks |
|
947 // again. |
|
948 continue; |
|
949 } |
|
950 } |
|
951 // Done - all checks passed. Let's get out. |
|
952 break; |
|
953 } |
|
954 |
|
955 return r; |
|
956 } |
|
957 |
|
958 |
|
959 TUint DDmaRequest::GetTransferCount(const TDmaTransferArgs& aTransferArgs) const |
|
960 { |
|
961 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::GetTransferCount")); |
|
962 |
774 const TDmaTransferConfig& src = aTransferArgs.iSrcConfig; |
963 const TDmaTransferConfig& src = aTransferArgs.iSrcConfig; |
|
964 #ifdef _DEBUG |
775 const TDmaTransferConfig& dst = aTransferArgs.iDstConfig; |
965 const TDmaTransferConfig& dst = aTransferArgs.iDstConfig; |
|
966 #endif // #ifdef _DEBUG |
776 |
967 |
777 TUint count = aTransferArgs.iTransferCount; |
968 TUint count = aTransferArgs.iTransferCount; |
778 if (count == 0) |
969 if (count == 0) |
779 { |
970 { |
780 __KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == 0")); |
971 __KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == 0")); |
781 count = src.iElementSize * src.iElementsPerFrame * |
972 count = src.iElementSize * src.iElementsPerFrame * |
782 src.iFramesPerTransfer; |
973 src.iFramesPerTransfer; |
|
974 #ifdef _DEBUG |
783 const TUint dst_cnt = dst.iElementSize * dst.iElementsPerFrame * |
975 const TUint dst_cnt = dst.iElementSize * dst.iElementsPerFrame * |
784 dst.iFramesPerTransfer; |
976 dst.iFramesPerTransfer; |
785 if (count != dst_cnt) |
977 if (count != dst_cnt) |
786 { |
978 { |
|
979 // 1 |
787 __KTRACE_OPT(KPANIC, Kern::Printf("Error: (count != dst_cnt)")); |
980 __KTRACE_OPT(KPANIC, Kern::Printf("Error: (count != dst_cnt)")); |
788 return 0; |
981 return 0; |
789 } |
982 } |
|
983 #endif // #ifdef _DEBUG |
790 } |
984 } |
791 else |
985 else |
792 { |
986 { |
793 __KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == %d", count)); |
987 __KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == %d", count)); |
|
988 #ifdef _DEBUG |
794 // Client shouldn't specify contradictory or incomplete things |
989 // Client shouldn't specify contradictory or incomplete things |
795 if (src.iElementSize != 0) |
990 if (CheckTransferConfig(src, count) != KErrNone) |
796 { |
991 { |
797 if ((count % src.iElementSize) != 0) |
992 __KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckTransferConfig(src)")); |
798 { |
993 return 0; |
799 __KTRACE_OPT(KPANIC, |
994 } |
800 Kern::Printf("Error: ((count %% src.iElementSize) != 0)")); |
995 if (CheckTransferConfig(dst, count) != KErrNone) |
801 return 0; |
996 { |
802 } |
997 __KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckTransferConfig(dst)")); |
803 if (src.iElementsPerFrame != 0) |
998 return 0; |
804 { |
999 } |
805 if ((src.iElementSize * src.iElementsPerFrame * src.iFramesPerTransfer) != count) |
1000 #endif // #ifdef _DEBUG |
806 { |
|
807 __KTRACE_OPT(KPANIC, |
|
808 Kern::Printf("Error: ((src.iElementSize * " |
|
809 "src.iElementsPerFrame * " |
|
810 "src.iFramesPerTransfer) != count)")); |
|
811 return 0; |
|
812 } |
|
813 } |
|
814 } |
|
815 else |
|
816 { |
|
817 if (src.iElementsPerFrame != 0) |
|
818 { |
|
819 __KTRACE_OPT(KPANIC, |
|
820 Kern::Printf("Error: (src.iElementsPerFrame != 0)")); |
|
821 return 0; |
|
822 } |
|
823 if (src.iFramesPerTransfer != 0) |
|
824 { |
|
825 __KTRACE_OPT(KPANIC, |
|
826 Kern::Printf("Error: (src.iFramesPerTransfer != 0)")); |
|
827 return 0; |
|
828 } |
|
829 if (src.iElementsPerPacket != 0) |
|
830 { |
|
831 __KTRACE_OPT(KPANIC, |
|
832 Kern::Printf("Error: (src.iElementsPerPacket != 0)")); |
|
833 return 0; |
|
834 } |
|
835 } |
|
836 if (dst.iElementSize != 0) |
|
837 { |
|
838 if ((count % dst.iElementSize) != 0) |
|
839 { |
|
840 __KTRACE_OPT(KPANIC, |
|
841 Kern::Printf("Error: ((count %% dst.iElementSize) != 0)")); |
|
842 return 0; |
|
843 } |
|
844 if (dst.iElementsPerFrame != 0) |
|
845 { |
|
846 if ((dst.iElementSize * dst.iElementsPerFrame * dst.iFramesPerTransfer) != count) |
|
847 { |
|
848 __KTRACE_OPT(KPANIC, |
|
849 Kern::Printf("Error: ((dst.iElementSize * " |
|
850 "dst.iElementsPerFrame * " |
|
851 "dst.iFramesPerTransfer) != count)")); |
|
852 return 0; |
|
853 } |
|
854 } |
|
855 } |
|
856 else |
|
857 { |
|
858 if (dst.iElementsPerFrame != 0) |
|
859 { |
|
860 __KTRACE_OPT(KPANIC, |
|
861 Kern::Printf("Error: (dst.iElementsPerFrame != 0)")); |
|
862 return 0; |
|
863 } |
|
864 if (dst.iFramesPerTransfer != 0) |
|
865 { |
|
866 __KTRACE_OPT(KPANIC, |
|
867 Kern::Printf("Error: (dst.iFramesPerTransfer != 0)")); |
|
868 return 0; |
|
869 } |
|
870 if (dst.iElementsPerPacket != 0) |
|
871 { |
|
872 __KTRACE_OPT(KPANIC, |
|
873 Kern::Printf("Error: (dst.iElementsPerPacket != 0)")); |
|
874 return 0; |
|
875 } |
|
876 } |
|
877 } |
1001 } |
878 return count; |
1002 return count; |
879 } |
1003 } |
880 |
1004 |
881 |
1005 |
882 TInt DDmaRequest::Frag(TDmaTransferArgs& aTransferArgs) |
1006 TUint DDmaRequest::GetMaxTransferlength(const TDmaTransferArgs& aTransferArgs, TUint aCount) const |
883 { |
1007 { |
884 __DMA_ASSERTD(!iQueued); |
1008 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::GetMaxTransferlength")); |
885 |
|
886 // Transfer count checks |
|
887 const TUint count = GetTransferCount(aTransferArgs); |
|
888 if (count == 0) |
|
889 { |
|
890 return KErrArgument; |
|
891 } |
|
892 |
1009 |
893 const TDmaTransferConfig& src = aTransferArgs.iSrcConfig; |
1010 const TDmaTransferConfig& src = aTransferArgs.iSrcConfig; |
894 const TDmaTransferConfig& dst = aTransferArgs.iDstConfig; |
1011 const TDmaTransferConfig& dst = aTransferArgs.iDstConfig; |
895 |
1012 |
896 // Ask the PSL what the maximum length possible for this transfer is |
1013 // Ask the PSL what the maximum length is for a single transfer |
897 TUint max_xfer_len = iChannel.MaxTransferLength(src.iFlags, dst.iFlags, |
1014 TUint max_xfer_len = iChannel.MaxTransferLength(src.iFlags, dst.iFlags, |
898 aTransferArgs.iPslRequestInfo); |
1015 aTransferArgs.iPslRequestInfo); |
899 if (iMaxTransferSize) |
1016 if (iMaxTransferSize) |
900 { |
1017 { |
901 // User has set a size cap |
1018 // (User has set a transfer size cap) |
902 __KTRACE_OPT(KDMA, Kern::Printf("iMaxTransferSize != 0")); |
1019 __KTRACE_OPT(KDMA, Kern::Printf("iMaxTransferSize: %d", iMaxTransferSize)); |
903 __DMA_ASSERTA((iMaxTransferSize <= max_xfer_len) || (max_xfer_len == 0)); |
1020 if ((max_xfer_len != 0) && (iMaxTransferSize > max_xfer_len)) |
|
1021 { |
|
1022 // Not really an error, but still... |
|
1023 __KTRACE_OPT(KPANIC, Kern::Printf("Warning: iMaxTransferSize > max_xfer_len")); |
|
1024 } |
904 max_xfer_len = iMaxTransferSize; |
1025 max_xfer_len = iMaxTransferSize; |
905 } |
1026 } |
906 else |
1027 else |
907 { |
1028 { |
908 // User doesn't care about max size |
1029 // (User doesn't care about max transfer size) |
909 if (max_xfer_len == 0) |
1030 if (max_xfer_len == 0) |
910 { |
1031 { |
911 // No maximum imposed by controller |
1032 // '0' = no maximum imposed by controller |
912 max_xfer_len = count; |
1033 max_xfer_len = aCount; |
913 } |
1034 } |
|
1035 } |
|
1036 __KTRACE_OPT(KDMA, Kern::Printf("max_xfer_len: %d", max_xfer_len)); |
|
1037 |
|
1038 // Some sanity checks |
|
1039 #ifdef _DEBUG |
|
1040 if ((max_xfer_len < src.iElementSize) || (max_xfer_len < dst.iElementSize)) |
|
1041 { |
|
1042 // 18 |
|
1043 __KTRACE_OPT(KPANIC, Kern::Printf("Error: max_xfer_len < iElementSize")); |
|
1044 return 0; |
|
1045 } |
|
1046 if ((max_xfer_len < (src.iElementSize * src.iElementsPerFrame)) || |
|
1047 (max_xfer_len < (dst.iElementSize * dst.iElementsPerFrame))) |
|
1048 { |
|
1049 // 19 |
|
1050 __KTRACE_OPT(KPANIC, |
|
1051 Kern::Printf("Error: max_xfer_len < (iElementSize * iElementsPerFrame)")); |
|
1052 return 0; |
|
1053 } |
|
1054 #endif // #ifdef _DEBUG |
|
1055 |
|
1056 return max_xfer_len; |
|
1057 } |
|
1058 |
|
1059 |
|
1060 // Unified internal fragmentation routine, called by both the old and new |
|
1061 // exported Fragment() functions. |
|
1062 // |
|
1063 // Depending on whether the DMAC uses a single or two separate descriptor |
|
1064 // chains, this function branches into either FragSym() or FragAsym(), and the |
|
1065 // latter function further into either FragAsymSrc()/FragAsymDst() or |
|
1066 // FragBalancedAsym(). |
|
1067 // |
|
1068 TInt DDmaRequest::Frag(TDmaTransferArgs& aTransferArgs) |
|
1069 { |
|
1070 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Frag")); |
|
1071 __DMA_ASSERTD(!iQueued); |
|
1072 |
|
1073 // Transfer count + checks |
|
1074 const TUint count = GetTransferCount(aTransferArgs); |
|
1075 if (count == 0) |
|
1076 { |
|
1077 return KErrArgument; |
|
1078 } |
|
1079 |
|
1080 // Max transfer length + checks |
|
1081 const TUint max_xfer_len = GetMaxTransferlength(aTransferArgs, count); |
|
1082 if (max_xfer_len == 0) |
|
1083 { |
|
1084 return KErrArgument; |
914 } |
1085 } |
915 |
1086 |
916 // ISR callback requested? |
1087 // ISR callback requested? |
917 const TBool isr_cb = (aTransferArgs.iFlags & KDmaRequestCallbackFromIsr); |
1088 const TBool isr_cb = (aTransferArgs.iFlags & KDmaRequestCallbackFromIsr); |
918 if (isr_cb) |
1089 if (isr_cb) |
919 { |
1090 { |
920 // Requesting an ISR callback w/o supplying one? |
1091 // Requesting an ISR callback w/o supplying one? |
921 if (!iDmaCb) |
1092 if (!iDmaCb) |
922 { |
1093 { |
|
1094 // 12 |
|
1095 __KTRACE_OPT(KPANIC, Kern::Printf("Error: !iDmaCb")); |
923 return KErrArgument; |
1096 return KErrArgument; |
924 } |
1097 } |
925 } |
1098 } |
926 |
1099 |
927 // Set the channel cookie for the PSL |
1100 // Set the channel cookie for the PSL |
928 aTransferArgs.iChannelCookie = iChannel.PslId(); |
1101 aTransferArgs.iChannelCookie = iChannel.PslId(); |
929 |
1102 |
|
1103 // Client shouldn't specify contradictory or invalid things |
|
1104 TInt r = CheckMemFlags(aTransferArgs.iSrcConfig, count); |
|
1105 if (r != KErrNone) |
|
1106 { |
|
1107 __KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckMemFlags(src)")); |
|
1108 return r; |
|
1109 } |
|
1110 r = CheckMemFlags(aTransferArgs.iDstConfig, count); |
|
1111 if (r != KErrNone) |
|
1112 { |
|
1113 __KTRACE_OPT(KPANIC, Kern::Printf("Error: CheckMemFlags(dst)")); |
|
1114 return r; |
|
1115 } |
|
1116 |
930 // Now the actual fragmentation |
1117 // Now the actual fragmentation |
931 TInt r; |
|
932 if (iChannel.iDmacCaps->iAsymHwDescriptors) |
1118 if (iChannel.iDmacCaps->iAsymHwDescriptors) |
933 { |
1119 { |
934 r = FragAsym(aTransferArgs, count, max_xfer_len); |
1120 r = FragAsym(aTransferArgs, count, max_xfer_len); |
935 } |
1121 } |
936 else |
1122 else |
949 |
1135 |
950 |
1136 |
951 TInt DDmaRequest::FragSym(TDmaTransferArgs& aTransferArgs, TUint aCount, |
1137 TInt DDmaRequest::FragSym(TDmaTransferArgs& aTransferArgs, TUint aCount, |
952 TUint aMaxTransferLen) |
1138 TUint aMaxTransferLen) |
953 { |
1139 { |
|
1140 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragSym")); |
|
1141 |
954 TDmaTransferConfig& src = aTransferArgs.iSrcConfig; |
1142 TDmaTransferConfig& src = aTransferArgs.iSrcConfig; |
955 TDmaTransferConfig& dst = aTransferArgs.iDstConfig; |
1143 TDmaTransferConfig& dst = aTransferArgs.iDstConfig; |
956 |
|
957 const TBool mem_src = (src.iFlags & KDmaMemAddr); |
1144 const TBool mem_src = (src.iFlags & KDmaMemAddr); |
958 const TBool mem_dst = (dst.iFlags & KDmaMemAddr); |
1145 const TBool mem_dst = (dst.iFlags & KDmaMemAddr); |
959 |
1146 |
960 const TUint align_mask_src = iChannel.AddressAlignMask(src.iFlags, |
1147 const TUint align_mask_src = iChannel.AddressAlignMask(src.iFlags, |
961 src.iElementSize, |
1148 src.iElementSize, |
962 aTransferArgs.iPslRequestInfo); |
1149 aTransferArgs.iPslRequestInfo); |
|
1150 __KTRACE_OPT(KDMA, Kern::Printf("align_mask_src: 0x%x", align_mask_src)); |
963 const TUint align_mask_dst = iChannel.AddressAlignMask(dst.iFlags, |
1151 const TUint align_mask_dst = iChannel.AddressAlignMask(dst.iFlags, |
964 dst.iElementSize, |
1152 dst.iElementSize, |
965 aTransferArgs.iPslRequestInfo); |
1153 aTransferArgs.iPslRequestInfo); |
|
1154 __KTRACE_OPT(KDMA, Kern::Printf("align_mask_dst: 0x%x", align_mask_dst)); |
|
1155 |
966 // Memory buffers must satisfy alignment constraint |
1156 // Memory buffers must satisfy alignment constraint |
967 __DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask_src) == 0)); |
1157 __DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask_src) == 0)); |
968 __DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0)); |
1158 __DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0)); |
969 |
1159 |
|
1160 // Max aligned length is used to make sure the beginnings of subtransfers |
|
1161 // (i.e. fragments) are correctly aligned. |
970 const TUint max_aligned_len = (aMaxTransferLen & |
1162 const TUint max_aligned_len = (aMaxTransferLen & |
971 ~(Max(align_mask_src, align_mask_dst))); |
1163 ~(Max(align_mask_src, align_mask_dst))); |
|
1164 __KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len)); |
972 // Client and PSL sane? |
1165 // Client and PSL sane? |
973 __DMA_ASSERTD(max_aligned_len > 0); |
1166 __DMA_ASSERTD(max_aligned_len > 0); |
974 |
1167 |
975 FreeDesList(); // revert any previous fragmentation attempt |
1168 if (mem_src && mem_dst && |
|
1169 align_mask_src && align_mask_dst && |
|
1170 (align_mask_src != align_mask_dst) && |
|
1171 (!(src.iFlags & KDmaMemIsContiguous) || !(dst.iFlags & KDmaMemIsContiguous))) |
|
1172 { |
|
1173 // We don't support transfers which satisfy ALL of the following conditions: |
|
1174 // 1) from memory to memory, |
|
1175 // 2) both sides have address alignment requirements, |
|
1176 // 3) those alignment requirements are not the same, |
|
1177 // 4) the memory is non-contiguous on at least one end. |
|
1178 // |
|
1179 // [A 5th condition is that the channel doesn't support fully |
|
1180 // asymmetric h/w descriptor lists, |
|
1181 // i.e. TDmaChannel::DmacCaps::iAsymHwDescriptors is reported as EFalse |
|
1182 // or iBalancedAsymSegments as ETrue. Hence this check is done in |
|
1183 // FragSym() and FragBalancedAsym() but not in FragAsym().] |
|
1184 // |
|
1185 // The reason for this is that fragmentation could be impossible. The |
|
1186 // memory layout (page break) on the side with the less stringent |
|
1187 // alignment requirement can result in a misaligned target address on |
|
1188 // the other side. |
|
1189 // |
|
1190 // Here is an example: |
|
1191 // |
|
1192 // src.iAddr = 3964 (0x0F7C), non-contiguous, |
|
1193 // align_mask_src = 1 (alignment = 2 bytes) |
|
1194 // dst.iAddr = 16384 (0x4000), contiguous, |
|
1195 // align_mask_dst = 7 (alignment = 8 bytes) |
|
1196 // count = max_xfer_len = 135 bytes |
|
1197 // => max_aligned_len = 128 bytes |
|
1198 // |
|
1199 // Now, suppose MaxPhysSize() returns 132 bytes because src has 132 |
|
1200 // contiguous bytes to the end of its current mem page. |
|
1201 // Trying to fragment this leads to: |
|
1202 // |
|
1203 // frag_1 = 128 bytes: src reads from 3964 (0x0F7C), |
|
1204 // dst writes to 16384 (0x4000). |
|
1205 // (Fragment 1 uses the max_aligned_len instead of 132 bytes because |
|
1206 // otherwise the next fragment would start for the destination at |
|
1207 // dst.iAddr + 132 = 16516 (0x4084), which is not 8-byte aligned.) |
|
1208 // |
|
1209 // frag_2 = 4 bytes: src reads from 4092 (0x0FFC), |
|
1210 // dst writes to 16512 (0x4080). |
|
1211 // (Fragment 2 uses just 4 bytes instead of the remaining 7 bytes |
|
1212 // because there is a memory page break on the source side after 4 bytes.) |
|
1213 // |
|
1214 // frag_3 = 3 bytes: src reads from 4096 (0x1000), |
|
1215 // dst writes to 16516 (0x4084). |
|
1216 // |
|
1217 // And there's the problem: the start address of frag_3 is going to be |
|
1218 // misaligned for the destination side - it's not 8-byte aligned! |
|
1219 // |
|
1220 // 17 |
|
1221 __KTRACE_OPT(KPANIC, Kern::Printf("Error: Different alignments for src & dst" |
|
1222 " + non-contiguous target(s)")); |
|
1223 return KErrArgument; |
|
1224 } |
|
1225 |
976 TInt r; |
1226 TInt r; |
|
1227 // Revert any previous fragmentation attempt |
|
1228 FreeDesList(); |
977 do |
1229 do |
978 { |
1230 { |
979 // Allocate fragment |
1231 // Allocate fragment |
980 r = ExpandDesList(/*1*/); |
1232 r = ExpandDesList(/*1*/); |
981 if (r != KErrNone) |
1233 if (r != KErrNone) |
982 { |
1234 { |
983 FreeDesList(); |
|
984 break; |
1235 break; |
985 } |
1236 } |
986 // Compute fragment size |
1237 // Compute fragment size |
987 TUint c = Min(aMaxTransferLen, aCount); |
1238 TUint c = Min(aMaxTransferLen, aCount); |
988 if (mem_src && !(src.iFlags & KDmaPhysAddr)) |
1239 __KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c)); |
989 { |
1240 |
990 __KTRACE_OPT(KDMA, Kern::Printf("mem_src && !(src.iFlags & KDmaPhysAddr)")); |
1241 // SRC |
991 // @@@ Should also take into account (src.iFlags & KDmaMemIsContiguous)! |
1242 if (mem_src && !(src.iFlags & KDmaMemIsContiguous)) |
|
1243 { |
992 c = MaxPhysSize(src.iAddr, c); |
1244 c = MaxPhysSize(src.iAddr, c); |
993 } |
1245 __KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(src.iAddr, c) = %d", c)); |
994 if (mem_dst && !(dst.iFlags & KDmaPhysAddr)) |
1246 } |
995 { |
1247 |
996 __KTRACE_OPT(KDMA, Kern::Printf("mem_dst && !(dst.iFlags & KDmaPhysAddr)")); |
1248 // DST |
997 // @@@ Should also take into account (dst.iFlags & KDmaMemIsContiguous)! |
1249 if (mem_dst && !(dst.iFlags & KDmaMemIsContiguous)) |
|
1250 { |
998 c = MaxPhysSize(dst.iAddr, c); |
1251 c = MaxPhysSize(dst.iAddr, c); |
999 } |
1252 __KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(dst.iAddr, c) = %d", c)); |
|
1253 } |
|
1254 |
|
1255 // SRC & DST |
1000 if ((mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len)) |
1256 if ((mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len)) |
1001 { |
1257 { |
1002 // This is not the last fragment of a transfer to/from memory. |
1258 // This is not the last fragment of a transfer to/from memory. |
1003 // We must round down the fragment size so the next one is |
1259 // We must round down the fragment size so the next one is |
1004 // correctly aligned. |
1260 // correctly aligned. |
1005 __KTRACE_OPT(KDMA, Kern::Printf("(mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len)")); |
|
1006 c = max_aligned_len; |
1261 c = max_aligned_len; |
1007 } |
1262 __KTRACE_OPT(KDMA, Kern::Printf("c = max_aligned_len = %d", c)); |
1008 |
1263 // |
1009 // TODO: Make sure an element or frame on neither src or dst side |
1264 // But can this condition actually occur if src and dst are |
1010 // (which can be of different sizes) never straddles a DMA subtransfer. |
1265 // properly aligned to start with? |
1011 // (This would be a fragmentation error by the PIL.) |
1266 // |
|
1267 // If we disallow unequal alignment requirements in connection with |
|
1268 // non-contiguous memory buffers (see the long comment above in |
|
1269 // this function for why) and if both target addresses are |
|
1270 // correctly aligned at the beginning of the transfer then it |
|
1271 // doesn't seem possible to end up with a fragment which is not |
|
1272 // quite the total remaining size (c < aCount) but still larger |
|
1273 // than the greatest aligned length (c > max_aligned_len). |
|
1274 // |
|
1275 // That's because address alignment values are always a power of |
|
1276 // two (at least that's what we assume - otherwise |
|
1277 // AddressAlignMask() doesn't work), and memory page sizes are also |
|
1278 // always a power of two and hence a multiple of the alignment |
|
1279 // value (as long as the alignment is not greater than the page |
|
1280 // size, which seems a reasonable assumption regardless of the |
|
1281 // actual page size). So if we start properly aligned anywhere in a |
|
1282 // memory page then the number of bytes to the end of that page is |
|
1283 // always a multiple of the aligment value - there's no remainder. |
|
1284 // |
|
1285 // So let's see if we ever hit this assertion: |
|
1286 Kern::Printf("Unexpected: (mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len)"); |
|
1287 __DMA_ASSERTA(EFalse); |
|
1288 } |
|
1289 |
|
1290 // If this is not the last fragment... |
|
1291 if (c < aCount) |
|
1292 { |
|
1293 const TUint es_src = src.iElementSize; |
|
1294 const TUint es_dst = dst.iElementSize; |
|
1295 const TUint fs_src = es_src * src.iElementsPerFrame; |
|
1296 const TUint fs_dst = es_dst * dst.iElementsPerFrame; |
|
1297 TUint c_prev; |
|
1298 do |
|
1299 { |
|
1300 c_prev = c; |
|
1301 // If fs_src is !0 then es_src must be !0 as well (see |
|
1302 // CheckTransferConfig). |
|
1303 if (es_src) |
|
1304 { |
|
1305 r = AdjustFragmentSize(c, es_src, fs_src); |
|
1306 if (r != KErrNone) |
|
1307 { |
|
1308 break; // while (c != c_prev); |
|
1309 } |
|
1310 } |
|
1311 // If fs_dst is !0 then es_dst must be !0 as well (see |
|
1312 // CheckTransferConfig). |
|
1313 if (es_dst) |
|
1314 { |
|
1315 r = AdjustFragmentSize(c, es_dst, fs_dst); |
|
1316 if (r != KErrNone) |
|
1317 { |
|
1318 break; // while (c != c_prev); |
|
1319 } |
|
1320 } |
|
1321 } while (c != c_prev); |
|
1322 if (r != KErrNone) |
|
1323 { |
|
1324 break; // while (aCount > 0); |
|
1325 } |
|
1326 } |
1012 |
1327 |
1013 // Set transfer count for the PSL |
1328 // Set transfer count for the PSL |
1014 aTransferArgs.iTransferCount = c; |
1329 aTransferArgs.iTransferCount = c; |
1015 __KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)", |
1330 __KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)", |
1016 c, c, aCount, aCount)); |
1331 c, c, aCount, aCount)); |
1017 // Initialise fragment |
1332 // Initialise fragment |
1018 r = iChannel.iController->InitDes(*iLastHdr, aTransferArgs); |
1333 r = iChannel.iController->InitDes(*iLastHdr, aTransferArgs); |
1019 if (r != KErrNone) |
1334 if (r != KErrNone) |
1020 { |
1335 { |
1021 FreeDesList(); |
|
1022 break; |
1336 break; |
1023 } |
1337 } |
1024 // Update for next iteration |
1338 // Update for next iteration |
1025 aCount -= c; |
1339 aCount -= c; |
1026 if (mem_src) |
1340 if (mem_src) |
|
1341 { |
1027 src.iAddr += c; |
1342 src.iAddr += c; |
|
1343 } |
1028 if (mem_dst) |
1344 if (mem_dst) |
|
1345 { |
1029 dst.iAddr += c; |
1346 dst.iAddr += c; |
1030 } |
1347 } |
1031 while (aCount > 0); |
1348 } while (aCount > 0); |
1032 |
1349 |
|
1350 if (r != KErrNone) |
|
1351 { |
|
1352 FreeDesList(); |
|
1353 } |
1033 return r; |
1354 return r; |
1034 } |
1355 } |
1035 |
1356 |
1036 |
1357 |
1037 TInt DDmaRequest::FragAsym(TDmaTransferArgs& aTransferArgs, TUint aCount, |
1358 TInt DDmaRequest::FragAsym(TDmaTransferArgs& aTransferArgs, TUint aCount, |
1038 TUint aMaxTransferLen) |
1359 TUint aMaxTransferLen) |
1039 { |
1360 { |
1040 TInt r = FragAsymSrc(aTransferArgs, aCount, aMaxTransferLen); |
1361 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragAsym")); |
|
1362 |
|
1363 TInt r; |
|
1364 if (iChannel.iDmacCaps->iBalancedAsymSegments) |
|
1365 { |
|
1366 r = FragBalancedAsym(aTransferArgs, aCount, aMaxTransferLen); |
|
1367 if (r != KErrNone) |
|
1368 { |
|
1369 FreeSrcDesList(); |
|
1370 FreeDstDesList(); |
|
1371 } |
|
1372 return r; |
|
1373 } |
|
1374 r = FragAsymSrc(aTransferArgs, aCount, aMaxTransferLen); |
1041 if (r != KErrNone) |
1375 if (r != KErrNone) |
1042 { |
1376 { |
1043 FreeSrcDesList(); |
1377 FreeSrcDesList(); |
1044 return r; |
1378 return r; |
1045 } |
1379 } |
1054 |
1388 |
1055 |
1389 |
1056 TInt DDmaRequest::FragAsymSrc(TDmaTransferArgs& aTransferArgs, TUint aCount, |
1390 TInt DDmaRequest::FragAsymSrc(TDmaTransferArgs& aTransferArgs, TUint aCount, |
1057 TUint aMaxTransferLen) |
1391 TUint aMaxTransferLen) |
1058 { |
1392 { |
|
1393 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragAsymSrc")); |
|
1394 |
1059 TDmaTransferConfig& src = aTransferArgs.iSrcConfig; |
1395 TDmaTransferConfig& src = aTransferArgs.iSrcConfig; |
1060 |
|
1061 const TBool mem_src = (src.iFlags & KDmaMemAddr); |
1396 const TBool mem_src = (src.iFlags & KDmaMemAddr); |
1062 |
1397 |
1063 const TUint align_mask = iChannel.AddressAlignMask(src.iFlags, |
1398 const TUint align_mask = iChannel.AddressAlignMask(src.iFlags, |
1064 src.iElementSize, |
1399 src.iElementSize, |
1065 aTransferArgs.iPslRequestInfo); |
1400 aTransferArgs.iPslRequestInfo); |
|
1401 __KTRACE_OPT(KDMA, Kern::Printf("align_mask: 0x%x", align_mask)); |
|
1402 |
1066 // Memory buffers must satisfy alignment constraint |
1403 // Memory buffers must satisfy alignment constraint |
1067 __DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask) == 0)); |
1404 __DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask) == 0)); |
1068 |
1405 |
|
1406 // Max aligned length is used to make sure the beginnings of subtransfers |
|
1407 // (i.e. fragments) are correctly aligned. |
1069 const TUint max_aligned_len = (aMaxTransferLen & ~align_mask); |
1408 const TUint max_aligned_len = (aMaxTransferLen & ~align_mask); |
1070 __DMA_ASSERTD(max_aligned_len > 0); // bug in PSL if not true |
1409 __KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len)); |
1071 |
1410 // Client and PSL sane? |
|
1411 __DMA_ASSERTD(max_aligned_len > 0); |
|
1412 |
|
1413 TInt r; |
|
1414 // Revert any previous fragmentation attempt |
1072 FreeSrcDesList(); |
1415 FreeSrcDesList(); |
1073 TInt r; |
|
1074 do |
1416 do |
1075 { |
1417 { |
1076 // Allocate fragment |
1418 // Allocate fragment |
1077 r = ExpandSrcDesList(/*1*/); |
1419 r = ExpandSrcDesList(/*1*/); |
1078 if (r != KErrNone) |
1420 if (r != KErrNone) |
1079 { |
1421 { |
1080 break; |
1422 break; |
1081 } |
1423 } |
1082 // Compute fragment size |
1424 // Compute fragment size |
1083 TUint c = Min(aMaxTransferLen, aCount); |
1425 TUint c = Min(aMaxTransferLen, aCount); |
1084 if (mem_src && !(src.iFlags & KDmaPhysAddr)) |
1426 __KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c)); |
1085 { |
1427 |
1086 __KTRACE_OPT(KDMA, Kern::Printf("mem_src && !(src.iFlags & KDmaPhysAddr)")); |
1428 if (mem_src && !(src.iFlags & KDmaMemIsContiguous)) |
|
1429 { |
1087 c = MaxPhysSize(src.iAddr, c); |
1430 c = MaxPhysSize(src.iAddr, c); |
1088 } |
1431 __KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(src.iAddr, c) = %d", c)); |
|
1432 } |
|
1433 |
1089 if (mem_src && (c < aCount) && (c > max_aligned_len)) |
1434 if (mem_src && (c < aCount) && (c > max_aligned_len)) |
1090 { |
1435 { |
1091 // This is not the last fragment of a transfer from memory. |
1436 // This is not the last fragment of a transfer from memory. |
1092 // We must round down the fragment size so the next one is |
1437 // We must round down the fragment size so the next one is |
1093 // correctly aligned. |
1438 // correctly aligned. |
1094 __KTRACE_OPT(KDMA, Kern::Printf("mem_src && (c < aCount) && (c > max_aligned_len)")); |
1439 __KTRACE_OPT(KDMA, Kern::Printf("c = max_aligned_len = %d", c)); |
1095 c = max_aligned_len; |
1440 // |
1096 } |
1441 // But can this condition actually occur if src is properly aligned |
|
1442 // to start with? |
|
1443 // |
|
1444 // If the target address is correctly aligned at the beginning of |
|
1445 // the transfer then it doesn't seem possible to end up with a |
|
1446 // fragment which is not quite the total remaining size (c < |
|
1447 // aCount) but still larger than the greatest aligned length (c > |
|
1448 // max_aligned_len). |
|
1449 // |
|
1450 // That's because address alignment values are always a power of |
|
1451 // two (at least that's what we assume - otherwise |
|
1452 // AddressAlignMask() doesn't work), and memory page sizes are also |
|
1453 // always a power of two and hence a multiple of the alignment |
|
1454 // value (as long as the alignment is not greater than the page |
|
1455 // size, which seems a reasonable assumption regardless of the |
|
1456 // actual page size). So if we start properly aligned anywhere in a |
|
1457 // memory page then the number of bytes to the end of that page is |
|
1458 // always a multiple of the aligment value - there's no remainder. |
|
1459 // |
|
1460 // So let's see if we ever hit this assertion: |
|
1461 Kern::Printf("Unexpected: mem_src && (c < aCount) && (c > max_aligned_len)"); |
|
1462 __DMA_ASSERTA(EFalse); |
|
1463 } |
|
1464 |
|
1465 // If this is not the last fragment... |
|
1466 if (c < aCount) |
|
1467 { |
|
1468 const TUint es = src.iElementSize; |
|
1469 const TUint fs = es * src.iElementsPerFrame; |
|
1470 // If fs is !0 then es must be !0 as well (see |
|
1471 // CheckTransferConfig). |
|
1472 if (es) |
|
1473 { |
|
1474 r = AdjustFragmentSize(c, es, fs); |
|
1475 if (r != KErrNone) |
|
1476 { |
|
1477 break; // while (aCount > 0); |
|
1478 } |
|
1479 } |
|
1480 } |
|
1481 |
1097 // Set transfer count for the PSL |
1482 // Set transfer count for the PSL |
1098 aTransferArgs.iTransferCount = c; |
1483 aTransferArgs.iTransferCount = c; |
1099 __KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)", |
1484 __KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)", |
1100 c, c, aCount, aCount)); |
1485 c, c, aCount, aCount)); |
1101 // Initialise fragment |
1486 // Initialise fragment |
1105 break; |
1490 break; |
1106 } |
1491 } |
1107 // Update for next iteration |
1492 // Update for next iteration |
1108 aCount -= c; |
1493 aCount -= c; |
1109 if (mem_src) |
1494 if (mem_src) |
|
1495 { |
1110 src.iAddr += c; |
1496 src.iAddr += c; |
1111 } |
1497 } |
1112 while (aCount > 0); |
1498 } while (aCount > 0); |
1113 |
1499 |
1114 return r; |
1500 return r; |
1115 } |
1501 } |
1116 |
1502 |
1117 |
1503 |
1118 TInt DDmaRequest::FragAsymDst(TDmaTransferArgs& aTransferArgs, TUint aCount, |
1504 TInt DDmaRequest::FragAsymDst(TDmaTransferArgs& aTransferArgs, TUint aCount, |
1119 TUint aMaxTransferLen) |
1505 TUint aMaxTransferLen) |
1120 { |
1506 { |
|
1507 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragAsymDst")); |
|
1508 |
1121 TDmaTransferConfig& dst = aTransferArgs.iDstConfig; |
1509 TDmaTransferConfig& dst = aTransferArgs.iDstConfig; |
1122 |
|
1123 const TBool mem_dst = (dst.iFlags & KDmaMemAddr); |
1510 const TBool mem_dst = (dst.iFlags & KDmaMemAddr); |
1124 |
1511 |
1125 const TUint align_mask = iChannel.AddressAlignMask(dst.iFlags, |
1512 const TUint align_mask = iChannel.AddressAlignMask(dst.iFlags, |
1126 dst.iElementSize, |
1513 dst.iElementSize, |
1127 aTransferArgs.iPslRequestInfo); |
1514 aTransferArgs.iPslRequestInfo); |
|
1515 __KTRACE_OPT(KDMA, Kern::Printf("align_mask: 0x%x", align_mask)); |
|
1516 |
1128 // Memory buffers must satisfy alignment constraint |
1517 // Memory buffers must satisfy alignment constraint |
1129 __DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask) == 0)); |
1518 __DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask) == 0)); |
1130 |
1519 |
|
1520 // Max aligned length is used to make sure the beginnings of subtransfers |
|
1521 // (i.e. fragments) are correctly aligned. |
1131 const TUint max_aligned_len = (aMaxTransferLen & ~align_mask); |
1522 const TUint max_aligned_len = (aMaxTransferLen & ~align_mask); |
1132 __DMA_ASSERTD(max_aligned_len > 0); // bug in PSL if not true |
1523 __KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len)); |
1133 |
1524 // Client and PSL sane? |
|
1525 __DMA_ASSERTD(max_aligned_len > 0); |
|
1526 |
|
1527 TInt r; |
|
1528 // Revert any previous fragmentation attempt |
1134 FreeDstDesList(); |
1529 FreeDstDesList(); |
1135 TInt r; |
|
1136 do |
1530 do |
1137 { |
1531 { |
1138 // Allocate fragment |
1532 // Allocate fragment |
1139 r = ExpandDstDesList(/*1*/); |
1533 r = ExpandDstDesList(/*1*/); |
1140 if (r != KErrNone) |
1534 if (r != KErrNone) |
1141 { |
1535 { |
1142 break; |
1536 break; |
1143 } |
1537 } |
1144 // Compute fragment size |
1538 // Compute fragment size |
1145 TUint c = Min(aMaxTransferLen, aCount); |
1539 TUint c = Min(aMaxTransferLen, aCount); |
1146 if (mem_dst && !(dst.iFlags & KDmaPhysAddr)) |
1540 __KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c)); |
1147 { |
1541 |
1148 __KTRACE_OPT(KDMA, Kern::Printf("mem_dst && !(dst.iFlags & KDmaPhysAddr)")); |
1542 if (mem_dst && !(dst.iFlags & KDmaMemIsContiguous)) |
|
1543 { |
1149 c = MaxPhysSize(dst.iAddr, c); |
1544 c = MaxPhysSize(dst.iAddr, c); |
1150 } |
1545 __KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(dst.iAddr, c) = %d", c)); |
|
1546 } |
|
1547 |
1151 if (mem_dst && (c < aCount) && (c > max_aligned_len)) |
1548 if (mem_dst && (c < aCount) && (c > max_aligned_len)) |
1152 { |
1549 { |
1153 // This is not the last fragment of a transfer to memory. |
1550 // This is not the last fragment of a transfer to memory. |
1154 // We must round down the fragment size so the next one is |
1551 // We must round down the fragment size so the next one is |
1155 // correctly aligned. |
1552 // correctly aligned. |
1156 __KTRACE_OPT(KDMA, Kern::Printf("mem_dst && (c < aCount) && (c > max_aligned_len)")); |
1553 __KTRACE_OPT(KDMA, Kern::Printf("c = max_aligned_len = %d", c)); |
1157 c = max_aligned_len; |
1554 // |
1158 } |
1555 // But can this condition actually occur if dst is properly aligned |
|
1556 // to start with? |
|
1557 // |
|
1558 // If the target address is correctly aligned at the beginning of |
|
1559 // the transfer then it doesn't seem possible to end up with a |
|
1560 // fragment which is not quite the total remaining size (c < |
|
1561 // aCount) but still larger than the greatest aligned length (c > |
|
1562 // max_aligned_len). |
|
1563 // |
|
1564 // That's because address alignment values are always a power of |
|
1565 // two (at least that's what we assume - otherwise |
|
1566 // AddressAlignMask() doesn't work), and memory page sizes are also |
|
1567 // always a power of two and hence a multiple of the alignment |
|
1568 // value (as long as the alignment is not greater than the page |
|
1569 // size, which seems a reasonable assumption regardless of the |
|
1570 // actual page size). So if we start properly aligned anywhere in a |
|
1571 // memory page then the number of bytes to the end of that page is |
|
1572 // always a multiple of the aligment value - there's no remainder. |
|
1573 // |
|
1574 // So let's see if we ever hit this assertion: |
|
1575 Kern::Printf("Unexpected: mem_dst && (c < aCount) && (c > max_aligned_len)"); |
|
1576 __DMA_ASSERTA(EFalse); |
|
1577 } |
|
1578 |
|
1579 // If this is not the last fragment... |
|
1580 if (c < aCount) |
|
1581 { |
|
1582 const TUint es = dst.iElementSize; |
|
1583 const TUint fs = es * dst.iElementsPerFrame; |
|
1584 // If fs is !0 then es must be !0 as well (see |
|
1585 // CheckTransferConfig). |
|
1586 if (es) |
|
1587 { |
|
1588 r = AdjustFragmentSize(c, es, fs); |
|
1589 if (r != KErrNone) |
|
1590 { |
|
1591 break; // while (aCount > 0); |
|
1592 } |
|
1593 } |
|
1594 } |
|
1595 |
1159 // Set transfer count for the PSL |
1596 // Set transfer count for the PSL |
1160 aTransferArgs.iTransferCount = c; |
1597 aTransferArgs.iTransferCount = c; |
1161 __KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)", |
1598 __KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)", |
1162 c, c, aCount, aCount)); |
1599 c, c, aCount, aCount)); |
1163 // Initialise fragment |
1600 // Initialise fragment |
1167 break; |
1604 break; |
1168 } |
1605 } |
1169 // Update for next iteration |
1606 // Update for next iteration |
1170 aCount -= c; |
1607 aCount -= c; |
1171 if (mem_dst) |
1608 if (mem_dst) |
|
1609 { |
1172 dst.iAddr += c; |
1610 dst.iAddr += c; |
|
1611 } |
1173 } |
1612 } |
1174 while (aCount > 0); |
1613 while (aCount > 0); |
1175 |
1614 |
1176 return r; |
1615 return r; |
1177 } |
1616 } |
1178 |
1617 |
1179 |
1618 |
|
// Fragments a transfer for channels that use separate but count-wise
// "balanced" SRC and DST hardware descriptor lists: each loop iteration
// allocates exactly one SRC and one DST descriptor (ExpandSrcDesList /
// ExpandDstDesList) and initialises them for the same fragment size, so both
// lists always end up with the same number of elements.
//
// Called (by FragAsym) when the DMAC reports
// iDmacCaps->iBalancedAsymSegments — TODO confirm against FragAsym's caller.
//
// @param aTransferArgs    Transfer parameters; iSrcConfig/iDstConfig addresses
//                         and iTransferCount are updated in place per fragment.
// @param aCount           Total number of bytes to transfer (assumed > 0).
// @param aMaxTransferLen  Maximum size of a single fragment, as imposed by
//                         the PSL/DMAC.
// @return KErrNone on success; KErrArgument for unsupported mem-to-mem
//         transfers with differing alignments on non-contiguous memory;
//         otherwise the error from descriptor allocation, fragment-size
//         adjustment, or PSL descriptor initialisation.
TInt DDmaRequest::FragBalancedAsym(TDmaTransferArgs& aTransferArgs, TUint aCount,
								   TUint aMaxTransferLen)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FragBalancedAsym"));

	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
	const TBool mem_src = (src.iFlags & KDmaMemAddr);
	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);

	const TUint align_mask_src = iChannel.AddressAlignMask(src.iFlags,
														   src.iElementSize,
														   aTransferArgs.iPslRequestInfo);
	__KTRACE_OPT(KDMA, Kern::Printf("align_mask_src: 0x%x", align_mask_src));
	const TUint align_mask_dst = iChannel.AddressAlignMask(dst.iFlags,
														   dst.iElementSize,
														   aTransferArgs.iPslRequestInfo);
	__KTRACE_OPT(KDMA, Kern::Printf("align_mask_dst: 0x%x", align_mask_dst));

	// Memory buffers must satisfy alignment constraint
	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask_src) == 0));
	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0));

	// Max aligned length is used to make sure the beginnings of subtransfers
	// (i.e. fragments) are correctly aligned.
	// The more stringent (larger) of the two alignment masks governs both sides.
	const TUint max_aligned_len = (aMaxTransferLen &
								   ~(Max(align_mask_src, align_mask_dst)));
	__KTRACE_OPT(KDMA, Kern::Printf("max_aligned_len: %d", max_aligned_len));
	// Client and PSL sane?
	__DMA_ASSERTD(max_aligned_len > 0);

	if (mem_src && mem_dst &&
		align_mask_src && align_mask_dst &&
		(align_mask_src != align_mask_dst) &&
		(!(src.iFlags & KDmaMemIsContiguous) || !(dst.iFlags & KDmaMemIsContiguous)))
		{
		// We don't support transfers which satisfy ALL of the following conditions:
		// 1) from memory to memory,
		// 2) both sides have address alignment requirements,
		// 3) those alignment requirements are not the same,
		// 4) the memory is non-contiguous on at least one end.
		//
		// [A 5th condition is that the channel doesn't support fully
		// asymmetric h/w descriptor lists,
		// i.e. TDmaChannel::DmacCaps::iAsymHwDescriptors is reported as EFalse
		// or iBalancedAsymSegments as ETrue. Hence this check is done in
		// FragSym() and FragBalancedAsym() but not in FragAsym().]
		//
		// The reason for this is that fragmentation could be impossible. The
		// memory layout (page break) on the side with the less stringent
		// alignment requirement can result in a misaligned target address on
		// the other side.
		//
		// Here is an example:
		//
		// src.iAddr = 3964 (0x0F7C), non-contiguous,
		// align_mask_src = 1 (alignment = 2 bytes)
		// dst.iAddr = 16384 (0x4000), contiguous,
		// align_mask_dst = 7 (alignment = 8 bytes)
		// count = max_xfer_len = 135 bytes
		// => max_aligned_len = 128 bytes
		//
		// Now, suppose MaxPhysSize() returns 132 bytes because src has 132
		// contiguous bytes to the end of its current mem page.
		// Trying to fragment this leads to:
		//
		// frag_1 = 128 bytes: src reads from 3964 (0x0F7C),
		// dst writes to 16384 (0x4000).
		// (Fragment 1 uses the max_aligned_len instead of 132 bytes because
		// otherwise the next fragment would start for the destination at
		// dst.iAddr + 132 = 16516 (0x4084), which is not 8-byte aligned.)
		//
		// frag_2 = 4 bytes: src reads from 4092 (0x0FFC),
		// dst writes to 16512 (0x4080).
		// (Fragment 2 uses just 4 bytes instead of the remaining 7 bytes
		// because there is a memory page break on the source side after 4 bytes.)
		//
		// frag_3 = 3 bytes: src reads from 4096 (0x1000),
		// dst writes to 16516 (0x4084).
		//
		// And there's the problem: the start address of frag_3 is going to be
		// misaligned for the destination side - it's not 8-byte aligned!
		//
		__KTRACE_OPT(KPANIC, Kern::Printf("Error: Different alignments for src & dst"
										  " + non-contiguous target(s)"));
		return KErrArgument;
		}

	TInt r;
	// Revert any previous fragmentation attempt
	FreeSrcDesList();
	FreeDstDesList();
	do
		{
		// Allocate fragment
		r = ExpandSrcDesList(/*1*/);
		if (r != KErrNone)
			{
			break;
			}
		r = ExpandDstDesList(/*1*/);
		if (r != KErrNone)
			{
			break;
			}
		// Compute fragment size
		TUint c = Min(aMaxTransferLen, aCount);
		__KTRACE_OPT(KDMA, Kern::Printf("c = Min(aMaxTransferLen, aCount) = %d", c));

		// SRC
		// Clip to the contiguous physical extent of the source page.
		if (mem_src && !(src.iFlags & KDmaMemIsContiguous))
			{
			c = MaxPhysSize(src.iAddr, c);
			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(src.iAddr, c) = %d", c));
			}

		// DST
		// Clip to the contiguous physical extent of the destination page.
		if (mem_dst && !(dst.iFlags & KDmaMemIsContiguous))
			{
			c = MaxPhysSize(dst.iAddr, c);
			__KTRACE_OPT(KDMA, Kern::Printf("c = MaxPhysSize(dst.iAddr, c) = %d", c));
			}

		// SRC & DST
		if ((mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len))
			{
			// This is not the last fragment of a transfer to/from memory.
			// We must round down the fragment size so the next one is
			// correctly aligned.
			c = max_aligned_len;
			__KTRACE_OPT(KDMA, Kern::Printf("c = max_aligned_len = %d", c));
			//
			// But can this condition actually occur if src and dst are
			// properly aligned to start with?
			//
			// If we disallow unequal alignment requirements in connection with
			// non-contiguous memory buffers (see the long comment above in
			// this function for why) and if both target addresses are
			// correctly aligned at the beginning of the transfer then it
			// doesn't seem possible to end up with a fragment which is not
			// quite the total remaining size (c < aCount) but still larger
			// than the greatest aligned length (c > max_aligned_len).
			//
			// That's because address alignment values are always a power of
			// two (at least that's what we assume - otherwise
			// AddressAlignMask() doesn't work), and memory page sizes are also
			// always a power of two and hence a multiple of the alignment
			// value (as long as the alignment is not greater than the page
			// size, which seems a reasonable assumption regardless of the
			// actual page size). So if we start properly aligned anywhere in a
			// memory page then the number of bytes to the end of that page is
			// always a multiple of the aligment value - there's no remainder.
			//
			// So let's see if we ever hit this assertion:
			Kern::Printf("Unexpected: (mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len)");
			__DMA_ASSERTA(EFalse);
			}

		// If this is not the last fragment...
		if (c < aCount)
			{
			const TUint es_src = src.iElementSize;
			const TUint es_dst = dst.iElementSize;
			const TUint fs_src = es_src * src.iElementsPerFrame;
			const TUint fs_dst = es_dst * dst.iElementsPerFrame;
			TUint c_prev;
			// Iterate to a fixed point: adjusting for the source granularity
			// may invalidate the destination granularity and vice versa, so
			// repeat until c no longer changes.
			do
				{
				c_prev = c;
				// If fs_src is !0 then es_src must be !0 as well (see
				// CheckTransferConfig).
				if (es_src)
					{
					r = AdjustFragmentSize(c, es_src, fs_src);
					if (r != KErrNone)
						{
						break; // while (c != c_prev);
						}
					}
				// If fs_dst is !0 then es_dst must be !0 as well (see
				// CheckTransferConfig).
				if (es_dst)
					{
					r = AdjustFragmentSize(c, es_dst, fs_dst);
					if (r != KErrNone)
						{
						break; // while (c != c_prev);
						}
					}
				} while (c != c_prev);
			if (r != KErrNone)
				{
				break; // while (aCount > 0);
				}
			}

		// Set transfer count for the PSL
		aTransferArgs.iTransferCount = c;
		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
										c, c, aCount, aCount));
		// Initialise SRC fragment
		r = iChannel.iController->InitSrcHwDes(*iSrcLastHdr, aTransferArgs);
		if (r != KErrNone)
			{
			break;
			}
		// Initialise DST fragment
		r = iChannel.iController->InitDstHwDes(*iDstLastHdr, aTransferArgs);
		if (r != KErrNone)
			{
			break;
			}
		// Update for next iteration
		aCount -= c;
		if (mem_src)
			{
			src.iAddr += c;
			}
		if (mem_dst)
			{
			dst.iAddr += c;
			}
		}
	while (aCount > 0);

	return r;
	}
|
1846 |
|
1847 |
1180 EXPORT_C TInt DDmaRequest::Queue() |
1848 EXPORT_C TInt DDmaRequest::Queue() |
1181 { |
1849 { |
1182 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread())); |
1850 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread())); |
1183 __DMA_ASSERTD(iDesCount > 0); // Not configured? Call Fragment() first! |
1851 // Not configured? Call Fragment() first! |
|
1852 if (iChannel.iDmacCaps->iAsymHwDescriptors) |
|
1853 { |
|
1854 __DMA_ASSERTD((iSrcDesCount < 0) && (iDstDesCount < 0)); |
|
1855 } |
|
1856 else |
|
1857 { |
|
1858 __DMA_ASSERTD(iDesCount > 0); |
|
1859 } |
1184 __DMA_ASSERTD(!iQueued); |
1860 __DMA_ASSERTD(!iQueued); |
1185 |
1861 |
1186 // Append request to queue and link new descriptor list to existing one. |
1862 // Append request to queue and link new descriptor list to existing one. |
1187 iChannel.Wait(); |
1863 iChannel.Wait(); |
1188 |
1864 |
1338 } |
2018 } |
1339 |
2019 |
1340 |
2020 |
// Returns this request's (symmetric) descriptor chain to the channel
// controller's free pool. Delegates to the three-argument FreeDesList()
// overload, operating on the combined SRC/DST list (iDesCount, iFirstHdr,
// iLastHdr).
EXPORT_C void DDmaRequest::FreeDesList()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FreeDesList"));
	FreeDesList(iDesCount, iFirstHdr, iLastHdr);
	}
1345 |
2026 |
1346 |
2027 |
// Returns this request's source-side descriptor chain (used with asymmetric
// h/w descriptor lists) to the controller's free pool. Delegates to the
// three-argument FreeDesList() overload with the SRC list members.
EXPORT_C void DDmaRequest::FreeSrcDesList()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FreeSrcDesList"));
	FreeDesList(iSrcDesCount, iSrcFirstHdr, iSrcLastHdr);
	}
1351 |
2033 |
1352 |
2034 |
// Returns this request's destination-side descriptor chain (used with
// asymmetric h/w descriptor lists) to the controller's free pool. Delegates
// to the three-argument FreeDesList() overload with the DST list members.
EXPORT_C void DDmaRequest::FreeDstDesList()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FreeDstDesList"));
	FreeDesList(iDstDesCount, iDstFirstHdr, iDstLastHdr);
	}
1357 |
2040 |
1358 |
2041 |
1359 void DDmaRequest::FreeDesList(TInt& aDesCount, SDmaDesHdr*& aFirstHdr, SDmaDesHdr*& aLastHdr) |
2042 void DDmaRequest::FreeDesList(TInt& aDesCount, SDmaDesHdr*& aFirstHdr, SDmaDesHdr*& aLastHdr) |
1360 { |
2043 { |
|
2044 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::FreeDesList count=%d", aDesCount)); |
1361 __DMA_ASSERTD(!iQueued); |
2045 __DMA_ASSERTD(!iQueued); |
1362 |
2046 |
1363 if (aDesCount > 0) |
2047 if (aDesCount > 0) |
1364 { |
2048 { |
1365 iChannel.iAvailDesCount += aDesCount; |
2049 iChannel.iAvailDesCount += aDesCount; |
1366 TDmac& c = *(iChannel.iController); |
2050 TDmac& c = *(iChannel.iController); |
1367 const SDmaDesHdr* hdr = aFirstHdr; |
2051 const SDmaDesHdr* hdr = aFirstHdr; |
1368 while (hdr) |
2052 while (hdr) |
1369 { |
2053 { |
|
2054 __DMA_ASSERTD(c.IsValidHdr(hdr)); |
|
2055 |
|
2056 // This (potential) PSL call doesn't follow the "overhead |
|
2057 // principle", and something should be done about this. |
1370 c.ClearHwDes(*hdr); |
2058 c.ClearHwDes(*hdr); |
1371 hdr = hdr->iNext; |
2059 hdr = hdr->iNext; |
1372 }; |
2060 }; |
|
2061 |
1373 c.Wait(); |
2062 c.Wait(); |
|
2063 __DMA_ASSERTD(c.IsValidHdr(c.iFreeHdr)); |
1374 aLastHdr->iNext = c.iFreeHdr; |
2064 aLastHdr->iNext = c.iFreeHdr; |
1375 c.iFreeHdr = aFirstHdr; |
2065 c.iFreeHdr = aFirstHdr; |
1376 c.Signal(); |
2066 c.Signal(); |
|
2067 |
1377 aFirstHdr = aLastHdr = NULL; |
2068 aFirstHdr = aLastHdr = NULL; |
1378 aDesCount = 0; |
2069 aDesCount = 0; |
1379 } |
2070 } |
1380 } |
2071 } |
1381 |
2072 |
1382 |
2073 |
1383 EXPORT_C void DDmaRequest::EnableSrcElementCounting(TBool /*aResetElementCount*/) |
2074 EXPORT_C void DDmaRequest::EnableSrcElementCounting(TBool /*aResetElementCount*/) |
1384 { |
2075 { |
|
2076 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::EnableSrcElementCounting")); |
|
2077 |
1385 // Not yet implemented. |
2078 // Not yet implemented. |
1386 return; |
2079 return; |
1387 } |
2080 } |
1388 |
2081 |
1389 |
2082 |
1390 EXPORT_C void DDmaRequest::EnableDstElementCounting(TBool /*aResetElementCount*/) |
2083 EXPORT_C void DDmaRequest::EnableDstElementCounting(TBool /*aResetElementCount*/) |
1391 { |
2084 { |
|
2085 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::EnableDstElementCounting")); |
|
2086 |
1392 // Not yet implemented. |
2087 // Not yet implemented. |
1393 return; |
2088 return; |
1394 } |
2089 } |
1395 |
2090 |
1396 |
2091 |
1397 EXPORT_C void DDmaRequest::DisableSrcElementCounting() |
2092 EXPORT_C void DDmaRequest::DisableSrcElementCounting() |
1398 { |
2093 { |
|
2094 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::DisableSrcElementCounting")); |
|
2095 |
1399 // Not yet implemented. |
2096 // Not yet implemented. |
1400 return; |
2097 return; |
1401 } |
2098 } |
1402 |
2099 |
1403 |
2100 |
1404 EXPORT_C void DDmaRequest::DisableDstElementCounting() |
2101 EXPORT_C void DDmaRequest::DisableDstElementCounting() |
1405 { |
2102 { |
|
2103 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::DisableDstElementCounting")); |
|
2104 |
1406 // Not yet implemented. |
2105 // Not yet implemented. |
1407 return; |
2106 return; |
1408 } |
2107 } |
1409 |
2108 |
1410 |
2109 |
1411 EXPORT_C TUint32 DDmaRequest::TotalNumSrcElementsTransferred() |
2110 EXPORT_C TUint32 DDmaRequest::TotalNumSrcElementsTransferred() |
1412 { |
2111 { |
|
2112 __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::TotalNumSrcElementsTransferred")); |
|
2113 |
1413 // Not yet implemented. |
2114 // Not yet implemented. |
1414 |
2115 |
1415 // So far largely bogus code (just to touch some symbols)... |
2116 // So far largely bogus code (just to touch some symbols)... |
1416 iTotalNumSrcElementsTransferred = 0; |
2117 iTotalNumSrcElementsTransferred = 0; |
1417 TDmac& c = *(iChannel.iController); |
2118 TDmac& c = *(iChannel.iController); |