620 TInt Mmu::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aBytes, TPhysAddr& aPhysAddr, TInt aAlign) |
620 TInt Mmu::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aBytes, TPhysAddr& aPhysAddr, TInt aAlign) |
621 { |
621 { |
622 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?,%d)", aZoneIdCount, aBytes, aPhysAddr, aAlign)); |
622 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?,%d)", aZoneIdCount, aBytes, aPhysAddr, aAlign)); |
623 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
623 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
624 |
624 |
625 TInt r = iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aBytes, aPhysAddr, EPageFixed, aAlign); |
625 TInt r = iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aBytes, aPhysAddr, aAlign); |
626 if(r!=KErrNone) |
626 if(r!=KErrNone) |
627 iRamAllocFailed = ETrue; |
627 iRamAllocFailed = ETrue; |
628 else |
628 else |
629 { |
629 { |
630 TUint pages = MM::RoundToPageCount(aBytes); |
630 TUint pages = MM::RoundToPageCount(aBytes); |
869 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam returns %d",r)); |
869 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam returns %d",r)); |
870 return r; |
870 return r; |
871 } |
871 } |
872 |
872 |
873 |
873 |
|
874 /** |
|
875 Mark a page as being allocated to a particular page type. |
|
876 |
|
877 NOTE - This page should not be used until PagesAllocated() has been invoked on it. |
|
878 |
|
879 @param aPhysAddr The physical address of the page to mark as allocated. |
|
880 @param aZonePageType The type of the page to mark as allocated. |
|
881 */ |
|
882 void Mmu::MarkPageAllocated(TPhysAddr aPhysAddr, TZonePageType aZonePageType) |
|
883 { |
|
884 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::MarkPageAllocated(0x%x, %d)", aPhysAddr, aZonePageType)); |
|
885 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
886 iRamPageAllocator->MarkPageAllocated(aPhysAddr, aZonePageType); |
|
887 } |
|
888 |
|
889 |
874 void Mmu::FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType) |
890 void Mmu::FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType) |
875 { |
891 { |
876 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeRam(?,%d)",aCount)); |
892 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeRam(?,%d)",aCount)); |
877 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
893 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
878 |
894 |
888 TPhysAddr pagePhys = *pages++; |
904 TPhysAddr pagePhys = *pages++; |
889 __NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid); |
905 __NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid); |
890 SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys); |
906 SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys); |
891 PageFreed(pi); |
907 PageFreed(pi); |
892 |
908 |
893 // If this is an old page of a page being moved that was previously pinned |
909 switch (ThePager.PageFreed(pi)) |
894 // then make sure it is freed as discardable otherwise despite DPager::DonatePages() |
910 { |
895 // having marked it as discardable it would be freed as movable. |
911 case KErrNone: |
896 __NK_ASSERT_DEBUG(pi->PagedState() != SPageInfo::EPagedPinnedMoved || aCount == 1); |
912 --aCount; // pager has dealt with this page, so one less for us |
897 if (pi->PagedState() == SPageInfo::EPagedPinnedMoved) |
913 break; |
898 aZonePageType = EPageDiscard; |
914 case KErrCompletion: |
899 |
915 // This was a pager controlled page but it is no longer required. |
900 if(ThePager.PageFreed(pi)==KErrNone) |
916 __NK_ASSERT_DEBUG(aZonePageType == EPageMovable || aZonePageType == EPageDiscard); |
901 --aCount; // pager has dealt with this page, so one less for us |
917 __NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged); |
902 else |
918 if (aZonePageType == EPageMovable) |
903 { |
919 {// This page was donated to the pager so have to free it here |
904 // All paged pages should have been dealt with by the pager above. |
920 // as aZonePageType is incorrect for this page but aPages may |
905 __NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged); |
921 // contain a mixture of movable and discardable pages. |
906 *pagesOut++ = pagePhys; // store page address for freeing later |
922 MmuLock::Unlock(); |
|
923 iRamPageAllocator->FreeRamPages(&pagePhys, 1, EPageDiscard); |
|
924 aCount--; // We've freed this page here so one less to free later |
|
925 flash = 0; // reset flash count as we released the mmulock. |
|
926 MmuLock::Lock(); |
|
927 break; |
|
928 } |
|
929 // fall through.. |
|
930 default: |
|
931 // Free this page.. |
|
932 __NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged); |
|
933 *pagesOut++ = pagePhys; // store page address for freeing later |
907 } |
934 } |
908 } |
935 } |
909 MmuLock::Unlock(); |
936 MmuLock::Unlock(); |
910 |
937 |
911 iRamPageAllocator->FreeRamPages(aPages, aCount, aZonePageType); |
938 iRamPageAllocator->FreeRamPages(aPages, aCount, aZonePageType); |
920 if(K::CheckForSimulatedAllocFail()) |
947 if(K::CheckForSimulatedAllocFail()) |
921 { |
948 { |
922 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns simulated OOM %d",KErrNoMemory)); |
949 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns simulated OOM %d",KErrNoMemory)); |
923 return KErrNoMemory; |
950 return KErrNoMemory; |
924 } |
951 } |
925 // Only the pager sets EAllocNoPagerReclaim and it shouldn't allocate contiguous ram. |
952 // Only the pager sets EAllocNoPagerReclaim and it shouldn't allocate contiguous ram. |
926 __NK_ASSERT_DEBUG(!(aFlags&EAllocNoPagerReclaim)); |
953 __NK_ASSERT_DEBUG(!(aFlags&EAllocNoPagerReclaim)); |
927 #endif |
954 #endif |
928 TInt r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift); |
955 TInt r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, aAlign+KPageShift); |
929 if(r==KErrNoMemory && aCount > KMaxFreeableContiguousPages) |
|
930 { |
|
931 // flush paging cache and retry... |
|
932 ThePager.FlushAll(); |
|
933 r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift); |
|
934 } |
|
935 if(r!=KErrNone) |
956 if(r!=KErrNone) |
936 iRamAllocFailed = ETrue; |
957 iRamAllocFailed = ETrue; |
937 else |
958 else |
938 PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags); |
959 PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags); |
939 __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguouseRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr)); |
960 __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr)); |
940 return r; |
961 return r; |
941 } |
962 } |
942 |
963 |
943 |
964 |
944 void Mmu::FreeContiguousRam(TPhysAddr aPhysAddr, TUint aCount) |
965 void Mmu::FreeContiguousRam(TPhysAddr aPhysAddr, TUint aCount) |