kernel/eka/memmodel/epoc/mmubase/ramalloc.cpp
branch RCL_3
changeset 28 5b5d147c7838
parent 26 c734af59ce98
--- a/kernel/eka/memmodel/epoc/mmubase/ramalloc.cpp	(26:c734af59ce98)
+++ b/kernel/eka/memmodel/epoc/mmubase/ramalloc.cpp	(28:5b5d147c7838)
@@ -1328 +1328 @@
 	{
 	__KTRACE_OPT(KMMU, 
 		Kern::Printf("ZoneClearPages: ID 0x%x, req 0x%x", aZone.iId, aRequiredPages));
 	// Discard the required number of discardable pages.
 	TUint offset = 0;
-	TInt r = NextAllocatedPage(&aZone, offset, EPageDiscard);
-	while (r == KErrNone && aRequiredPages)
+	for (; aRequiredPages; offset++)
 		{
+		TInt r = NextAllocatedPage(&aZone, offset, EPageDiscard);
+		if (r != KErrNone)
+			break;
+		if (iContiguousReserved && aZone.iBma[EPageFixed]->NotFree(offset, 1))
+			{
+			offset++;
+			continue;
+			}
 		TPhysAddr physAddr = (offset << KPageShift) + aZone.iPhysBase;
-		TInt discarded = M::DiscardPage(physAddr, aZone.iId, EFalse);
+		TInt discarded = M::DiscardPage(physAddr, aZone.iId, M::EMoveDisMoveDirty);
 		if (discarded == KErrNone)
 			{// The page was successfully discarded.
 			aRequiredPages--;
 			}
-		offset++;
-		r = NextAllocatedPage(&aZone, offset, EPageDiscard);
 		}
 	// Move the required number of movable pages.
-	offset = 0;
-	r = NextAllocatedPage(&aZone, offset, EPageMovable);
-	while(r == KErrNone && aRequiredPages)
+	for (offset = 0; aRequiredPages; offset++)
 		{
+		TInt r = NextAllocatedPage(&aZone, offset, EPageMovable);
+		if (r != KErrNone)
+			break;
+		if (iContiguousReserved && aZone.iBma[EPageFixed]->NotFree(offset, 1))
+			{
+			offset++;
+			continue;
+			}
 		TPhysAddr physAddr = (offset << KPageShift) + aZone.iPhysBase;
 		TPhysAddr newAddr = KPhysAddrInvalid;
-		if (M::MovePage(physAddr, newAddr, aZone.iId, EFalse) == KErrNone)
+		if (M::MovePage(physAddr, newAddr, aZone.iId, 0) == KErrNone)
 			{// The page was successfully moved.
 #ifdef _DEBUG
 			TInt newOffset = 0;
 			SZone* newZone = GetZoneAndOffset(newAddr, newOffset);
 			__NK_ASSERT_DEBUG(newZone != &aZone);
 #endif
 			aRequiredPages--;
 			}
-		offset++;
-		r = NextAllocatedPage(&aZone, offset, EPageMovable);
 		}
 	}
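The check added to both loops above reads most clearly in isolation. A minimal sketch, assuming the SZone, TBitMapAllocator and EPageFixed declarations used elsewhere in ramalloc.cpp; the helper name is hypothetical and is not part of the changeset:

	// Hypothetical helper expressing the intent of the new test: while a contiguous
	// allocation is in progress (iContiguousReserved != 0), any page that
	// BlockContiguousRegion() has already marked in the zone's fixed-page bitmap
	// must be left alone rather than discarded or moved out of the zone.
	inline TBool IsReservedForContiguousRun(const SZone& aZone, TUint aOffset, TUint aContiguousReserved)
		{
		return aContiguousReserved && aZone.iBma[EPageFixed]->NotFree(aOffset, 1);
		}

The final arguments of the paging calls also change: M::DiscardPage now takes the M::EMoveDisMoveDirty flag instead of EFalse, and the M::MovePage calls pass 0 where they previously passed EFalse, consistent with the old boolean parameter apparently having become a flags word.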
@@ -1365 +1374 @@
 
 /** Attempt to allocate pages into a particular zone.  Pages will not
 	always be contiguous.
@@ -1439 +1448 @@
 	__NK_ASSERT_DEBUG(aType != EPageUnknown);
 
 	TPhysAddr* pageListBase = aPageList;
 	TUint32 numMissing = aNumPages;
 
+	if ((TUint)aNumPages > iTotalFreeRamPages)
+		{// Not enough free pages to fulfill this request so return the amount required
+		return aNumPages - iTotalFreeRamPages;
+		}
+
 	if (aType == EPageFixed)
 		{// Currently only a general defrag operation should set this and it won't
 		// allocate fixed pages.
 		__NK_ASSERT_DEBUG(!aBlockRest);
-		if ((TUint)aNumPages > iTotalFreeRamPages + M::NumberOfFreeDpPages())
-			{// Not enough free space and not enough freeable pages.
-			goto exit;
-			}
 
 		// Search through each zone in preference order until all pages allocated or
 		// have reached the end of the preference list
 		SDblQueLink* link = iZonePrefList.First();
 		while (numMissing && link != &iZonePrefList.iA)
@@ -1482 +1492 @@
 				}
 			}
 		}
 	else
 		{
-		if ((TUint)aNumPages > iTotalFreeRamPages)
-			{// Not enough free pages to fulfill this request so return amount required
-			return aNumPages - iTotalFreeRamPages;
-			}
-
 		// Determine if there are enough free pages in the RAM zones in use.
 		TUint totalFreeInUse = 0;
 		SDblQueLink* link = iZoneLeastMovDis;
 		for(; link != &iZonePrefList.iA; link = link->iPrev)
 			{
@@ -1723 +1728 @@
 	return r;
 	}
 
 
 #if !defined(__MEMMODEL_MULTIPLE__) && !defined(__MEMMODEL_MOVING__)
-void DRamAllocator::BlockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages)
+TUint DRamAllocator::BlockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages)
 	{
 	// Shouldn't be asked to block zero pages, addrEndPage would be wrong if we did.
 	__NK_ASSERT_DEBUG(aNumPages);
 	TPhysAddr addr = aAddrBase;
 	TPhysAddr addrEndPage = aAddrBase + ((aNumPages - 1) << KPageShift);
 	TInt tmpOffset;
 	SZone* endZone = GetZoneAndOffset(addrEndPage, tmpOffset);
 	SZone* tmpZone;
+	TUint totalUnreserved = aNumPages;
 	do
 		{
 		tmpZone = GetZoneAndOffset(addr, tmpOffset);
 		__NK_ASSERT_DEBUG(tmpZone != NULL);
 		TUint runLength = 	(addrEndPage < tmpZone->iPhysEnd)? 
@@ -1752 +1758 @@
 					free++;
 			__NK_ASSERT_DEBUG(free == reserved);
 #endif
 			ZoneAllocPages(tmpZone, reserved, EPageFixed);
 			iTotalFreeRamPages -= reserved;
+			totalUnreserved -= reserved;
 			}
 		tmpZone->iBma[EPageFixed]->Alloc(tmpOffset, runLength);
 		addr = tmpZone->iPhysEnd + 1;
 		}
 	while (tmpZone != endZone);
+	return totalUnreserved;
 	}
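BlockContiguousRegion() now reports how many pages of the blocked run were still occupied (not free) when the run was reserved. A condensed sketch of how that value is consumed, following the AllocContiguousRam hunk later in this changeset; variable names are taken from that hunk and the error handling is shortened:

	// Sketch only - condensed from the AllocContiguousRam changes further down.
	iContiguousReserved++;
	TUint unreservedPages = BlockContiguousRegion(addrBase, aNumPages);	// pages still occupied in the run
	TInt clearRet = ClearContiguousRegion(addrBase, zone->iPhysBase, aNumPages, offset, unreservedPages);
	if (clearRet == KErrNone)
		{// Cleared all the required pages; the run is now wholly fixed-allocated.
		iContiguousReserved--;
		aPhysAddr = addrBase;
		}
	else if (clearRet == KErrNoMemory)
		{// There are no longer enough free or discardable pages to satisfy the request.
		return KErrNoMemory;
		}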
@@ -1763 +1771 @@
 
 
 FORCE_INLINE void DRamAllocator::UnblockSetAllocRuns(	TUint& aOffset1, TUint& aOffset2, 
 														TUint aRunLength1, TUint aRunLength2, 
@@ -1832 +1840 @@
 		}
 	while (tmpZone != endZone);
 	}
 
 
-TBool DRamAllocator::ClearContiguousRegion(TPhysAddr aAddrBase, TPhysAddr aZoneBase, TUint aNumPages, TInt& aOffset)
+TUint DRamAllocator::CountPagesInRun(TPhysAddr aAddrBase, TPhysAddr aAddrEndPage, TZonePageType aType)
 	{
+	__NK_ASSERT_DEBUG(aAddrBase <= aAddrEndPage);
+	TUint totalAllocated = 0;
 	TPhysAddr addr = aAddrBase;
-	TPhysAddr addrEnd = aAddrBase + (aNumPages << KPageShift);
+	TUint tmpOffset;
+	SZone* endZone = GetZoneAndOffset(aAddrEndPage, (TInt&)tmpOffset);
+	SZone* tmpZone;
+	do
+		{
+		tmpZone = GetZoneAndOffset(addr, (TInt&)tmpOffset);
+		__NK_ASSERT_DEBUG(tmpZone != NULL);
+		TUint runLength = 	(aAddrEndPage < tmpZone->iPhysEnd)? 
+							((aAddrEndPage - addr) >> KPageShift) + 1: 
+							tmpZone->iPhysPages - tmpOffset;
+		TUint runEnd = tmpOffset + runLength - 1;
+		while (tmpOffset <= runEnd)
+			{
+			TUint run = NextAllocatedRun(tmpZone, tmpOffset, runEnd, aType);
+			totalAllocated += run;
+			tmpOffset += run;
+			}
+		addr = tmpZone->iPhysEnd + 1;
+		}
+	while (tmpZone != endZone);
+	return totalAllocated;
+	}
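CountPagesInRun() is new in this changeset. A minimal usage sketch mirroring the call added to ClearContiguousRegion() below; addrBase, addrEndPage and totalPagesInRun are illustrative names, not identifiers from the changeset:

	// Count how many pages in the physical run [addrBase, addrEndPage] are currently
	// allocated as fixed; the remainder is what ClearContiguousRegion() treats as
	// still needing to be moved or discarded.
	TUint alreadyFixed = CountPagesInRun(addrBase, addrEndPage, EPageFixed);
	TUint stillToClear = totalPagesInRun - alreadyFixed;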
       
+
+
+TInt DRamAllocator::ClearContiguousRegion(TPhysAddr aAddrBase, TPhysAddr aZoneBase, TUint aNumPages, TInt& aOffset, TUint aUnreservedPages)
+	{
+	TPhysAddr addr = aAddrBase;
+	TPhysAddr addrEndPage = aAddrBase + ((aNumPages -1 )<< KPageShift);
 	TInt contigOffset = 0;
 	SZone* contigZone = GetZoneAndOffset(addr, contigOffset);
-	for (; addr != addrEnd; addr += KPageSize, contigOffset++)
+	TUint unreservedPages = aUnreservedPages;
+	for (; addr <= addrEndPage; addr += KPageSize, contigOffset++)
 		{
 		if (contigZone->iPhysEnd < addr)
 			{
 			contigZone = GetZoneAndOffset(addr, contigOffset);
 			__NK_ASSERT_DEBUG(contigZone != NULL);
@@ -1850 +1888 @@
 
 		__NK_ASSERT_DEBUG(contigZone != NULL);
 		__NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotFree(contigOffset, 1));
 		__NK_ASSERT_DEBUG(SPageInfo::SafeFromPhysAddr(addr) != NULL);
 
-		// WARNING - This may flash the ram alloc mutex.
-		TInt exRet = M::MoveAndAllocPage(addr, EPageFixed);
-		if (exRet != KErrNone)
+		if (unreservedPages > iTotalFreeRamPages)
+			{// May need to discard some pages so there is free space for the 
+			// pages in the contiguous run to be moved to.
+			TUint requiredPages = unreservedPages - iTotalFreeRamPages;
+			if (requiredPages)
+				{// Ask the pager to get free some pages.
+				M::GetFreePages(requiredPages);
+
+				// The ram alloc lock may have been flashed so ensure that we still have
+				// enough free ram to complete the allocation.
+				TUint remainingPages = ((addrEndPage - addr) >> KPageShift) + 1;
+				unreservedPages = remainingPages - CountPagesInRun(addr, addrEndPage, EPageFixed);
+				if (unreservedPages > iTotalFreeRamPages + M::NumberOfFreeDpPages())
+					{// Not enough free space and not enough freeable pages.
+					return KErrNoMemory;
+					}
+				}
+			}
+
+		TInt r = M::MoveAndAllocPage(addr, EPageFixed);
+		if (r != KErrNone)
 			{// This page couldn't be moved or discarded so 
 			// restart the search the page after this one.
-			__KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail contigOffset 0x%x exRet %d", contigOffset, exRet));
+			__KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail contigOffset 0x%x r %d", contigOffset, r));
 			aOffset = (addr < aZoneBase)? 0 : contigOffset + 1;
-			break;
+			return r;
 			}
-		}
-	return addr == addrEnd;
+		__NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotFree(contigOffset, 1));
+		__NK_ASSERT_DEBUG(contigZone->iBma[KBmaAllPages]->NotFree(contigOffset, 1));
+		__NK_ASSERT_DEBUG(contigZone->iBma[EPageDiscard]->NotAllocated(contigOffset, 1));
+		__NK_ASSERT_DEBUG(contigZone->iBma[EPageMovable]->NotAllocated(contigOffset, 1));
+		}
+
+	// Successfully cleared the contiguous run
+	return KErrNone;
 	}
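The page accounting added above is easier to follow with concrete numbers. An illustrative sketch with made-up figures (not part of the changeset); only the names mentioned in the comments refer to the real code:

	// Illustrative numbers only: the decision the new code in ClearContiguousRegion()
	// makes before asking the pager for free pages.
	void SketchClearRegionAccounting()
		{
		const TUint KRunPages        = 16;	// pages in the contiguous run being cleared
		const TUint KFreeWhenBlocked = 10;	// pages already free when the run was blocked
		const TUint KFreeRamPages    = 2;	// stand-in for iTotalFreeRamPages at this point

		TUint unreservedPages = KRunPages - KFreeWhenBlocked;	// 6 occupied pages need a new home
		if (unreservedPages > KFreeRamPages)
			{
			TUint requiredPages = unreservedPages - KFreeRamPages;	// 4 pages short
			(void)requiredPages;	// the real code passes this count to M::GetFreePages()
			}
		}

Because M::GetFreePages() may flash the RAM alloc mutex, the real code then re-counts the remaining run with CountPagesInRun(addr, addrEndPage, EPageFixed) and abandons the allocation with KErrNoMemory if even free plus discardable pages can no longer cover the shortfall.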
@@ -1867 +1929 @@
 
 
 /**
 Search through the zones for the requested contiguous RAM, first in preference 
@@ -1886 +1948 @@
 	M::RamAllocIsLocked();
 
 	if ((TUint)aNumPages > iTotalFreeRamPages + M::NumberOfFreeDpPages())
 		{// Not enough free space and not enough freeable pages.
 		return KErrNoMemory;
+		}
+	if (aNumPages > iTotalFreeRamPages)
+		{// Need to discard some pages so there is free space for the pages in 
+		// the contiguous run to be moved to.
+		TUint requiredPages = aNumPages - iTotalFreeRamPages;
+		if (!M::GetFreePages(requiredPages))
+			return KErrNoMemory;
 		}
 
 	TInt alignWrtPage = Max(aAlign - KPageShift, 0);
 	TUint32 alignmask = (1u << alignWrtPage) - 1;
 
@@ -1948 +2017 @@
 				TPhysAddr addrBase = TPhysAddr((base + offset - carryImmov + alignmask) & ~alignmask) << KPageShift;
 				__KTRACE_OPT(KMMU2, Kern::Printf(">AllocContig fix run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread));
 				
 				// Block the contiguous region from being allocated.
 				iContiguousReserved++;
-				BlockContiguousRegion(addrBase, aNumPages);
-				if (ClearContiguousRegion(addrBase, zone->iPhysBase, aNumPages, offset))
+				TUint unreservedPages = BlockContiguousRegion(addrBase, aNumPages);
+				TInt clearRet = ClearContiguousRegion(addrBase, zone->iPhysBase, aNumPages, offset, unreservedPages);
+				if (clearRet == KErrNone)
 					{// Cleared all the required pages.
 					// Return address of physical page at the start of the region.
 					iContiguousReserved--;
 					aPhysAddr = addrBase;
 					__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
@@ -1975 +2045 @@
 					// within the current RAM zone or skip onto the next 
 					// one if at the end of this one.
 					carryImmov = 0;
 					carryAll = 0;
 					__KTRACE_OPT(KMMU2, Kern::Printf("<AllocContigfail run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread));
+					if (clearRet == KErrNoMemory)
+						{// There are no longer enough free or discardable pages to 
+						// be able to fulfill this allocation.
+						return KErrNoMemory;
+						}
 					}
 				}
 			}
 		// Keep searching immovable page bma of the current RAM zone until 
 		// gone past end of RAM zone or no run can be found.
@@ -2097 +2172 @@
 						__NK_ASSERT_DEBUG(contigZone != NULL);
 						__NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotAllocated(contigOffset, 1));
 						__NK_ASSERT_DEBUG(SPageInfo::SafeFromPhysAddr(addr) != NULL);
 
 						TPhysAddr newAddr;
-						TInt moveRet = M::MovePage(addr, newAddr, contigZone->iId, EFalse);
+						TInt moveRet = M::MovePage(addr, newAddr, contigZone->iId, 0);
 						if (moveRet != KErrNone && moveRet != KErrNotFound)
 							{// This page couldn't be moved or discarded so 
 							// restart the search the page after this one.
 							__KTRACE_OPT(KMMU2, 
 										Kern::Printf("ContigMov fail offset %x moveRet %d addr %x carryImmov %x", 
@@ -2460 +2535 @@
 @param aZone	The zone to find the next allocated page in.
 @param aOffset	On entry this is the offset from which the next allocated
 				page in the zone should be found, on return it will be the offset 
 				of the next allocated page.
 @param aEndOffset The last offset within this RAM zone to check for allocated runs.
-@return The length of any run found, KErrNotFound if no more pages in
-the zone after aOffset are allocated, KErrArgument if aOffset is outside the zone.
+@return The length of any run found.
 */
 TInt DRamAllocator::NextAllocatedRun(SZone* aZone, TUint& aOffset, TUint aEndOffset, TZonePageType aType) const
 	{
 	const TUint KWordAlignMask = KMaxTUint32 << 5;
 