		__KTRACE_OPT(KMMU,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));	// counts rolled over
		__KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown],
				aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable], aZone->iAllocPages[EPageDiscard]));
		Panic(EZonesCountErr);
		}
	__KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));
	__KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown],
			aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable], aZone->iAllocPages[EPageDiscard]));

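	// The per-type BMAs can legitimately disagree with the all-pages BMA while a
	// contiguous run is reserved (iContiguousReserved != 0), so only verify them
	// when no reservation is in progress.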
	if (!iContiguousReserved)
		{
		__ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
		TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
		TUint allocPages;
		if (aType == EPageFixed || aType == EPageUnknown)
			allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed];
		else
...

		__KTRACE_OPT(KMMU,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));	// counts rolled over
		__KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown],
				aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable], aZone->iAllocPages[EPageDiscard]));
		Panic(EZonesCountErr);
		}
	__KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));
	__KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown],
			aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable], aZone->iAllocPages[EPageDiscard]));

	if (!iContiguousReserved)
		{
		__ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
		TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
		TUint allocPages;
		if (aType == EPageFixed || aType == EPageUnknown)
			allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed];
		else
...

			--aNumPages;
			++aPageList;
			pa += KPageSize;
			}
		__KTRACE_OPT(KMMU2,Kern::Printf("%d consecutive pages, zp_rem=%x, %d remaining pages",n,zp_rem,aNumPages));
		TBitMapAllocator& bmaType = *(z->iBma[aType]);
		bmaType.Free(ix,n);

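		// If a contiguous run is being reserved, some pages in this range may have
		// been claimed as fixed by AllocContiguousRam(); those must stay allocated,
		// so only the remainder is returned to the free pool and the zone counts
		// are rebalanced below.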
		if (iContiguousReserved && aType != EPageFixed)
			{// See if a page has been reserved by AllocContiguous() in this range.
			TUint pagesFreed = 0;
			TUint allocStart = ix;
			TUint freeOffset = ix;
			TUint endOffset = ix + n - 1;
			while (freeOffset <= endOffset)
				{
				TUint runLength = NextAllocatedRun(z, allocStart, endOffset, EPageFixed);
				if (allocStart > freeOffset)
					{
					TUint freed = allocStart - freeOffset;
					bmaAll.Free(freeOffset, freed);
					pagesFreed += freed;
					}
				allocStart += runLength;
				freeOffset = allocStart;
				}
			iTotalFreeRamPages += pagesFreed;
			ZoneFreePages(z, n, aType);
			ZoneAllocPages(z, n - pagesFreed, EPageFixed);
			}
		else
			{
			bmaAll.Free(ix,n);
			iTotalFreeRamPages += n;
			ZoneFreePages(z, n, aType);
			}
#ifdef BTRACE_RAM_ALLOCATOR
		BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocFreePages, aType, n, first_pa);
#endif
		}
#ifdef BTRACE_RAM_ALLOCATOR
	BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocFreePagesEnd);
#endif
	}
|
/**
Attempt to clear up to the required amount of discardable or movable pages
from the RAM zone.

...
	{
	__KTRACE_OPT(KMMU,
		Kern::Printf("ZoneClearPages: ID 0x%x, req 0x%x", aZone.iId, aRequiredPages));
	// Discard the required number of discardable pages.
	TUint offset = 0;
	for (; aRequiredPages; offset++)
		{
		TInt r = NextAllocatedPage(&aZone, offset, EPageDiscard);
		if (r != KErrNone)
			break;
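		// Skip pages that have been reserved as fixed by a pending contiguous allocation.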
		if (iContiguousReserved && aZone.iBma[EPageFixed]->NotFree(offset, 1))
			{
			offset++;
			continue;
			}
		TPhysAddr physAddr = (offset << KPageShift) + aZone.iPhysBase;
		TInt discarded = M::DiscardPage(physAddr, aZone.iId, M::EMoveDisMoveDirty);
		if (discarded == KErrNone)
			{// The page was successfully discarded.
			aRequiredPages--;
			}
		}
	// Move the required number of movable pages.
	for (offset = 0; aRequiredPages; offset++)
		{
		TInt r = NextAllocatedPage(&aZone, offset, EPageMovable);
		if (r != KErrNone)
			break;
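		// As above, skip pages reserved by a pending contiguous allocation.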
		if (iContiguousReserved && aZone.iBma[EPageFixed]->NotFree(offset, 1))
			{
			offset++;
			continue;
			}
		TPhysAddr physAddr = (offset << KPageShift) + aZone.iPhysBase;
		TPhysAddr newAddr = KPhysAddrInvalid;
		if (M::MovePage(physAddr, newAddr, aZone.iId, 0) == KErrNone)
			{// The page was successfully moved.
#ifdef _DEBUG
			TInt newOffset = 0;
			SZone* newZone = GetZoneAndOffset(newAddr, newOffset);
			__NK_ASSERT_DEBUG(newZone != &aZone);
#endif
			aRequiredPages--;
			}
		}
	}

/** Attempt to allocate pages into a particular zone. Pages will not
	always be contiguous.
...

	aState = (TZoneSearchState)currentState;
	return r;
	}


#if !defined(__MEMMODEL_MULTIPLE__) && !defined(__MEMMODEL_MOVING__)
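/**
Prevent the pages in this contiguous region from being allocated to any other
request while it is being cleared: reserve the free pages in the all-pages BMA
and mark the whole run as fixed. Pages already allocated as movable or
discardable are left for ClearContiguousRegion() to move or discard.

@param aAddrBase	The base address of the contiguous region.
@param aNumPages	The number of pages in the region; must not be zero.
@return The number of pages in the region that could not be reserved because
		they are still allocated elsewhere.
*/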
TUint DRamAllocator::BlockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages)
	{
	// Shouldn't be asked to block zero pages, addrEndPage would be wrong if we did.
	__NK_ASSERT_DEBUG(aNumPages);
	TPhysAddr addr = aAddrBase;
	TPhysAddr addrEndPage = aAddrBase + ((aNumPages - 1) << KPageShift);
	TInt tmpOffset;
	SZone* endZone = GetZoneAndOffset(addrEndPage, tmpOffset);
	SZone* tmpZone;
	TUint totalUnreserved = aNumPages;
	do
		{
		tmpZone = GetZoneAndOffset(addr, tmpOffset);
		__NK_ASSERT_DEBUG(tmpZone != NULL);
		TUint runLength = (addrEndPage < tmpZone->iPhysEnd)?
				((addrEndPage - addr) >> KPageShift) + 1 :
				tmpZone->iPhysPages - tmpOffset;
		TUint reserved = tmpZone->iBma[KBmaAllPages]->SelectiveAlloc(tmpOffset, runLength);
		if (reserved)
			{
#ifdef _DEBUG
			TUint runEnd = tmpOffset + runLength;
			TUint free = 0;
			for (TUint i = tmpOffset; i < runEnd; i++)
				if (tmpZone->iBma[EPageMovable]->NotAllocated(i,1) && tmpZone->iBma[EPageDiscard]->NotAllocated(i,1))
					free++;
			__NK_ASSERT_DEBUG(free == reserved);
#endif
			ZoneAllocPages(tmpZone, reserved, EPageFixed);
			iTotalFreeRamPages -= reserved;
			totalUnreserved -= reserved;
			}
		tmpZone->iBma[EPageFixed]->Alloc(tmpOffset, runLength);
		addr = tmpZone->iPhysEnd + 1;
		}
	while (tmpZone != endZone);
	return totalUnreserved;
	}

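/**
Helper for UnblockContiguousRegion(): take the run starting at aOffset1 as the
next allocated run to preserve and, when the run starting at aOffset2 follows
on immediately from it, merge the two into a single run.
*/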
FORCE_INLINE void DRamAllocator::UnblockSetAllocRuns(	TUint& aOffset1, TUint& aOffset2,
														TUint aRunLength1, TUint aRunLength2,
														TUint& aAllocLength, TUint& aAllocStart)
	{
	aAllocStart = aOffset1;
	aAllocLength = aRunLength1;
	aOffset1 += aAllocLength;
	if (aOffset1 == aOffset2)
		{
		aAllocLength += aRunLength2;
		aOffset2 += aRunLength2;
		aOffset1 = aOffset2;
		}
	}

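/**
Undo BlockContiguousRegion(): remove the fixed-page reservation over the region
and return to the free pool every page that is not still allocated as movable
or discardable.

@param aAddrBase	The base address of the contiguous region.
@param aNumPages	The number of pages in the region; must not be zero.
*/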
void DRamAllocator::UnblockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages)
	{
	// Shouldn't be asked to unblock zero pages, addrEndPage would be wrong if we did.
	__NK_ASSERT_DEBUG(aNumPages);
	TPhysAddr addr = aAddrBase;
	TPhysAddr addrEndPage = aAddrBase + ((aNumPages - 1) << KPageShift);
	TInt tmpOffset;
	SZone* endZone = GetZoneAndOffset(addrEndPage, tmpOffset);
	SZone* tmpZone;
	do
		{
		tmpZone = GetZoneAndOffset(addr, tmpOffset);
		__NK_ASSERT_DEBUG(tmpZone != NULL);
		TUint runLength = (addrEndPage < tmpZone->iPhysEnd)?
				((addrEndPage - addr) >> KPageShift) + 1 :
				tmpZone->iPhysPages - tmpOffset;
		TUint unreserved = 0;
		TUint runEnd = tmpOffset + runLength - 1;
		TUint freeOffset = tmpOffset;
		TUint discardOffset = freeOffset;
		TUint movableOffset = freeOffset;
		__KTRACE_OPT(KMMU2, Kern::Printf("freeOff %d, runEnd %d", freeOffset, runEnd));
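		// Walk the allocated movable and discardable runs within this zone's part of
		// the region; the gaps between them are pages that BlockContiguousRegion()
		// reserved and can now be freed again.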
		while (freeOffset <= runEnd)
			{
			TUint discardRun;
			TUint movableRun;
			discardRun = NextAllocatedRun(tmpZone, discardOffset, runEnd, EPageDiscard);
			movableRun = NextAllocatedRun(tmpZone, movableOffset, runEnd, EPageMovable);
			TUint allocLength;
			TUint allocStart;
			__KTRACE_OPT(KMMU2, Kern::Printf("disOff %d len %d movOff %d len %d", discardOffset, discardRun, movableOffset, movableRun));
			if (discardOffset < movableOffset)
				UnblockSetAllocRuns(discardOffset, movableOffset, discardRun, movableRun, allocLength, allocStart);
			else
				UnblockSetAllocRuns(movableOffset, discardOffset, movableRun, discardRun, allocLength, allocStart);

			if (allocStart > freeOffset)
				{
				unreserved += allocStart - freeOffset;
				tmpZone->iBma[KBmaAllPages]->Free(freeOffset, allocStart - freeOffset);
				__NK_ASSERT_DEBUG(	!tmpZone->iBma[EPageMovable]->NotFree(freeOffset, allocStart - freeOffset) &&
									!tmpZone->iBma[EPageDiscard]->NotFree(freeOffset, allocStart - freeOffset));
				}
			__KTRACE_OPT(KMMU2, Kern::Printf("disOff %d len %d movOff %d len %d start %d len %d", discardOffset, discardRun, movableOffset, movableRun, allocStart, allocLength));
			freeOffset = allocStart + allocLength;
			__KTRACE_OPT(KMMU2, Kern::Printf("freeOff %d", freeOffset));
			}
		tmpZone->iBma[EPageFixed]->Free(tmpOffset, runLength);
		ZoneFreePages(tmpZone, unreserved, EPageFixed);
		iTotalFreeRamPages += unreserved;
		addr = tmpZone->iPhysEnd + 1;
		}
	while (tmpZone != endZone);
	}

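/**
Count the pages of the given type that are allocated within the physical
address range aAddrBase..aAddrEndPage, walking every RAM zone the range spans.

@param aAddrBase	The physical address of the first page in the range.
@param aAddrEndPage	The physical address of the last page in the range.
@param aType		The type of page to count.
@return The total number of pages of aType allocated in the range.
*/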
TUint DRamAllocator::CountPagesInRun(TPhysAddr aAddrBase, TPhysAddr aAddrEndPage, TZonePageType aType)
	{
	__NK_ASSERT_DEBUG(aAddrBase <= aAddrEndPage);
	TUint totalAllocated = 0;
	TPhysAddr addr = aAddrBase;
	TUint tmpOffset;
	SZone* endZone = GetZoneAndOffset(aAddrEndPage, (TInt&)tmpOffset);
	SZone* tmpZone;
	do
		{
		tmpZone = GetZoneAndOffset(addr, (TInt&)tmpOffset);
		__NK_ASSERT_DEBUG(tmpZone != NULL);
		TUint runLength = (aAddrEndPage < tmpZone->iPhysEnd)?
				((aAddrEndPage - addr) >> KPageShift) + 1 :
				tmpZone->iPhysPages - tmpOffset;
		TUint runEnd = tmpOffset + runLength - 1;
		while (tmpOffset <= runEnd)
			{
			TUint run = NextAllocatedRun(tmpZone, tmpOffset, runEnd, aType);
			totalAllocated += run;
			tmpOffset += run;
			}
		addr = tmpZone->iPhysEnd + 1;
		}
	while (tmpZone != endZone);
	return totalAllocated;
	}

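/**
Clear a blocked contiguous region by moving or discarding every page still
allocated within it, asking the pager for free pages whenever the pending moves
would exhaust free RAM.

@param aAddrBase	The base address of the contiguous region.
@param aZoneBase	The base address of the RAM zone the search started in.
@param aNumPages	The number of pages in the region.
@param aOffset		On failure, set to the offset from which to restart the search.
@param aUnreservedPages The number of pages in the region that BlockContiguousRegion()
		could not reserve.
@return KErrNone when the region has been fully cleared, KErrNoMemory if there is
		no longer enough free or discardable RAM, or the error from M::MoveAndAllocPage().
*/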
TInt DRamAllocator::ClearContiguousRegion(TPhysAddr aAddrBase, TPhysAddr aZoneBase, TUint aNumPages, TInt& aOffset, TUint aUnreservedPages)
	{
	TPhysAddr addr = aAddrBase;
	TPhysAddr addrEndPage = aAddrBase + ((aNumPages - 1) << KPageShift);
	TInt contigOffset = 0;
	SZone* contigZone = GetZoneAndOffset(addr, contigOffset);
	TUint unreservedPages = aUnreservedPages;
	for (; addr <= addrEndPage; addr += KPageSize, contigOffset++)
		{
		if (contigZone->iPhysEnd < addr)
			{
			contigZone = GetZoneAndOffset(addr, contigOffset);
			__NK_ASSERT_DEBUG(contigZone != NULL);
			}

		__NK_ASSERT_DEBUG(contigZone != NULL);
		__NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotFree(contigOffset, 1));
		__NK_ASSERT_DEBUG(SPageInfo::SafeFromPhysAddr(addr) != NULL);

		if (unreservedPages > iTotalFreeRamPages)
			{// May need to discard some pages so there is free space for the
			// pages in the contiguous run to be moved to.
			TUint requiredPages = unreservedPages - iTotalFreeRamPages;
			if (requiredPages)
				{// Ask the pager to free up some pages.
				M::GetFreePages(requiredPages);

				// The ram alloc lock may have been flashed so ensure that we still have
				// enough free ram to complete the allocation.
				TUint remainingPages = ((addrEndPage - addr) >> KPageShift) + 1;
				unreservedPages = remainingPages - CountPagesInRun(addr, addrEndPage, EPageFixed);
				if (unreservedPages > iTotalFreeRamPages + M::NumberOfFreeDpPages())
					{// Not enough free space and not enough freeable pages.
					return KErrNoMemory;
					}
				}
			}

		TInt r = M::MoveAndAllocPage(addr, EPageFixed);
		if (r != KErrNone)
			{// This page couldn't be moved or discarded so
			// restart the search at the page after this one.
			__KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail contigOffset 0x%x r %d", contigOffset, r));
			aOffset = (addr < aZoneBase)? 0 : contigOffset + 1;
			return r;
			}
		__NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotFree(contigOffset, 1));
		__NK_ASSERT_DEBUG(contigZone->iBma[KBmaAllPages]->NotFree(contigOffset, 1));
		__NK_ASSERT_DEBUG(contigZone->iBma[EPageDiscard]->NotAllocated(contigOffset, 1));
		__NK_ASSERT_DEBUG(contigZone->iBma[EPageMovable]->NotAllocated(contigOffset, 1));
		}

	// Successfully cleared the contiguous run
	return KErrNone;
	}

/**
Search through the zones for the requested contiguous RAM, first in preference
order then, if that fails, in address order.

No support for non-fixed pages as this will discard and move pages if required.

@param aNumPages The number of contiguous pages to find
@param aPhysAddr Will contain the base address of any contiguous run if found
@param aAlign Alignment specified as the alignment shift

@return KErrNone on success, KErrNoMemory otherwise
*/
TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TInt aAlign)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam size %08x align %d",aNumPages,aAlign));

	M::RamAllocIsLocked();

	if ((TUint)aNumPages > iTotalFreeRamPages + M::NumberOfFreeDpPages())
		{// Not enough free space and not enough freeable pages.
		return KErrNoMemory;
		}
	if (aNumPages > iTotalFreeRamPages)
		{// Need to discard some pages so there is free space for the pages in
		// the contiguous run to be moved to.
		TUint requiredPages = aNumPages - iTotalFreeRamPages;
		if (!M::GetFreePages(requiredPages))
			return KErrNoMemory;
		}

	TInt alignWrtPage = Max(aAlign - KPageShift, 0);
	TUint32 alignmask = (1u << alignWrtPage) - 1;

	// Attempt to find enough pages searching in preference order first then
	// in address order
...

		__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryAll=%08x offset=%08x", base, carryAll, offset));
		offset = bmaAll.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryAll, runLength);
		__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));

		if (offset >= 0)
			{
			// Have found enough contiguous pages so return address of physical page
			// at the start of the region
			aPhysAddr = TPhysAddr((base + offset - carryAll + alignmask) & ~alignmask) << KPageShift;
			MarkPagesAllocated(aPhysAddr, aNumPages, EPageFixed);

			__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
#ifdef BTRACE_RAM_ALLOCATOR
			BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr);
#endif
			return KErrNone;
			}
		// No run found when looking in just the free pages so see if this
		// RAM zone could be used if pages were moved or discarded.
		TBitMapAllocator& bmaImmov = *(zone->iBma[EPageFixed]);
		offset = 0;	// Clear so searches whole of fixed BMA on the first pass.
		do
			{
			__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryImmov=%08x offset=%08x", base, carryImmov, offset));
			offset = bmaImmov.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryImmov, runLength, offset);
			__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
			if (offset >= 0)
				{// Have found a run in immovable page bma so attempt to clear
				// it for the allocation.
				TPhysAddr addrBase = TPhysAddr((base + offset - carryImmov + alignmask) & ~alignmask) << KPageShift;
				__KTRACE_OPT(KMMU2, Kern::Printf(">AllocContig fix run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread));

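				// Reservation protocol: bump iContiguousReserved so other paths stop
				// trusting the per-type BMAs, block the run so no one else can claim
				// it, then move or discard whatever is still allocated inside it.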
				// Block the contiguous region from being allocated.
				iContiguousReserved++;
				TUint unreservedPages = BlockContiguousRegion(addrBase, aNumPages);
				TInt clearRet = ClearContiguousRegion(addrBase, zone->iPhysBase, aNumPages, offset, unreservedPages);
				if (clearRet == KErrNone)
					{// Cleared all the required pages.
					// Return address of physical page at the start of the region.
					iContiguousReserved--;
					aPhysAddr = addrBase;
					__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
#ifdef BTRACE_RAM_ALLOCATOR
					BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr);
#endif
					__KTRACE_OPT(KMMU2, Kern::Printf("<AllocContig suc run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread));
					return KErrNone;
					}
				else
					{
					// Unblock the contiguous region.
					UnblockContiguousRegion(addrBase, aNumPages);
					iContiguousReserved--;
					__KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail offset 0x%x carryImmov %x",
									offset, carryImmov));
					// Can't rely on RAM zone preference ordering being the same so
					// clear the carries and restart the search from within the current
					// RAM zone, or skip onto the next one if at the end of this one.
					carryImmov = 0;
					carryAll = 0;
					__KTRACE_OPT(KMMU2, Kern::Printf("<AllocContigfail run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread));
					if (clearRet == KErrNoMemory)
						{// There are no longer enough free or discardable pages to
						// be able to fulfill this allocation.
						return KErrNoMemory;
						}
					}
				}
			}
		// Keep searching immovable page bma of the current RAM zone until
		// gone past end of RAM zone or no run can be found.
		while (offset >= 0 && (TUint)offset < zone->iPhysPages);
		}
	return KErrNoMemory;
	}
|
#else

/**
Search through the zones for the requested contiguous RAM, first in preference
order then, if that fails, in address order.

No support for non-fixed pages as this will discard and move pages if required.

@param aNumPages The number of contiguous pages to find
@param aPhysAddr Will contain the base address of any contiguous run if found
@param aAlign Alignment specified as the alignment shift

@return KErrNone on success, KErrNoMemory otherwise
*/
TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TInt aAlign)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam size %08x align %d",aNumPages,aAlign));

	M::RamAllocIsLocked();

	TInt alignWrtPage = Max(aAlign - KPageShift, 0);
	TUint32 alignmask = (1u << alignWrtPage) - 1;

	// Attempt to find enough pages searching in preference order first then
	// in address order
	TZoneSearchState searchState = EZoneSearchPref;
	SZone* zone;
	SZone* prevZone = NULL;
	TInt carryAll = 0;		// Carry for all pages bma, clear to start new run.
	TInt carryImmov = 0;	// Carry for immovable pages bma, clear to start new run.
	TInt base = 0;
	TInt offset = 0;
	iZoneTmpAddrIndex = -1;
	iZoneTmpPrefLink = iZonePrefList.First();
	while (NextAllocZone(zone, searchState, EPageFixed, KRamZoneInvalidId, EFalse))
		{
		// Be sure to start from scratch if zone not contiguous with previous zone
		if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd))
			{
			carryAll = 0;
			carryImmov = 0;
			}
		prevZone = zone;
		TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
		base = TInt(zone->iPhysBase >> KPageShift);
		TInt runLength;
		__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryAll=%08x offset=%08x", base, carryAll, offset));
		offset = bmaAll.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryAll, runLength);
		__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));

		if (offset >= 0)
			{// Have found enough contiguous pages so return address of physical page
			// at the start of the region
			aPhysAddr = TPhysAddr((base + offset - carryAll + alignmask) & ~alignmask) << KPageShift;
			MarkPagesAllocated(aPhysAddr, aNumPages, EPageFixed);

			__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
#ifdef BTRACE_RAM_ALLOCATOR
			BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr);
#endif
			return KErrNone;
			}
		else
			{// No run found when looking in just the free pages so see if this
...

	__NK_ASSERT_DEBUG(aZone - iZones < (TInt)iNumZones);
	// Makes things simpler for bma selection.
	__NK_ASSERT_DEBUG(aType != EPageUnknown);

	TUint zoneEndOffset = aZone->iPhysPages - 1;
	if (aOffset > zoneEndOffset)
		{// Starting point is outside the zone
		return KErrArgument;
		}

	TUint wordIndex = aOffset >> 5;
	TUint endWordIndex = zoneEndOffset >> 5;

	// Select the BMA to search,
	TUint bmaIndex = (aType == EPageTypes)? KBmaAllPages : aType;
	TUint32* map = &(aZone->iBma[bmaIndex]->iMap[wordIndex]);
	TUint32* mapEnd = &(aZone->iBma[bmaIndex]->iMap[endWordIndex]);
	TUint32 bits = *map;

	// Set bits for pages before 'offset' (i.e. ones we want to ignore)...
	bits |= ~(KMaxTUint32 >> (aOffset & ~KWordAlignMask));

	// Find the first bit map word from aOffset in aZone with allocated pages
	while (bits == KMaxTUint32 && map < mapEnd)
		{
		bits = *++map;
		}

	if (map == mapEnd)
		{// Have reached the last bit mask word so set the bits that are
		// outside of the zone so that they are ignored.
		bits |= (KMaxTUint32 >> (zoneEndOffset & ~KWordAlignMask)) >> 1;
		}

	if (bits == KMaxTUint32)
		{// No allocated pages found after aOffset in aZone.
		return KErrNotFound;
		}

	// Now we have bits with allocated pages in it so determine the exact
	// offset of the next allocated page
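	// For example, if bits == 0xffff7fff then ~bits == 0x00008000, __e32_find_ms1_32()
	// returns 15 and the allocated page is at offset 31 - 15 == 16 within this word.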
	TInt msOne = __e32_find_ms1_32(~bits);
	__NK_ASSERT_DEBUG(msOne >= 0);	// Must have at least one allocated page in the word.
	TUint msOneOffset = 31 - msOne;
	aOffset = ((map - aZone->iBma[bmaIndex]->iMap) << 5) + msOneOffset;
	return KErrNone;
	}
|
/**
Get the next run of pages in this zone that are allocated after aOffset.

@param aZone	The zone to find the next allocated page in.
@param aOffset	On entry this is the offset from which the next allocated
				page in the zone should be found, on return it will be the offset
				of the next allocated page.
@param aEndOffset The last offset within this RAM zone to check for allocated runs.
@param aType	The type of page to find the next allocated run of.
@return The length of any run found.
*/
TInt DRamAllocator::NextAllocatedRun(SZone* aZone, TUint& aOffset, TUint aEndOffset, TZonePageType aType) const
	{
	const TUint KWordAlignMask = KMaxTUint32 << 5;

	M::RamAllocIsLocked();

	__NK_ASSERT_DEBUG(aZone - iZones < (TInt)iNumZones);
	// Makes things simpler for bma selection.
	__NK_ASSERT_DEBUG(aType != EPageUnknown);

	if (aOffset > aEndOffset)
		{// UnblockContiguousRegion() has already searched the whole range for this page type.
		return 0;
		}

	TUint wordIndex = aOffset >> 5;
	TUint endWordIndex = aEndOffset >> 5;

	// Select the BMA to search,
	TUint bmaIndex = (aType == EPageTypes)? KBmaAllPages : aType;
	TUint32* map = &(aZone->iBma[bmaIndex]->iMap[wordIndex]);
	TUint32* mapEnd = &(aZone->iBma[bmaIndex]->iMap[endWordIndex]);
	TUint32 bits = *map;

	// Set bits for pages before 'offset' (i.e. ones we want to ignore)...
	bits |= ~(KMaxTUint32 >> (aOffset & ~KWordAlignMask));

	// Find the first bit map word from aOffset in aZone with allocated pages
	while (bits == KMaxTUint32 && map < mapEnd)
		{
		bits = *++map;
		}

	if (map == mapEnd)
		{// Have reached the last bit mask word so set the bits that are
		// outside of the range so that they are ignored.
		bits |= (KMaxTUint32 >> (aEndOffset & ~KWordAlignMask)) >> 1;
		}

	if (bits == KMaxTUint32)
		{// No allocated pages found in the range.
		aOffset = aEndOffset + 1;
		return 0;
		}

	// Now we have bits with allocated pages in it so determine the exact
	// offset of the next allocated page
	TInt msOne = __e32_find_ms1_32(~bits);
	__NK_ASSERT_DEBUG(msOne >= 0);	// Must have at least one allocated page in the word.
	TUint msOneOffset = 31 - msOne;
	aOffset = ((map - aZone->iBma[bmaIndex]->iMap) << 5) + msOneOffset;
	TUint32* runWord = map;

	if (map < mapEnd && __e32_bit_count_32(~bits) == msOne + 1)
		{// The whole of the region in this word is allocated.
		// Find the next word which isn't completely allocated within the range.
		do
			{
			bits = *++map;
			}
		while (!bits && map < mapEnd);
		}

	// Clear any bits before the run so the next free page can be found with __e32_find_ms1_32().
	if (runWord == map)
		bits &= KMaxTUint32 >> (aOffset & ~KWordAlignMask);
	TInt msFree = __e32_find_ms1_32(bits);
	__NK_ASSERT_DEBUG(msFree >= 0 || map == mapEnd);
	TUint msFreeOffset = (msFree >= 0)? 31 - msFree : 32;
	TUint endIndex = map - aZone->iBma[bmaIndex]->iMap;
	TUint runEnd = (endIndex << 5) + msFreeOffset;
	if (runEnd > aEndOffset + 1)	// Ensure we don't go past the range.
		runEnd = aEndOffset + 1;
	__NK_ASSERT_DEBUG(runEnd > aOffset);

	return runEnd - aOffset;
	}

/**
See if any of the least preferable RAM zones can be emptied. If they can then
initialise the allocator for a general defragmentation operation.
