352 __KTRACE_OPT(KMMU,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc)); // counts rolled over |
352 __KTRACE_OPT(KMMU,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc)); // counts rolled over |
353 __KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], |
353 __KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], |
354 aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard])); |
354 aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard])); |
355 Panic(EZonesCountErr); |
355 Panic(EZonesCountErr); |
356 } |
356 } |
357 __ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent)); |
|
358 __KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc)); |
357 __KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc)); |
359 __KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], |
358 __KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], |
360 aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard])); |
359 aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard])); |
361 |
360 |
362 if (iAllowBmaVerify) |
361 if (!iContiguousReserved) |
363 { |
362 { |
|
363 __ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent)); |
364 TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]); |
364 TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]); |
365 TUint allocPages; |
365 TUint allocPages; |
366 if (aType == EPageFixed || aType == EPageUnknown) |
366 if (aType == EPageFixed || aType == EPageUnknown) |
367 allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed]; |
367 allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed]; |
368 else |
368 else |
493 __KTRACE_OPT(KMMU,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc)); // counts rolled over |
493 __KTRACE_OPT(KMMU,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc)); // counts rolled over |
494 __KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], |
494 __KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], |
495 aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard])); |
495 aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard])); |
496 Panic(EZonesCountErr); |
496 Panic(EZonesCountErr); |
497 } |
497 } |
498 __ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent)); |
|
499 __KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc)); |
498 __KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc)); |
500 __KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], |
499 __KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], |
501 aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard])); |
500 aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard])); |
502 |
501 |
503 if (iAllowBmaVerify) |
502 if (!iContiguousReserved) |
504 { |
503 { |
|
504 __ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent)); |
505 TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]); |
505 TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]); |
506 TUint allocPages; |
506 TUint allocPages; |
507 if (aType == EPageFixed || aType == EPageUnknown) |
507 if (aType == EPageFixed || aType == EPageUnknown) |
508 allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed]; |
508 allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed]; |
509 else |
509 else |
1199 return KErrArgument; |
1206 return KErrArgument; |
1200 } |
1207 } |
1201 __KTRACE_OPT(KMMU2,Kern::Printf("Zone index %d page index %04x",z-iZones,n)); |
1208 __KTRACE_OPT(KMMU2,Kern::Printf("Zone index %d page index %04x",z-iZones,n)); |
1202 TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]); |
1209 TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]); |
1203 TBitMapAllocator& bmaType = *(z->iBma[aType]); |
1210 TBitMapAllocator& bmaType = *(z->iBma[aType]); |
1204 bmaAll.Free(n); |
1211 |
1205 bmaType.Free(n); |
1212 bmaType.Free(n); |
1206 ++iTotalFreeRamPages; |
1213 if (iContiguousReserved && aType != EPageFixed && z->iBma[EPageFixed]->NotFree(n, 1)) |
1207 ZoneFreePages(z, 1, aType); |
1214 {// This page has been reserved by AllocContiguous() so don't free it |
1208 |
1215 // but allocate it as fixed. |
|
1216 ZoneFreePages(z, 1, aType); |
|
1217 ZoneAllocPages(z, 1, EPageFixed); |
|
1218 } |
|
1219 else |
|
1220 { |
|
1221 bmaAll.Free(n); |
|
1222 ++iTotalFreeRamPages; |
|
1223 ZoneFreePages(z, 1, aType); |
|
1224 } |
1209 #ifdef BTRACE_RAM_ALLOCATOR |
1225 #ifdef BTRACE_RAM_ALLOCATOR |
1210 BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocFreePage, aType, aAddr); |
1226 BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocFreePage, aType, aAddr); |
1211 #endif |
1227 #endif |
1212 return KErrNone; |
1228 return KErrNone; |
1213 } |
1229 } |
|
1230 |
1214 |
1231 |
1215 void DRamAllocator::FreeRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType) |
1232 void DRamAllocator::FreeRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType) |
1216 { |
1233 { |
1217 __KTRACE_OPT(KMMU,Kern::Printf("FreeRamPages count=%08x",aNumPages)); |
1234 __KTRACE_OPT(KMMU,Kern::Printf("FreeRamPages count=%08x",aNumPages)); |
1218 |
1235 |
1257 --aNumPages; |
1274 --aNumPages; |
1258 ++aPageList; |
1275 ++aPageList; |
1259 pa += KPageSize; |
1276 pa += KPageSize; |
1260 } |
1277 } |
1261 __KTRACE_OPT(KMMU2,Kern::Printf("%d consecutive pages, zp_rem=%x, %d remaining pages",n,zp_rem,aNumPages)); |
1278 __KTRACE_OPT(KMMU2,Kern::Printf("%d consecutive pages, zp_rem=%x, %d remaining pages",n,zp_rem,aNumPages)); |
1262 bmaAll.Free(ix,n); |
|
1263 TBitMapAllocator& bmaType = *(z->iBma[aType]); |
1279 TBitMapAllocator& bmaType = *(z->iBma[aType]); |
1264 bmaType.Free(ix,n); |
1280 bmaType.Free(ix,n); |
1265 iTotalFreeRamPages += n; |
1281 |
1266 ZoneFreePages(z, n, aType); |
1282 if (iContiguousReserved && aType != EPageFixed) |
|
1283 {// See if a page has been reserved by AllocContiguous() in this range. |
|
1284 TUint pagesFreed = 0; |
|
1285 TUint allocStart = ix; |
|
1286 TUint freeOffset = ix; |
|
1287 TUint endOffset = ix + n - 1; |
|
1288 while (freeOffset <= endOffset) |
|
1289 { |
|
1290 TUint runLength = NextAllocatedRun(z, allocStart, endOffset, EPageFixed); |
|
1291 if (allocStart > freeOffset) |
|
1292 { |
|
1293 TUint freed = allocStart - freeOffset; |
|
1294 bmaAll.Free(freeOffset, freed); |
|
1295 pagesFreed += freed; |
|
1296 } |
|
1297 allocStart += runLength; |
|
1298 freeOffset = allocStart; |
|
1299 } |
|
1300 iTotalFreeRamPages += pagesFreed; |
|
1301 ZoneFreePages(z, n, aType); |
|
1302 ZoneAllocPages(z, n - pagesFreed, EPageFixed); |
|
1303 } |
|
1304 else |
|
1305 { |
|
1306 bmaAll.Free(ix,n); |
|
1307 iTotalFreeRamPages += n; |
|
1308 ZoneFreePages(z, n, aType); |
|
1309 } |
1267 #ifdef BTRACE_RAM_ALLOCATOR |
1310 #ifdef BTRACE_RAM_ALLOCATOR |
1268 BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocFreePages, aType, n, first_pa); |
1311 BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocFreePages, aType, n, first_pa); |
1269 #endif |
1312 #endif |
1270 } |
1313 } |
1271 #ifdef BTRACE_RAM_ALLOCATOR |
1314 #ifdef BTRACE_RAM_ALLOCATOR |
1272 BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocFreePagesEnd); |
1315 BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocFreePagesEnd); |
1273 #endif |
1316 #endif |
1274 } |
1317 } |
|
1318 |
1275 |
1319 |
1276 /** |
1320 /** |
1277 Attempt to clear upto the required amount of discardable or movable pages |
1321 Attempt to clear upto the required amount of discardable or movable pages |
1278 from the RAM zone. |
1322 from the RAM zone. |
1279 |
1323 |
1677 |
1721 |
1678 aState = (TZoneSearchState)currentState; |
1722 aState = (TZoneSearchState)currentState; |
1679 return r; |
1723 return r; |
1680 } |
1724 } |
1681 |
1725 |
|
1726 |
|
1727 #if !defined(__MEMMODEL_MULTIPLE__) && !defined(__MEMMODEL_MOVING__) |
|
1728 void DRamAllocator::BlockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages) |
|
1729 { |
|
1730 // Shouldn't be asked to block zero pages, addrEndPage would be wrong if we did. |
|
1731 __NK_ASSERT_DEBUG(aNumPages); |
|
1732 TPhysAddr addr = aAddrBase; |
|
1733 TPhysAddr addrEndPage = aAddrBase + ((aNumPages - 1) << KPageShift); |
|
1734 TInt tmpOffset; |
|
1735 SZone* endZone = GetZoneAndOffset(addrEndPage, tmpOffset); |
|
1736 SZone* tmpZone; |
|
1737 do |
|
1738 { |
|
1739 tmpZone = GetZoneAndOffset(addr, tmpOffset); |
|
1740 __NK_ASSERT_DEBUG(tmpZone != NULL); |
|
1741 TUint runLength = (addrEndPage < tmpZone->iPhysEnd)? |
|
1742 ((addrEndPage - addr) >> KPageShift) + 1: |
|
1743 tmpZone->iPhysPages - tmpOffset; |
|
1744 TUint reserved = tmpZone->iBma[KBmaAllPages]->SelectiveAlloc(tmpOffset, runLength); |
|
1745 if (reserved) |
|
1746 { |
|
1747 #ifdef _DEBUG |
|
1748 TUint runEnd = tmpOffset + runLength; |
|
1749 TUint free = 0; |
|
1750 for (TUint i = tmpOffset; i < runEnd; i++) |
|
1751 if (tmpZone->iBma[EPageMovable]->NotAllocated(i,1) && tmpZone->iBma[EPageDiscard]->NotAllocated(i,1)) |
|
1752 free++; |
|
1753 __NK_ASSERT_DEBUG(free == reserved); |
|
1754 #endif |
|
1755 ZoneAllocPages(tmpZone, reserved, EPageFixed); |
|
1756 iTotalFreeRamPages -= reserved; |
|
1757 } |
|
1758 tmpZone->iBma[EPageFixed]->Alloc(tmpOffset, runLength); |
|
1759 addr = tmpZone->iPhysEnd + 1; |
|
1760 } |
|
1761 while (tmpZone != endZone); |
|
1762 } |
|
1763 |
|
1764 |
|
1765 FORCE_INLINE void DRamAllocator::UnblockSetAllocRuns( TUint& aOffset1, TUint& aOffset2, |
|
1766 TUint aRunLength1, TUint aRunLength2, |
|
1767 TUint& aAllocLength, TUint& aAllocStart) |
|
1768 { |
|
1769 aAllocStart = aOffset1; |
|
1770 aAllocLength = aRunLength1; |
|
1771 aOffset1 += aAllocLength; |
|
1772 if (aOffset1 == aOffset2) |
|
1773 { |
|
1774 aAllocLength += aRunLength2; |
|
1775 aOffset2 += aRunLength2; |
|
1776 aOffset1 = aOffset2; |
|
1777 } |
|
1778 } |
|
1779 |
|
1780 |
|
1781 void DRamAllocator::UnblockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages) |
|
1782 { |
|
1783 // Shouldn't be asked to unblock zero pages, addrEndPage would be wrong if we did. |
|
1784 __NK_ASSERT_DEBUG(aNumPages); |
|
1785 TPhysAddr addr = aAddrBase; |
|
1786 TPhysAddr addrEndPage = aAddrBase + ((aNumPages - 1) << KPageShift); |
|
1787 TInt tmpOffset; |
|
1788 SZone* endZone = GetZoneAndOffset(addrEndPage, tmpOffset); |
|
1789 SZone* tmpZone; |
|
1790 do |
|
1791 { |
|
1792 tmpZone = GetZoneAndOffset(addr, tmpOffset); |
|
1793 __NK_ASSERT_DEBUG(tmpZone != NULL); |
|
1794 TUint runLength = (addrEndPage < tmpZone->iPhysEnd)? |
|
1795 ((addrEndPage - addr) >> KPageShift) + 1: |
|
1796 tmpZone->iPhysPages - tmpOffset; |
|
1797 TUint unreserved = 0; |
|
1798 TUint runEnd = tmpOffset + runLength - 1; |
|
1799 TUint freeOffset = tmpOffset; |
|
1800 TUint discardOffset = freeOffset; |
|
1801 TUint movableOffset = freeOffset; |
|
1802 __KTRACE_OPT(KMMU2, Kern::Printf("freeOff %d, runEnd %d", freeOffset, runEnd)); |
|
1803 while (freeOffset <= runEnd) |
|
1804 { |
|
1805 TUint discardRun; |
|
1806 TUint movableRun; |
|
1807 discardRun = NextAllocatedRun(tmpZone, discardOffset, runEnd, EPageDiscard); |
|
1808 movableRun = NextAllocatedRun(tmpZone, movableOffset, runEnd, EPageMovable); |
|
1809 TUint allocLength; |
|
1810 TUint allocStart; |
|
1811 __KTRACE_OPT(KMMU2, Kern::Printf("disOff %d len %d movOff %d len %d", discardOffset, discardRun, movableOffset, movableRun)); |
|
1812 if (discardOffset < movableOffset) |
|
1813 UnblockSetAllocRuns(discardOffset, movableOffset, discardRun, movableRun, allocLength, allocStart); |
|
1814 else |
|
1815 UnblockSetAllocRuns(movableOffset, discardOffset, movableRun, discardRun, allocLength, allocStart); |
|
1816 |
|
1817 if (allocStart > freeOffset) |
|
1818 { |
|
1819 unreserved += allocStart - freeOffset; |
|
1820 tmpZone->iBma[KBmaAllPages]->Free(freeOffset, allocStart - freeOffset); |
|
1821 __NK_ASSERT_DEBUG( !tmpZone->iBma[EPageMovable]->NotFree(freeOffset, allocStart - freeOffset) && |
|
1822 !tmpZone->iBma[EPageDiscard]->NotFree(freeOffset, allocStart - freeOffset)); |
|
1823 } |
|
1824 __KTRACE_OPT(KMMU2, Kern::Printf("disOff %d len %d movOff %d len %d start %d len %d", discardOffset, discardRun, movableOffset, movableRun, allocStart, allocLength)); |
|
1825 freeOffset = allocStart + allocLength; |
|
1826 __KTRACE_OPT(KMMU2, Kern::Printf("freeOff %d", freeOffset)); |
|
1827 } |
|
1828 tmpZone->iBma[EPageFixed]->Free(tmpOffset, runLength); |
|
1829 ZoneFreePages(tmpZone, unreserved, EPageFixed); |
|
1830 iTotalFreeRamPages += unreserved; |
|
1831 addr = tmpZone->iPhysEnd + 1; |
|
1832 } |
|
1833 while (tmpZone != endZone); |
|
1834 } |
|
1835 |
|
1836 |
|
1837 TBool DRamAllocator::ClearContiguousRegion(TPhysAddr aAddrBase, TPhysAddr aZoneBase, TUint aNumPages, TInt& aOffset) |
|
1838 { |
|
1839 TPhysAddr addr = aAddrBase; |
|
1840 TPhysAddr addrEnd = aAddrBase + (aNumPages << KPageShift); |
|
1841 TInt contigOffset = 0; |
|
1842 SZone* contigZone = GetZoneAndOffset(addr, contigOffset); |
|
1843 for (; addr != addrEnd; addr += KPageSize, contigOffset++) |
|
1844 { |
|
1845 if (contigZone->iPhysEnd < addr) |
|
1846 { |
|
1847 contigZone = GetZoneAndOffset(addr, contigOffset); |
|
1848 __NK_ASSERT_DEBUG(contigZone != NULL); |
|
1849 } |
|
1850 |
|
1851 __NK_ASSERT_DEBUG(contigZone != NULL); |
|
1852 __NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotFree(contigOffset, 1)); |
|
1853 __NK_ASSERT_DEBUG(SPageInfo::SafeFromPhysAddr(addr) != NULL); |
|
1854 |
|
1855 // WARNING - This may flash the ram alloc mutex. |
|
1856 TInt exRet = M::MoveAndAllocPage(addr, EPageFixed); |
|
1857 if (exRet != KErrNone) |
|
1858 {// This page couldn't be moved or discarded so |
|
1859 // restart the search the page after this one. |
|
1860 __KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail contigOffset 0x%x exRet %d", contigOffset, exRet)); |
|
1861 aOffset = (addr < aZoneBase)? 0 : contigOffset + 1; |
|
1862 break; |
|
1863 } |
|
1864 } |
|
1865 return addr == addrEnd; |
|
1866 } |
|
1867 |
|
1868 |
1682 /** |
1869 /** |
1683 Search through the zones for the requested contiguous RAM, first in preference |
1870 Search through the zones for the requested contiguous RAM, first in preference |
1684 order then, if that fails, in address order. |
1871 order then, if that fails, in address order. |
1685 |
1872 |
|
1873 No support for non-fixed pages as this will discard and move pages if required. |
|
1874 |
1686 @param aNumPages The number of contiguous pages to find |
1875 @param aNumPages The number of contiguous pages to find |
1687 @param aPhysAddr Will contain the base address of any contiguous run if found |
1876 @param aPhysAddr Will contain the base address of any contiguous run if found |
1688 @param aType The page type of the memory to be allocated |
|
1689 @param aAlign Alignment specified as the alignment shift |
1877 @param aAlign Alignment specified as the alignment shift |
1690 @param aBlockedZoneId The ID of a zone that can't be allocated into, by default this has no effect |
|
1691 @param aBlockRest Set to ETrue to stop allocation as soon as aBlockedZoneId is reached |
|
1692 in preference ordering. EFalse otherwise. |
1878 in preference ordering. EFalse otherwise. |
1693 |
1879 |
1694 @return KErrNone on success, KErrNoMemory otherwise |
1880 @return KErrNone on success, KErrNoMemory otherwise |
1695 */ |
1881 */ |
1696 TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign, TUint aBlockedZoneId, TBool aBlockRest) |
1882 TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TInt aAlign) |
1697 { |
1883 { |
1698 __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam size %08x align %d",aNumPages,aAlign)); |
1884 __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam size %08x align %d",aNumPages,aAlign)); |
1699 |
1885 |
1700 M::RamAllocIsLocked(); |
1886 M::RamAllocIsLocked(); |
1701 |
1887 |
1702 // No support for non-fixed pages as this will discard and move |
1888 if ((TUint)aNumPages > iTotalFreeRamPages + M::NumberOfFreeDpPages()) |
1703 // pages if required. |
1889 {// Not enough free space and not enough freeable pages. |
1704 __NK_ASSERT_DEBUG(aType == EPageFixed); |
1890 return KErrNoMemory; |
|
1891 } |
|
1892 |
1705 TInt alignWrtPage = Max(aAlign - KPageShift, 0); |
1893 TInt alignWrtPage = Max(aAlign - KPageShift, 0); |
1706 TUint32 alignmask = (1u << alignWrtPage) - 1; |
1894 TUint32 alignmask = (1u << alignWrtPage) - 1; |
1707 |
1895 |
1708 // Attempt to find enough pages searching in preference order first then |
1896 // Attempt to find enough pages searching in preference order first then |
1709 // in address order |
1897 // in address order |
1731 __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryAll=%08x offset=%08x", base, carryAll, offset)); |
1919 __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryAll=%08x offset=%08x", base, carryAll, offset)); |
1732 offset = bmaAll.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryAll, runLength); |
1920 offset = bmaAll.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryAll, runLength); |
1733 __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset)); |
1921 __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset)); |
1734 |
1922 |
1735 if (offset >= 0) |
1923 if (offset >= 0) |
|
1924 { |
|
1925 // Have found enough contiguous pages so return address of physical page |
|
1926 // at the start of the region |
|
1927 aPhysAddr = TPhysAddr((base + offset - carryAll + alignmask) & ~alignmask) << KPageShift; |
|
1928 MarkPagesAllocated(aPhysAddr, aNumPages, EPageFixed); |
|
1929 |
|
1930 __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr)); |
|
1931 #ifdef BTRACE_RAM_ALLOCATOR |
|
1932 BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr); |
|
1933 #endif |
|
1934 return KErrNone; |
|
1935 } |
|
1936 // No run found when looking in just the free pages so see if this |
|
1937 // RAM zone could be used if pages where moved or discarded. |
|
1938 TBitMapAllocator& bmaImmov = *(zone->iBma[EPageFixed]); |
|
1939 offset = 0; // Clear so searches whole of fixed BMA on the first pass. |
|
1940 do |
|
1941 { |
|
1942 __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryImmov=%08x offset=%08x", base, carryImmov, offset)); |
|
1943 offset = bmaImmov.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryImmov, runLength, offset); |
|
1944 __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset)); |
|
1945 if (offset >= 0) |
|
1946 {// Have found a run in immovable page bma so attempt to clear |
|
1947 // it for the allocation. |
|
1948 TPhysAddr addrBase = TPhysAddr((base + offset - carryImmov + alignmask) & ~alignmask) << KPageShift; |
|
1949 __KTRACE_OPT(KMMU2, Kern::Printf(">AllocContig fix run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread)); |
|
1950 |
|
1951 // Block the contiguous region from being allocated. |
|
1952 iContiguousReserved++; |
|
1953 BlockContiguousRegion(addrBase, aNumPages); |
|
1954 if (ClearContiguousRegion(addrBase, zone->iPhysBase, aNumPages, offset)) |
|
1955 {// Cleared all the required pages. |
|
1956 // Return address of physical page at the start of the region. |
|
1957 iContiguousReserved--; |
|
1958 aPhysAddr = addrBase; |
|
1959 __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr)); |
|
1960 #ifdef BTRACE_RAM_ALLOCATOR |
|
1961 BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr); |
|
1962 #endif |
|
1963 __KTRACE_OPT(KMMU2, Kern::Printf("<AllocContig suc run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread)); |
|
1964 return KErrNone; |
|
1965 } |
|
1966 else |
|
1967 { |
|
1968 // Unblock the contiguous region. |
|
1969 UnblockContiguousRegion(addrBase, aNumPages); |
|
1970 iContiguousReserved--; |
|
1971 __KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail offset 0x%x carryImmov %x", |
|
1972 offset, carryImmov)); |
|
1973 // Can't rely on RAM zone preference ordering being |
|
1974 // the same so clear carrys and restart search from |
|
1975 // within the current RAM zone or skip onto the next |
|
1976 // one if at the end of this one. |
|
1977 carryImmov = 0; |
|
1978 carryAll = 0; |
|
1979 __KTRACE_OPT(KMMU2, Kern::Printf("<AllocContigfail run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread)); |
|
1980 } |
|
1981 } |
|
1982 } |
|
1983 // Keep searching immovable page bma of the current RAM zone until |
|
1984 // gone past end of RAM zone or no run can be found. |
|
1985 while (offset >= 0 && (TUint)offset < zone->iPhysPages); |
|
1986 } |
|
1987 return KErrNoMemory; |
|
1988 } |
|
1989 |
|
1990 #else |
|
1991 |
|
1992 /** |
|
1993 Search through the zones for the requested contiguous RAM, first in preference |
|
1994 order then, if that fails, in address order. |
|
1995 |
|
1996 No support for non-fixed pages as this will discard and move pages if required. |
|
1997 |
|
1998 @param aNumPages The number of contiguous pages to find |
|
1999 @param aPhysAddr Will contain the base address of any contiguous run if found |
|
2000 @param aAlign Alignment specified as the alignment shift |
|
2001 |
|
2002 @return KErrNone on success, KErrNoMemory otherwise |
|
2003 */ |
|
2004 TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TInt aAlign) |
|
2005 { |
|
2006 __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam size %08x align %d",aNumPages,aAlign)); |
|
2007 |
|
2008 M::RamAllocIsLocked(); |
|
2009 |
|
2010 TInt alignWrtPage = Max(aAlign - KPageShift, 0); |
|
2011 TUint32 alignmask = (1u << alignWrtPage) - 1; |
|
2012 |
|
2013 // Attempt to find enough pages searching in preference order first then |
|
2014 // in address order |
|
2015 TZoneSearchState searchState = EZoneSearchPref; |
|
2016 SZone* zone; |
|
2017 SZone* prevZone = NULL; |
|
2018 TInt carryAll = 0; // Carry for all pages bma, clear to start new run. |
|
2019 TInt carryImmov = 0; // Carry for immovable pages bma, clear to start new run. |
|
2020 TInt base = 0; |
|
2021 TInt offset = 0; |
|
2022 iZoneTmpAddrIndex = -1; |
|
2023 iZoneTmpPrefLink = iZonePrefList.First(); |
|
2024 while (NextAllocZone(zone, searchState, EPageFixed, KRamZoneInvalidId, EFalse)) |
|
2025 { |
|
2026 // Be sure to start from scratch if zone not contiguous with previous zone |
|
2027 if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd)) |
|
2028 { |
|
2029 carryAll = 0; |
|
2030 carryImmov = 0; |
|
2031 } |
|
2032 prevZone = zone; |
|
2033 TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]); |
|
2034 base = TInt(zone->iPhysBase >> KPageShift); |
|
2035 TInt runLength; |
|
2036 __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryAll=%08x offset=%08x", base, carryAll, offset)); |
|
2037 offset = bmaAll.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryAll, runLength); |
|
2038 __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset)); |
|
2039 |
|
2040 if (offset >= 0) |
1736 {// Have found enough contiguous pages so return address of physical page |
2041 {// Have found enough contiguous pages so return address of physical page |
1737 // at the start of the region |
2042 // at the start of the region |
1738 aPhysAddr = TPhysAddr((base + offset - carryAll + alignmask) & ~alignmask) << KPageShift; |
2043 aPhysAddr = TPhysAddr((base + offset - carryAll + alignmask) & ~alignmask) << KPageShift; |
1739 MarkPagesAllocated(aPhysAddr, aNumPages, aType); |
2044 MarkPagesAllocated(aPhysAddr, aNumPages, EPageFixed); |
1740 |
2045 |
1741 __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr)); |
2046 __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr)); |
1742 #ifdef BTRACE_RAM_ALLOCATOR |
2047 #ifdef BTRACE_RAM_ALLOCATOR |
1743 BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, aType, aNumPages, aPhysAddr); |
2048 BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr); |
1744 #endif |
2049 #endif |
1745 return KErrNone; |
2050 return KErrNone; |
1746 } |
2051 } |
1747 else |
2052 else |
1748 {// No run found when looking in just the free pages so see if this |
2053 {// No run found when looking in just the free pages so see if this |
1856 |
2161 |
1857 @param aZoneIdList An array of the IDs of the RAM zones to allocate from. |
2162 @param aZoneIdList An array of the IDs of the RAM zones to allocate from. |
1858 @param aZoneIdCount The number of the IDs listed by aZoneIdList. |
2163 @param aZoneIdCount The number of the IDs listed by aZoneIdList. |
1859 @param aSize The number of contiguous bytes to find |
2164 @param aSize The number of contiguous bytes to find |
1860 @param aPhysAddr Will contain the base address of the contiguous run if found |
2165 @param aPhysAddr Will contain the base address of the contiguous run if found |
1861 @param aType The page type of the memory to be allocated |
|
1862 @param aAlign Alignment specified as the alignment shift |
2166 @param aAlign Alignment specified as the alignment shift |
1863 |
2167 |
1864 @return KErrNone on success, KErrNoMemory if allocation couldn't succeed or |
2168 @return KErrNone on success, KErrNoMemory if allocation couldn't succeed or |
1865 the RAM zone has the KRamZoneFlagNoAlloc flag set. KErrArgument if a zone of |
2169 the RAM zone has the KRamZoneFlagNoAlloc flag set. KErrArgument if a zone of |
1866 aZoneIdList exists or if aSize is larger than the size of the zone. |
2170 aZoneIdList exists or if aSize is larger than the size of the zone. |
1867 */ |
2171 */ |
1868 TInt DRamAllocator::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign) |
2172 TInt DRamAllocator::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign) |
1869 { |
2173 { |
1870 __KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam zones 0x%x size 0x%08x align %d",aZoneIdCount, aSize, aAlign)); |
2174 __KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam zones 0x%x size 0x%08x align %d",aZoneIdCount, aSize, aAlign)); |
1871 |
2175 |
1872 M::RamAllocIsLocked(); |
2176 M::RamAllocIsLocked(); |
1873 __NK_ASSERT_DEBUG(aType == EPageFixed); |
|
1874 |
2177 |
1875 |
2178 |
1876 TUint numPages = (aSize + KPageSize - 1) >> KPageShift; |
2179 TUint numPages = (aSize + KPageSize - 1) >> KPageShift; |
1877 TInt carry = 0; // must be zero as this is always the start of a new run |
2180 TInt carry = 0; // must be zero as this is always the start of a new run |
1878 TInt alignWrtPage = Max(aAlign - KPageShift, 0); |
2181 TInt alignWrtPage = Max(aAlign - KPageShift, 0); |
2104 |
2407 |
2105 __NK_ASSERT_DEBUG(aZone - iZones < (TInt)iNumZones); |
2408 __NK_ASSERT_DEBUG(aZone - iZones < (TInt)iNumZones); |
2106 // Makes things simpler for bma selection. |
2409 // Makes things simpler for bma selection. |
2107 __NK_ASSERT_DEBUG(aType != EPageUnknown); |
2410 __NK_ASSERT_DEBUG(aType != EPageUnknown); |
2108 |
2411 |
2109 if (aOffset >= aZone->iPhysPages) |
2412 TUint zoneEndOffset = aZone->iPhysPages - 1; |
|
2413 if (aOffset > zoneEndOffset) |
2110 {// Starting point is outside the zone |
2414 {// Starting point is outside the zone |
2111 return KErrArgument; |
2415 return KErrArgument; |
2112 } |
2416 } |
2113 |
2417 |
2114 TUint offset = aOffset; |
2418 TUint wordIndex = aOffset >> 5; |
2115 TUint endOffset = aZone->iPhysPages; |
2419 TUint endWordIndex = zoneEndOffset >> 5; |
2116 TUint endOffsetAligned = endOffset & KWordAlignMask; |
|
2117 |
2420 |
2118 // Select the BMA to search, |
2421 // Select the BMA to search, |
2119 TUint bmaIndex = (aType == EPageTypes)? KBmaAllPages : aType; |
2422 TUint bmaIndex = (aType == EPageTypes)? KBmaAllPages : aType; |
2120 TUint32* map = &(aZone->iBma[bmaIndex]->iMap[offset >> 5]); |
2423 TUint32* map = &(aZone->iBma[bmaIndex]->iMap[wordIndex]); |
2121 TUint32 bits = *map++; |
2424 TUint32* mapEnd = &(aZone->iBma[bmaIndex]->iMap[endWordIndex]); |
|
2425 TUint32 bits = *map; |
2122 |
2426 |
2123 // Set bits for pages before 'offset' (i.e. ones we want to ignore)... |
2427 // Set bits for pages before 'offset' (i.e. ones we want to ignore)... |
2124 bits |= ~(KMaxTUint32 >> (offset & ~KWordAlignMask)); |
2428 bits |= ~(KMaxTUint32 >> (aOffset & ~KWordAlignMask)); |
2125 |
2429 |
2126 // Find the first bit map word from aOffset in aZone with allocated pages |
2430 // Find the first bit map word from aOffset in aZone with allocated pages |
2127 while (bits == KMaxTUint32 && offset < endOffsetAligned) |
2431 while (bits == KMaxTUint32 && map < mapEnd) |
2128 { |
2432 { |
2129 bits = *map++; |
2433 bits = *++map; |
2130 offset = (offset + 32) & KWordAlignMask; |
2434 } |
2131 } |
2435 |
2132 |
2436 if (map == mapEnd) |
2133 if (offset >= endOffsetAligned && endOffset != endOffsetAligned) |
|
2134 {// Have reached the last bit mask word so set the bits that are |
2437 {// Have reached the last bit mask word so set the bits that are |
2135 // outside of the zone so that they are ignored. |
2438 // outside of the zone so that they are ignored. |
2136 bits |= KMaxTUint32 >> (endOffset - endOffsetAligned); |
2439 bits |= (KMaxTUint32 >> (zoneEndOffset & ~KWordAlignMask)) >> 1; |
2137 } |
2440 } |
2138 |
2441 |
2139 if (bits == KMaxTUint32) |
2442 if (bits == KMaxTUint32) |
2140 {// No allocated pages found after aOffset in aZone. |
2443 {// No allocated pages found after aOffset in aZone. |
2141 return KErrNotFound; |
2444 return KErrNotFound; |
2142 } |
2445 } |
2143 |
2446 |
2144 // Now we have bits with allocated pages in it so determine the exact |
2447 // Now we have bits with allocated pages in it so determine the exact |
2145 // offset of the next allocated page |
2448 // offset of the next allocated page |
2146 TUint32 mask = 0x80000000 >> (offset & ~KWordAlignMask); |
2449 TInt msOne = __e32_find_ms1_32(~bits); |
2147 while (bits & mask) |
2450 __NK_ASSERT_DEBUG(msOne >= 0); // Must have at least one allocated page in the word. |
2148 { |
2451 TUint msOneOffset = 31 - msOne; |
2149 mask >>= 1; |
2452 aOffset = ((map - aZone->iBma[bmaIndex]->iMap) << 5) + msOneOffset; |
2150 offset++; |
|
2151 } |
|
2152 |
|
2153 if (offset >= endOffset) |
|
2154 {// Reached the end of the zone without finding an allocated page after aOffset |
|
2155 return KErrNotFound; |
|
2156 } |
|
2157 |
|
2158 // Should definitely have found an allocated page within aZone's pages |
|
2159 __NK_ASSERT_DEBUG(mask != 0 && !(bits & mask) && offset < aZone->iPhysPages); |
|
2160 |
|
2161 aOffset = offset; |
|
2162 return KErrNone; |
2453 return KErrNone; |
2163 } |
2454 } |
|
2455 |
|
2456 |
|
2457 /** |
|
2458 Get the next run of pages in this zone that are allocated after aOffset. |
|
2459 |
|
2460 @param aZone The zone to find the next allocated page in. |
|
2461 @param aOffset On entry this is the offset from which the next allocated |
|
2462 page in the zone should be found, on return it will be the offset |
|
2463 of the next allocated page. |
|
2464 @param aEndOffset The last offset within this RAM zone to check for allocated runs. |
|
2465 @return The length of any run found, KErrNotFound if no more pages in |
|
2466 the zone after aOffset are allocated, KErrArgument if aOffset is outside the zone. |
|
2467 */ |
|
TInt DRamAllocator::NextAllocatedRun(SZone* aZone, TUint& aOffset, TUint aEndOffset, TZonePageType aType) const
	{
	// High bits of a page offset select the 32-bit bitmap word; the low 5 bits
	// (~KWordAlignMask == 0x1f) select the bit position within that word.
	const TUint KWordAlignMask = KMaxTUint32 << 5;

	M::RamAllocIsLocked();

	__NK_ASSERT_DEBUG(aZone - iZones < (TInt)iNumZones);
	// Makes things simpler for bma selection.
	__NK_ASSERT_DEBUG(aType != EPageUnknown);

	if (aOffset > aEndOffset)
		{// UnblockContiguous() has already searched the whole range for this page type.
		return 0;
		}

	TUint wordIndex = aOffset >> 5;
	TUint endWordIndex = aEndOffset >> 5;

	// Select the BMA to search: EPageTypes searches the bitmap covering all
	// pages, otherwise the per-type bitmap for aType is used.
	TUint bmaIndex = (aType == EPageTypes)? KBmaAllPages : aType;
	TUint32* map = &(aZone->iBma[bmaIndex]->iMap[wordIndex]);
	TUint32* mapEnd = &(aZone->iBma[bmaIndex]->iMap[endWordIndex]);
	TUint32 bits = *map;

	// NOTE: in the bitmap a set bit is a free page and a clear bit is an
	// allocated page, so an all-ones word (KMaxTUint32) contains no allocated
	// pages.  Bit 31 (the MSB) corresponds to the first page of the word.
	// Set bits for pages before 'offset' (i.e. ones we want to ignore)...
	bits |= ~(KMaxTUint32 >> (aOffset & ~KWordAlignMask));

	// Find the first bit map word from aOffset in aZone with allocated pages
	while (bits == KMaxTUint32 && map < mapEnd)
		{
		bits = *++map;
		}

	if (map == mapEnd)
		{// Have reached the last bit mask word so set the bits that are
		// outside of the range so that they are ignored.  The extra >> 1
		// leaves the bit for aEndOffset itself still searchable.
		bits |= (KMaxTUint32 >> (aEndOffset & ~KWordAlignMask)) >> 1;
		}

	if (bits == KMaxTUint32)
		{// No allocated pages found in the range.
		aOffset = aEndOffset + 1;
		return 0;
		}

	// Now we have bits with allocated pages in it so determine the exact
	// offset of the next allocated page.  Inverting makes allocated pages the
	// set bits; the most significant of those is the lowest allocated offset.
	TInt msOne = __e32_find_ms1_32(~bits);
	__NK_ASSERT_DEBUG(msOne >= 0);	// Must have at least one allocated page in the word.
	TUint msOneOffset = 31 - msOne;
	aOffset = ((map - aZone->iBma[bmaIndex]->iMap) << 5) + msOneOffset;
	TUint32* runWord = map;	// Remember the word in which the run starts.

	if (map < mapEnd && __e32_bit_count_32(~bits) == msOne + 1)
		{// The whole of the region in this word is allocated.
		// Find the next word which isn't completely allocated within the range.
		do
			{
			bits = *++map;	// bits == 0 means every page in the word is allocated.
			}
		while (!bits && map < mapEnd);
		}

	// Clear any bits before the run so can get next free from __e32_find_ms1_32().
	// (Only needed when the scan never left the word the run started in.)
	if (runWord == map)
		bits &= KMaxTUint32 >> (aOffset & ~KWordAlignMask);
	TInt msFree = __e32_find_ms1_32(bits);
	__NK_ASSERT_DEBUG(msFree >= 0 || map == mapEnd);
	// No free bit found means the final word is fully allocated: treat the run
	// as ending one page past that word (offset 32), clamped to the range below.
	TUint msFreeOffset = (msFree >= 0)? 31 - msFree : 32;
	TUint endIndex = map - aZone->iBma[bmaIndex]->iMap;
	TUint runEnd = (endIndex << 5) + msFreeOffset;
	if (runEnd > aEndOffset + 1)	// Ensure we don't go past the range.
		runEnd = aEndOffset + 1;
	__NK_ASSERT_DEBUG(runEnd > aOffset);

	return runEnd - aOffset;
	}
|
2545 |
2164 |
2546 |
2165 /** |
2547 /** |
2166 See if any of the least preferable RAM zones can be emptied. If they can then |
2548 See if any of the least preferable RAM zones can be emptied. If they can then |
2167 initialise the allocator for a general defragmentation operation. |
2549 initialise the allocator for a general defragmentation operation. |
2168 |
2550 |