kernel/eka/memmodel/epoc/mmubase/ramalloc.cpp
branch:    GCC_SURGE
changeset: 221:39b39e1a406e
parent:    167:b41fc9c39ca7
parent:    201:43365a9b78a3
--- a/kernel/eka/memmodel/epoc/mmubase/ramalloc.cpp	(219:0ff03867bdb6)
+++ b/kernel/eka/memmodel/epoc/mmubase/ramalloc.cpp	(221:39b39e1a406e)
@@ -352 +352 @@
 		__KTRACE_OPT(KMMU,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));	// counts rolled over
 		__KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], 
 					aZone->iAllocPages[EPageFixed],	aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
 		Panic(EZonesCountErr);
 		}
-	__ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
 	__KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));
 	__KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], 
 					aZone->iAllocPages[EPageFixed],	aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
 
-	if (iAllowBmaVerify)
+	if (!iContiguousReserved)
 		{
+		__ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
 		TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
 		TUint allocPages;
 		if (aType == EPageFixed || aType == EPageUnknown)
 			allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed];
 		else
@@ -493 +493 @@
 		__KTRACE_OPT(KMMU,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));	// counts rolled over
 		__KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], 
 					aZone->iAllocPages[EPageFixed],	aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
 		Panic(EZonesCountErr);
 		}
-	__ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
 	__KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));
 	__KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], 
 					aZone->iAllocPages[EPageFixed],	aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
 
-	if (iAllowBmaVerify)
+	if (!iContiguousReserved)
 		{
+		__ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
 		TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
 		TUint allocPages;
 		if (aType == EPageFixed || aType == EPageUnknown)
 			allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed];
 		else
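
Note on the two hunks above: the old iAllowBmaVerify flag is replaced by !iContiguousReserved, and the free-count assertion moves inside the guard, so the bitmap/counter cross-check is suspended while a contiguous reservation is in flight (BlockContiguousRegion, added later in this changeset, deliberately lets the fixed BMA run ahead of the per-type counters until the region is released). A standalone sketch of the invariant being asserted, using std::bitset in place of TBitMapAllocator; every name below is illustrative, not the kernel API:

#include <bitset>
#include <cassert>
#include <cstddef>

enum TPageType { EUnknown, EFixed, EMovable, EDiscard, ENumTypes };

struct TZoneModel
	{
	static const std::size_t KPages = 64;
	std::bitset<KPages> iAllocated;          // stand-in for iBma[KBmaAllPages]
	std::size_t iAllocPages[ENumTypes] = {}; // stand-in for iAllocPages[]
	};

// The cross-check ZoneAllocPages/ZoneFreePages perform in debug builds:
// the per-type counters must agree with the "all pages" bitmap, but only
// when no contiguous reservation is pinning pages behind the counters' back.
void VerifyZone(const TZoneModel& aZone, bool aContiguousReserved)
	{
	if (aContiguousReserved)
		return;
	std::size_t alloc = 0;
	for (std::size_t t = 0; t < ENumTypes; t++)
		alloc += aZone.iAllocPages[t];
	assert(alloc == aZone.iAllocated.count());
	}
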
@@ -966 +966 @@
 	// and will therefore not be touched by the allocator.
 	//////////////////////////////////////////////////////////////////////////
 	// Temporarily fill preference list so SetPhysicalRamState can succeed
 #ifdef _DEBUG
 	// Block bma verifications as bma and alloc counts aren't consistent yet.
-	iAllowBmaVerify = EFalse;
+	iContiguousReserved = 1;
 #endif
 	const SZone* const lastZone = iZones + iNumZones;
 	zone = iZones;
 	for (; zone < lastZone; zone++)
 		{
@@ -982 +982 @@
 		{// Free all the pages in this bank.
 		SetPhysicalRamState(bank->iBase, bank->iSize, ETrue, EPageUnknown);
 		}
 #ifdef _DEBUG
 	// Only now is it safe to enable bma verifications
-	iAllowBmaVerify = ETrue;
+	iContiguousReserved = 0;
 #endif
 
 	///////////////////////////////////////////////////////////////////////////
 	//	Sort the zones by preference and create a preference ordered linked list
 	///////////////////////////////////////////////////////////////////////////
@@ -1133 +1133 @@
 		pZ++; 		// zones in physical address order so move to next one
 		offset = 0;	// and reset offset to start of the zone
 		}
 	}
 
+
 TInt DRamAllocator::MarkPageAllocated(TPhysAddr aAddr, TZonePageType aType)
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("DRamAllocator::MarkPageAllocated %08x",aAddr));
 
 	M::RamAllocIsLocked();
@@ -1158 +1159 @@
 		{
 		__KTRACE_OPT(KMMU,Kern::Printf("Page already allocated"));
 		return KErrAlreadyExists;			// page is already allocated
 		}
 	bmaAll.Alloc(n,1);
-	bmaType.Alloc(n,1);
+	if (bmaType.NotAllocated(n,1))
+		bmaType.Alloc(n,1);
+#ifdef _DEBUG
+	else // Allow this page to already be reserved in bmaType as AllocContiguousRam() may have done this.
+		__NK_ASSERT_DEBUG(aType == EPageFixed);
+#endif
 	--iTotalFreeRamPages;
 	ZoneAllocPages(z, 1, aType);
 	__KTRACE_OPT(KMMU,Kern::Printf("Total free RAM pages now = %d",iTotalFreeRamPages));
 
 #ifdef BTRACE_RAM_ALLOCATOR
 	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocMarkAllocated, aType, aAddr);
 #endif
 	return KErrNone;
 	}
+
 
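
Note: MarkPageAllocated now tolerates the page already being marked in the per-type BMA, because AllocContiguousRam() may have reserved it there first; the debug branch asserts this can only legitimately happen for EPageFixed. A minimal sketch of the idempotent marking, with hypothetical stand-in types:

#include <bitset>
#include <cassert>
#include <cstddef>

enum TPageType { EFixed, EMovable };

template <std::size_t N>
void MarkAllocated(std::bitset<N>& aTypeBma, std::size_t aPage, TPageType aType)
	{
	if (!aTypeBma.test(aPage))
		aTypeBma.set(aPage);     // normal path: the page was clear in this BMA
	else
		assert(aType == EFixed); // only a blocked contiguous run may pre-set it
	}
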
@@ -1174 +1181 @@
 TInt DRamAllocator::FreeRamPage(TPhysAddr aAddr, TZonePageType aType)
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("FreeRamPage %08x",aAddr));
 
@@ -1199 +1206 @@
 		return KErrArgument;
 		}
 	__KTRACE_OPT(KMMU2,Kern::Printf("Zone index %d page index %04x",z-iZones,n));
 	TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]);
 	TBitMapAllocator& bmaType = *(z->iBma[aType]);
-	bmaAll.Free(n);
+
 	bmaType.Free(n);
-	++iTotalFreeRamPages;
-	ZoneFreePages(z, 1, aType);
-
+	if (iContiguousReserved && aType != EPageFixed && z->iBma[EPageFixed]->NotFree(n, 1))
+		{// This page has been reserved by AllocContiguous() so don't free it
+		// but allocate it as fixed.
+		ZoneFreePages(z, 1, aType);
+		ZoneAllocPages(z, 1, EPageFixed);
+		}
+	else
+		{
+		bmaAll.Free(n);
+		++iTotalFreeRamPages;
+		ZoneFreePages(z, 1, aType);
+		}
 #ifdef BTRACE_RAM_ALLOCATOR
 	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocFreePage, aType, aAddr);
 #endif
 	return KErrNone;
 	}
+
 
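
Note: with a reservation in flight, FreeRamPage no longer returns such a page to the free pool; the page stays allocated and its accounting moves from the old type to EPageFixed, so the pending contiguous allocation keeps it. A standalone model of that re-binning (hypothetical types, not DRamAllocator):

#include <bitset>
#include <cstddef>

enum TPageType { EFixed, EMovable, EDiscard, ENumTypes };

template <std::size_t N>
struct TZoneModel
	{
	std::bitset<N> iAll, iType[ENumTypes];
	unsigned iAllocPages[ENumTypes] = {};
	unsigned iFreePages = 0;
	};

template <std::size_t N>
void FreePage(TZoneModel<N>& aZone, std::size_t aPage, TPageType aType, bool aReserved)
	{
	aZone.iType[aType].reset(aPage);
	if (aReserved && aType != EFixed && aZone.iType[EFixed].test(aPage))
		{// reserved by a contiguous allocation: keep it, but account it as fixed
		aZone.iAllocPages[aType]--;
		aZone.iAllocPages[EFixed]++;
		}
	else
		{// genuinely free the page
		aZone.iAll.reset(aPage);
		aZone.iFreePages++;
		aZone.iAllocPages[aType]--;
		}
	}
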
@@ -1215 +1232 @@
 void DRamAllocator::FreeRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType)
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("FreeRamPages count=%08x",aNumPages));
 
@@ -1257 +1274 @@
 			--aNumPages;
 			++aPageList;
 			pa += KPageSize;
 			}
 		__KTRACE_OPT(KMMU2,Kern::Printf("%d consecutive pages, zp_rem=%x, %d remaining pages",n,zp_rem,aNumPages));
-		bmaAll.Free(ix,n);
 		TBitMapAllocator& bmaType = *(z->iBma[aType]);
 		bmaType.Free(ix,n);
-		iTotalFreeRamPages += n;
-		ZoneFreePages(z, n, aType);
+
+		if (iContiguousReserved && aType != EPageFixed)
+			{// See if a page has been reserved by AllocContiguous() in this range.
+			TUint pagesFreed = 0;
+			TUint allocStart = ix;
+			TUint freeOffset = ix;
+			TUint endOffset = ix + n - 1;
+			while (freeOffset <= endOffset)
+				{
+				TUint runLength =  NextAllocatedRun(z, allocStart, endOffset, EPageFixed);
+				if (allocStart > freeOffset)
+					{
+					TUint freed = allocStart - freeOffset;
+					bmaAll.Free(freeOffset, freed);
+					pagesFreed += freed;
+					}
+				allocStart += runLength;
+				freeOffset = allocStart;
+				}
+			iTotalFreeRamPages += pagesFreed;
+			ZoneFreePages(z, n, aType);
+			ZoneAllocPages(z, n - pagesFreed, EPageFixed);
+			}
+		else
+			{
+			bmaAll.Free(ix,n);
+			iTotalFreeRamPages += n;
+			ZoneFreePages(z, n, aType);
+			}
 #ifdef BTRACE_RAM_ALLOCATOR
 		BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocFreePages, aType, n, first_pa);
 #endif
 		}
 #ifdef BTRACE_RAM_ALLOCATOR
 	BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocFreePagesEnd);
 #endif
 	}
+
 
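
Note: the batch free applies the same rule per run: NextAllocatedRun (added later in this changeset) locates the stretches reserved as fixed, and only the gaps between them return to the all-pages BMA and the free count. A page-at-a-time model of the gap-freeing walk; the kernel steps run by run instead, and these names are stand-ins:

#include <bitset>
#include <cstddef>

template <std::size_t N>
unsigned FreeUnreserved(std::bitset<N>& aAll, const std::bitset<N>& aFixed,
						std::size_t aStart, std::size_t aEnd)
	{
	unsigned freed = 0;
	for (std::size_t i = aStart; i <= aEnd; i++)
		if (!aFixed.test(i))	// not pinned by a contiguous reservation
			{
			aAll.reset(i);
			freed++;
			}
	return freed;	// caller adds this to its total-free-pages count
	}
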
@@ -1276 +1320 @@
 /**
 	Attempt to clear up to the required number of discardable or movable pages
 	from the RAM zone.
 
@@ -1284 +1328 @@
 	{
 	__KTRACE_OPT(KMMU, 
 		Kern::Printf("ZoneClearPages: ID 0x%x, req 0x%x", aZone.iId, aRequiredPages));
 	// Discard the required number of discardable pages.
 	TUint offset = 0;
-	TInt r = NextAllocatedPage(&aZone, offset, EPageDiscard);
-	while (r == KErrNone && aRequiredPages)
-		{
+	for (; aRequiredPages; offset++)
+		{
+		TInt r = NextAllocatedPage(&aZone, offset, EPageDiscard);
+		if (r != KErrNone)
+			break;
+		if (iContiguousReserved && aZone.iBma[EPageFixed]->NotFree(offset, 1))
+			{
+			offset++;
+			continue;
+			}
 		TPhysAddr physAddr = (offset << KPageShift) + aZone.iPhysBase;
-		TInt discarded = M::DiscardPage(physAddr, aZone.iId, EFalse);
+		TInt discarded = M::DiscardPage(physAddr, aZone.iId, M::EMoveDisMoveDirty);
 		if (discarded == KErrNone)
 			{// The page was successfully discarded.
 			aRequiredPages--;
 			}
-		offset++;
-		r = NextAllocatedPage(&aZone, offset, EPageDiscard);
 		}
 	// Move the required number of movable pages.
-	offset = 0;
-	r = NextAllocatedPage(&aZone, offset, EPageMovable);
-	while(r == KErrNone && aRequiredPages)
-		{
+	for (offset = 0; aRequiredPages; offset++)
+		{
+		TInt r = NextAllocatedPage(&aZone, offset, EPageMovable);
+		if (r != KErrNone)
+			break;
+		if (iContiguousReserved && aZone.iBma[EPageFixed]->NotFree(offset, 1))
+			{
+			offset++;
+			continue;
+			}
 		TPhysAddr physAddr = (offset << KPageShift) + aZone.iPhysBase;
 		TPhysAddr newAddr = KPhysAddrInvalid;
-		if (M::MovePage(physAddr, newAddr, aZone.iId, EFalse) == KErrNone)
+		if (M::MovePage(physAddr, newAddr, aZone.iId, 0) == KErrNone)
 			{// The page was successfully moved.
 #ifdef _DEBUG
 			TInt newOffset = 0;
 			SZone* newZone = GetZoneAndOffset(newAddr, newOffset);
 			__NK_ASSERT_DEBUG(newZone != &aZone);
 #endif
 			aRequiredPages--;
 			}
-		offset++;
-		r = NextAllocatedPage(&aZone, offset, EPageMovable);
 		}
 	}
 
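
Note: both scan loops in ZoneClearPages are restructured the same way: re-query the next allocated page on every pass, stop when the zone is exhausted, and skip pages pinned by a contiguous reservation. A compilable model of the discard loop; the real code calls NextAllocatedPage() and M::DiscardPage(), and everything here is a stand-in:

#include <bitset>
#include <cstddef>

const std::size_t KPages = 64;
std::bitset<KPages> discardable;	// pages currently holding discardable content
std::bitset<KPages> pinned;		// pages blocked by a contiguous reservation

// Find the next discardable page at or after aOffset; false when none left.
bool NextDiscardable(unsigned& aOffset)
	{
	for (; aOffset < KPages; aOffset++)
		if (discardable.test(aOffset))
			return true;
	return false;
	}

void ClearPages(unsigned aRequired)
	{
	for (unsigned offset = 0; aRequired; offset++)
		{
		if (!NextDiscardable(offset))
			break;
		if (pinned.test(offset))
			continue;				// leave reserved pages for the contiguous caller
		discardable.reset(offset);	// model of a successful M::DiscardPage()
		aRequired--;
		}
	}
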
@@ -1322 +1375 @@
 /** Attempt to allocate pages into a particular zone.  Pages will not
 	always be contiguous.
@@ -1395 +1448 @@
 	__NK_ASSERT_DEBUG(aType != EPageUnknown);
 
 	TPhysAddr* pageListBase = aPageList;
 	TUint32 numMissing = aNumPages;
 
+	if ((TUint)aNumPages > iTotalFreeRamPages)
+		{// Not enough free pages to fulfill this request so return the amount required
+		return aNumPages - iTotalFreeRamPages;
+		}
+
 	if (aType == EPageFixed)
 		{// Currently only a general defrag operation should set this and it won't
 		// allocate fixed pages.
 		__NK_ASSERT_DEBUG(!aBlockRest);
-		if ((TUint)aNumPages > iTotalFreeRamPages + M::NumberOfFreeDpPages())
-			{// Not enough free space and not enough freeable pages.
-			goto exit;
-			}
 
 		// Search through each zone in preference order until all pages allocated or
 		// have reached the end of the preference list
 		SDblQueLink* link = iZonePrefList.First();
 		while (numMissing && link != &iZonePrefList.iA)
@@ -1438 +1492 @@
 				}
 			}
 		}
 	else
 		{
-		if ((TUint)aNumPages > iTotalFreeRamPages)
-			{// Not enough free pages to fulfill this request so return amount required
-			return aNumPages - iTotalFreeRamPages;
-			}
-
 		// Determine if there are enough free pages in the RAM zones in use.
 		TUint totalFreeInUse = 0;
 		SDblQueLink* link = iZoneLeastMovDis;
 		for(; link != &iZonePrefList.iA; link = link->iPrev)
 			{
@@ -1462 +1511 @@
 
 		if (aBlockRest && totalFreeInUse < (TUint)aNumPages)
 			{// Allocating as part of a general defragmentation and
 			// can't allocate without using a RAM zone less preferable than
 			// the current least preferable RAM zone with movable and/or 
-			//discardable.
+			// discardable.
 			__NK_ASSERT_DEBUG(numMissing);
 			goto exit;
 			}
 		
 		SDblQueLink* leastClearable = iZoneLeastMovDis;
@@ -1677 +1726 @@
 
 	aState = (TZoneSearchState)currentState;
 	return r;
 	}
 
+
+#if !defined(__MEMMODEL_MULTIPLE__) && !defined(__MEMMODEL_MOVING__)
+TUint DRamAllocator::BlockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages)
+	{
+	// Shouldn't be asked to block zero pages, addrEndPage would be wrong if we did.
+	__NK_ASSERT_DEBUG(aNumPages);
+	TPhysAddr addr = aAddrBase;
+	TPhysAddr addrEndPage = aAddrBase + ((aNumPages - 1) << KPageShift);
+	TInt tmpOffset;
+	SZone* endZone = GetZoneAndOffset(addrEndPage, tmpOffset);
+	SZone* tmpZone;
+	TUint totalUnreserved = aNumPages;
+	do
+		{
+		tmpZone = GetZoneAndOffset(addr, tmpOffset);
+		__NK_ASSERT_DEBUG(tmpZone != NULL);
+		TUint runLength = 	(addrEndPage < tmpZone->iPhysEnd)? 
+							((addrEndPage - addr) >> KPageShift) + 1: 
+							tmpZone->iPhysPages - tmpOffset;
+		TUint reserved = tmpZone->iBma[KBmaAllPages]->SelectiveAlloc(tmpOffset, runLength);
+		if (reserved)
+			{
+#ifdef _DEBUG
+			TUint runEnd = tmpOffset + runLength;
+			TUint free = 0;
+			for (TUint i = tmpOffset; i < runEnd; i++)
+				if (tmpZone->iBma[EPageMovable]->NotAllocated(i,1) && tmpZone->iBma[EPageDiscard]->NotAllocated(i,1))
+					free++;
+			__NK_ASSERT_DEBUG(free == reserved);
+#endif
+			ZoneAllocPages(tmpZone, reserved, EPageFixed);
+			iTotalFreeRamPages -= reserved;
+			totalUnreserved -= reserved;
+			}
+		tmpZone->iBma[EPageFixed]->Alloc(tmpOffset, runLength);
+		addr = tmpZone->iPhysEnd + 1;
+		}
+	while (tmpZone != endZone);
+	return totalUnreserved;
+	}
+
+
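
Note: BlockContiguousRegion leans on TBitMapAllocator::SelectiveAlloc, which claims only the currently-free bits of a range and reports how many it claimed; the remainder is exactly the number of pages in the run still holding movable or discardable content. A standalone sketch of such a primitive over std::bitset (an assumed model, not the real TBitMapAllocator):

#include <bitset>
#include <cstddef>

// Set every currently-clear bit in [aOffset, aOffset + aLen) and report how
// many bits were actually set, so the caller knows how many genuinely free
// pages it just reserved.
template <std::size_t N>
unsigned SelectiveAlloc(std::bitset<N>& aBma, std::size_t aOffset, std::size_t aLen)
	{
	unsigned marked = 0;
	for (std::size_t i = aOffset; i < aOffset + aLen; i++)
		if (!aBma.test(i))
			{
			aBma.set(i);
			marked++;
			}
	return marked;
	}
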
       
+FORCE_INLINE void DRamAllocator::UnblockSetAllocRuns(	TUint& aOffset1, TUint& aOffset2, 
+														TUint aRunLength1, TUint aRunLength2, 
+														TUint& aAllocLength, TUint& aAllocStart)
+	{
+	aAllocStart = aOffset1;
+	aAllocLength = aRunLength1;
+	aOffset1 += aAllocLength;
+	if  (aOffset1 == aOffset2)
+		{
+		aAllocLength += aRunLength2;
+		aOffset2 += aRunLength2;
+		aOffset1 = aOffset2;
+		}
+	}
+
+
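
Note: UnblockSetAllocRuns coalesces a run of one page type with an immediately adjacent run of the other type, so UnblockContiguousRegion can treat the pair as one allocated stretch and free only the genuine gaps between runs. A reduced sketch of the merging rule (hypothetical Run type):

struct Run { unsigned iStart, iLength; };

// If the first run ends exactly where the second begins, merge them and
// advance the consumed second run past itself, mirroring how the kernel
// version advances aOffset2.
Run Coalesce(Run aFirst, Run& aSecond)
	{
	Run merged = aFirst;
	if (aFirst.iStart + aFirst.iLength == aSecond.iStart)
		{
		merged.iLength += aSecond.iLength;
		aSecond.iStart += aSecond.iLength;
		}
	return merged;
	}
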
       
+void DRamAllocator::UnblockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages)
+	{
+	// Shouldn't be asked to unblock zero pages, addrEndPage would be wrong if we did.
+	__NK_ASSERT_DEBUG(aNumPages);
+	TPhysAddr addr = aAddrBase;
+	TPhysAddr addrEndPage = aAddrBase + ((aNumPages - 1) << KPageShift);
+	TInt tmpOffset;
+	SZone* endZone = GetZoneAndOffset(addrEndPage, tmpOffset);
+	SZone* tmpZone;
+	do
+		{
+		tmpZone = GetZoneAndOffset(addr, tmpOffset);
+		__NK_ASSERT_DEBUG(tmpZone != NULL);
+		TUint runLength = 	(addrEndPage < tmpZone->iPhysEnd)? 
+							((addrEndPage - addr) >> KPageShift) + 1: 
+							tmpZone->iPhysPages - tmpOffset;
+		TUint unreserved = 0;
+		TUint runEnd = tmpOffset + runLength - 1;
+		TUint freeOffset = tmpOffset;
+		TUint discardOffset = freeOffset;
+		TUint movableOffset = freeOffset;
+		__KTRACE_OPT(KMMU2, Kern::Printf("freeOff %d, runEnd %d", freeOffset, runEnd));
+		while (freeOffset <= runEnd)
+			{
+			TUint discardRun;
+			TUint movableRun;
+			discardRun = NextAllocatedRun(tmpZone, discardOffset, runEnd, EPageDiscard);
+			movableRun = NextAllocatedRun(tmpZone, movableOffset, runEnd, EPageMovable);
+			TUint allocLength;
+			TUint allocStart;
+			__KTRACE_OPT(KMMU2, Kern::Printf("disOff %d len %d movOff %d len %d", discardOffset, discardRun, movableOffset, movableRun));
+			if (discardOffset < movableOffset)
+				UnblockSetAllocRuns(discardOffset, movableOffset, discardRun, movableRun, allocLength, allocStart);
+			else
+				UnblockSetAllocRuns(movableOffset, discardOffset, movableRun, discardRun, allocLength, allocStart);
+
+			if (allocStart > freeOffset)
+				{
+				unreserved += allocStart - freeOffset;
+				tmpZone->iBma[KBmaAllPages]->Free(freeOffset, allocStart - freeOffset);
+				__NK_ASSERT_DEBUG(	!tmpZone->iBma[EPageMovable]->NotFree(freeOffset, allocStart - freeOffset) && 
+									!tmpZone->iBma[EPageDiscard]->NotFree(freeOffset, allocStart - freeOffset));
+				}
+			__KTRACE_OPT(KMMU2, Kern::Printf("disOff %d len %d movOff %d len %d start %d len %d", discardOffset, discardRun, movableOffset, movableRun, allocStart, allocLength));
+			freeOffset = allocStart + allocLength;
+			__KTRACE_OPT(KMMU2, Kern::Printf("freeOff %d", freeOffset));
+			}
+		tmpZone->iBma[EPageFixed]->Free(tmpOffset, runLength);
+		ZoneFreePages(tmpZone, unreserved, EPageFixed);
+		iTotalFreeRamPages += unreserved;
+		addr = tmpZone->iPhysEnd + 1;
+		}
+	while (tmpZone != endZone);
+	}
+
+
+TUint DRamAllocator::CountPagesInRun(TPhysAddr aAddrBase, TPhysAddr aAddrEndPage, TZonePageType aType)
+	{
+	__NK_ASSERT_DEBUG(aAddrBase <= aAddrEndPage);
+	TUint totalAllocated = 0;
+	TPhysAddr addr = aAddrBase;
+	TUint tmpOffset;
+	SZone* endZone = GetZoneAndOffset(aAddrEndPage, (TInt&)tmpOffset);
+	SZone* tmpZone;
+	do
+		{
+		tmpZone = GetZoneAndOffset(addr, (TInt&)tmpOffset);
+		__NK_ASSERT_DEBUG(tmpZone != NULL);
+		TUint runLength = 	(aAddrEndPage < tmpZone->iPhysEnd)? 
+							((aAddrEndPage - addr) >> KPageShift) + 1: 
+							tmpZone->iPhysPages - tmpOffset;
+		TUint runEnd = tmpOffset + runLength - 1;
+		while (tmpOffset <= runEnd)
+			{
+			TUint run = NextAllocatedRun(tmpZone, tmpOffset, runEnd, aType);
+			totalAllocated += run;
+			tmpOffset += run;
+			}
+		addr = tmpZone->iPhysEnd + 1;
+		}
+	while (tmpZone != endZone);
+	return totalAllocated;
+	}
+
+
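
Note: CountPagesInRun walks a physical range zone by zone and sums the allocated runs NextAllocatedRun reports; ClearContiguousRegion uses the total to recompute, after the RAM allocator lock may have been flashed, how many pages of the run still need somewhere to go. A direct page-at-a-time model of the count (stand-in bitmap, not the per-type BMA):

#include <bitset>
#include <cstddef>

template <std::size_t N>
unsigned CountAllocated(const std::bitset<N>& aBma, std::size_t aStart, std::size_t aEnd)
	{
	unsigned total = 0;
	for (std::size_t i = aStart; i <= aEnd && i < N; i++)
		if (aBma.test(i))	// page allocated with the type this bitmap tracks
			total++;
	return total;
	}
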
       
+TInt DRamAllocator::ClearContiguousRegion(TPhysAddr aAddrBase, TPhysAddr aZoneBase, TUint aNumPages, TInt& aOffset, TUint aUnreservedPages)
+	{
+	TPhysAddr addr = aAddrBase;
+	TPhysAddr addrEndPage = aAddrBase + ((aNumPages - 1) << KPageShift);
+	TInt contigOffset = 0;
+	SZone* contigZone = GetZoneAndOffset(addr, contigOffset);
+	TUint unreservedPages = aUnreservedPages;
+	for (; addr <= addrEndPage; addr += KPageSize, contigOffset++)
+		{
+		if (contigZone->iPhysEnd < addr)
+			{
+			contigZone = GetZoneAndOffset(addr, contigOffset);
+			__NK_ASSERT_DEBUG(contigZone != NULL);
+			}
+
+		__NK_ASSERT_DEBUG(contigZone != NULL);
+		__NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotFree(contigOffset, 1));
+		__NK_ASSERT_DEBUG(SPageInfo::SafeFromPhysAddr(addr) != NULL);
+
+		if (unreservedPages > iTotalFreeRamPages)
+			{// May need to discard some pages so there is free space for the 
+			// pages in the contiguous run to be moved to.
+			TUint requiredPages = unreservedPages - iTotalFreeRamPages;
+			if (requiredPages)
+				{// Ask the pager to free some pages.
+				M::GetFreePages(requiredPages);
+
+				// The ram alloc lock may have been flashed so ensure that we still have
+				// enough free ram to complete the allocation.
+				TUint remainingPages = ((addrEndPage - addr) >> KPageShift) + 1;
+				unreservedPages = remainingPages - CountPagesInRun(addr, addrEndPage, EPageFixed);
+				if (unreservedPages > iTotalFreeRamPages + M::NumberOfFreeDpPages())
+					{// Not enough free space and not enough freeable pages.
+					return KErrNoMemory;
+					}
+				}
+			}
+
+		TInt r = M::MoveAndAllocPage(addr, EPageFixed);
+		if (r != KErrNone)
+			{// This page couldn't be moved or discarded so 
+			// restart the search at the page after this one.
+			__KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail contigOffset 0x%x r %d", contigOffset, r));
+			aOffset = (addr < aZoneBase)? 0 : contigOffset + 1;
+			return r;
+			}
+		__NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotFree(contigOffset, 1));
+		__NK_ASSERT_DEBUG(contigZone->iBma[KBmaAllPages]->NotFree(contigOffset, 1));
+		__NK_ASSERT_DEBUG(contigZone->iBma[EPageDiscard]->NotAllocated(contigOffset, 1));
+		__NK_ASSERT_DEBUG(contigZone->iBma[EPageMovable]->NotAllocated(contigOffset, 1));
+		}
+
+	// Successfully cleared the contiguous run
+	return KErrNone;
+	}
+
+
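
Note: ClearContiguousRegion clears the pinned run one page at a time with M::MoveAndAllocPage, first re-checking that free plus pager-reclaimable memory can still absorb the displaced contents, and on failure it hands back the offset at which the caller should resume its search. A condensed sketch of that control flow; MovePage/FreeHeadroom and the error codes are stand-ins:

#include <functional>

int ClearRun(unsigned aFirst, unsigned aLast,
			 std::function<bool(unsigned)> aMovePage,
			 std::function<unsigned()> aFreeHeadroom,
			 unsigned& aResume)
	{
	for (unsigned page = aFirst; page <= aLast; page++)
		{
		if (aLast - page + 1 > aFreeHeadroom())
			return -4;			// KErrNoMemory-style: can't absorb the rest
		if (!aMovePage(page))
			{
			aResume = page + 1;	// caller restarts its search after this page
			return -1;
			}
		}
	return 0;					// run fully cleared
	}
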
 /**
 Search through the zones for the requested contiguous RAM, first in preference 
 order then, if that fails, in address order.
 
+No support for non-fixed pages as this will discard and move pages if required.
+
 @param aNumPages The number of contiguous pages to find
 @param aPhysAddr Will contain the base address of any contiguous run if found
-@param aType The page type of the memory to be allocated
 @param aAlign Alignment specified as the alignment shift
-@param aBlockedZoneId The ID of a zone that can't be allocated into, by default this has no effect
-@param aBlockRest Set to ETrue to stop allocation as soon as aBlockedZoneId is reached 
 in preference ordering.  EFalse otherwise.
 
 @return KErrNone on success, KErrNoMemory otherwise
 */
-TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign, TUint aBlockedZoneId, TBool aBlockRest)
+TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TInt aAlign)
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam size %08x align %d",aNumPages,aAlign));
 
 	M::RamAllocIsLocked();
 
-	// No support for non-fixed pages as this will discard and move 
-	// pages if required.
-	__NK_ASSERT_DEBUG(aType == EPageFixed);
+	if ((TUint)aNumPages > iTotalFreeRamPages + M::NumberOfFreeDpPages())
+		{// Not enough free space and not enough freeable pages.
+		return KErrNoMemory;
+		}
+	if (aNumPages > iTotalFreeRamPages)
+		{// Need to discard some pages so there is free space for the pages in 
+		// the contiguous run to be moved to.
+		TUint requiredPages = aNumPages - iTotalFreeRamPages;
+		if (!M::GetFreePages(requiredPages))
+			return KErrNoMemory;
+		}
+
 	TInt alignWrtPage = Max(aAlign - KPageShift, 0);
 	TUint32 alignmask = (1u << alignWrtPage) - 1;
 
 	// Attempt to find enough pages searching in preference order first then
 	// in address order
@@ -1714 +1971 @@
 	TInt carryImmov = 0;	// Carry for immovable pages bma, clear to start new run.
 	TInt base = 0;
 	TInt offset = 0;
 	iZoneTmpAddrIndex = -1;
 	iZoneTmpPrefLink = iZonePrefList.First();
-	while (NextAllocZone(zone, searchState, aType, aBlockedZoneId, aBlockRest))
+	while (NextAllocZone(zone, searchState, EPageFixed, KRamZoneInvalidId, EFalse))
 		{
 		// Be sure to start from scratch if zone not contiguous with previous zone
 		if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd))
 			{
 			carryAll = 0;
@@ -1731 +1988 @@
 		__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryAll=%08x offset=%08x", base, carryAll, offset));
 		offset = bmaAll.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryAll, runLength);
 		__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
 
 		if (offset >= 0)
       
+			{
+			// Have found enough contiguous pages so return address of physical page
+			// at the start of the region
+			aPhysAddr = TPhysAddr((base + offset - carryAll + alignmask) & ~alignmask) << KPageShift;
+			MarkPagesAllocated(aPhysAddr, aNumPages, EPageFixed);
+
+			__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
+#ifdef BTRACE_RAM_ALLOCATOR
+			BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr);
+#endif
+			return KErrNone;
+			}
+		// No run found when looking in just the free pages so see if this
+		// RAM zone could be used if pages were moved or discarded.
+		TBitMapAllocator& bmaImmov = *(zone->iBma[EPageFixed]);
+		offset = 0;	// Clear so searches whole of fixed BMA on the first pass.
+		do
+			{
+			__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryImmov=%08x offset=%08x", base, carryImmov, offset));
+			offset = bmaImmov.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryImmov, runLength, offset);
+			__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
+			if (offset >= 0)
+				{// Have found a run in immovable page bma so attempt to clear
+				// it for the allocation.
+				TPhysAddr addrBase = TPhysAddr((base + offset - carryImmov + alignmask) & ~alignmask) << KPageShift;
+				__KTRACE_OPT(KMMU2, Kern::Printf(">AllocContig fix run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread));
+
+				// Block the contiguous region from being allocated.
+				iContiguousReserved++;
+				TUint unreservedPages = BlockContiguousRegion(addrBase, aNumPages);
+				TInt clearRet = ClearContiguousRegion(addrBase, zone->iPhysBase, aNumPages, offset, unreservedPages);
+				if (clearRet == KErrNone)
+					{// Cleared all the required pages.
+					// Return address of physical page at the start of the region.
+					iContiguousReserved--;
+					aPhysAddr = addrBase;
+					__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
+#ifdef BTRACE_RAM_ALLOCATOR
+					BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr);
+#endif
+					__KTRACE_OPT(KMMU2, Kern::Printf("<AllocContig suc run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread));
+					return KErrNone;
+					}
+				else
+					{
+					// Unblock the contiguous region.
+					UnblockContiguousRegion(addrBase, aNumPages);
+					iContiguousReserved--;
+					__KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail offset 0x%x carryImmov %x", 
+														offset, carryImmov));
+					// Can't rely on RAM zone preference ordering being
+					// the same so clear carrys and restart search from
+					// within the current RAM zone or skip onto the next 
+					// one if at the end of this one.
+					carryImmov = 0;
+					carryAll = 0;
+					__KTRACE_OPT(KMMU2, Kern::Printf("<AllocContigfail run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread));
+					if (clearRet == KErrNoMemory)
+						{// There are no longer enough free or discardable pages to 
+						// be able to fulfill this allocation.
+						return KErrNoMemory;
+						}
+					}
+				}
+			}
+		// Keep searching immovable page bma of the current RAM zone until 
+		// gone past end of RAM zone or no run can be found.
+		while (offset >= 0 && (TUint)offset < zone->iPhysPages);
+		}
+	return KErrNoMemory;
+	}
+
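
Note: this block is the heart of the new path: when no window exists among wholly free pages, the allocator searches the EPageFixed BMA for a candidate, pins it (iContiguousReserved++ plus BlockContiguousRegion) so concurrent frees cannot recycle its pages, then tries to clear it, unwinding completely and clearing its carries if that fails. A skeleton of the reserve-clear-release cycle with hypothetical callbacks:

// All parameters are stand-ins, not the DRamAllocator interface.
bool TryWindow(unsigned aBase, unsigned aPages, int& aReservedFlag,
			   void (*aBlock)(unsigned, unsigned),
			   bool (*aClear)(unsigned, unsigned),
			   void (*aUnblock)(unsigned, unsigned))
	{
	aReservedFlag++;			// tells the free paths a reservation is in flight
	aBlock(aBase, aPages);
	if (aClear(aBase, aPages))
		{
		aReservedFlag--;
		return true;			// the window is now wholly allocated as fixed
		}
	aUnblock(aBase, aPages);	// put back whatever was pinned
	aReservedFlag--;
	return false;				// caller clears its carries and keeps searching
	}
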
       
+#else
+
+/**
+Search through the zones for the requested contiguous RAM, first in preference 
+order then, if that fails, in address order.
+
+No support for non-fixed pages as this will discard and move pages if required.
+
+@param aNumPages The number of contiguous pages to find
+@param aPhysAddr Will contain the base address of any contiguous run if found
+@param aAlign Alignment specified as the alignment shift
+
+@return KErrNone on success, KErrNoMemory otherwise
+*/
+TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TInt aAlign)
+	{
+	__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam size %08x align %d",aNumPages,aAlign));
+
+	M::RamAllocIsLocked();
+
+	TInt alignWrtPage = Max(aAlign - KPageShift, 0);
+	TUint32 alignmask = (1u << alignWrtPage) - 1;
+
+	// Attempt to find enough pages searching in preference order first then
+	// in address order
+	TZoneSearchState searchState = EZoneSearchPref;
+	SZone* zone;
+	SZone* prevZone = NULL;
+	TInt carryAll = 0;		// Carry for all pages bma, clear to start new run.
+	TInt carryImmov = 0;	// Carry for immovable pages bma, clear to start new run.
+	TInt base = 0;
+	TInt offset = 0;
+	iZoneTmpAddrIndex = -1;
+	iZoneTmpPrefLink = iZonePrefList.First();
+	while (NextAllocZone(zone, searchState, EPageFixed, KRamZoneInvalidId, EFalse))
+		{
+		// Be sure to start from scratch if zone not contiguous with previous zone
+		if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd))
+			{
+			carryAll = 0;
+			carryImmov = 0;
+			}
+		prevZone = zone;
+		TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
+		base = TInt(zone->iPhysBase >> KPageShift);
+		TInt runLength;
+		__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryAll=%08x offset=%08x", base, carryAll, offset));
+		offset = bmaAll.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryAll, runLength);
+		__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
+
+		if (offset >= 0)
 			{// Have found enough contiguous pages so return address of physical page
 			 // at the start of the region
 			aPhysAddr = TPhysAddr((base + offset - carryAll + alignmask) & ~alignmask) << KPageShift;
-			MarkPagesAllocated(aPhysAddr, aNumPages, aType);
+			MarkPagesAllocated(aPhysAddr, aNumPages, EPageFixed);
 
 			__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
 #ifdef BTRACE_RAM_ALLOCATOR
-			BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, aType, aNumPages, aPhysAddr);
+			BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr);
 #endif
 			return KErrNone;
 			}
 		else
 			{// No run found when looking in just the free pages so see if this
@@ -1786 +2166 @@
 						if (contigZone->iPhysEnd < addr)
 							{
 							contigZone = GetZoneAndOffset(addr, contigOffset);
 							__NK_ASSERT_DEBUG(contigZone != NULL);
 							}
-#ifdef _DEBUG			// This page shouldn't be allocated as fixed, only movable or discardable.
+						// This page shouldn't be allocated as fixed, only movable or discardable.
 						__NK_ASSERT_DEBUG(contigZone != NULL);
 						__NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotAllocated(contigOffset, 1));
-						SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(addr);
-						__NK_ASSERT_DEBUG(pageInfo != NULL);
-#endif
+						__NK_ASSERT_DEBUG(SPageInfo::SafeFromPhysAddr(addr) != NULL);
+
 						TPhysAddr newAddr;
-						TInt moveRet = M::MovePage(addr, newAddr, contigZone->iId, EFalse);
+						TInt moveRet = M::MovePage(addr, newAddr, contigZone->iId, 0);
 						if (moveRet != KErrNone && moveRet != KErrNotFound)
 							{// This page couldn't be moved or discarded so 
 							// restart the search at the page after this one.
 							__KTRACE_OPT(KMMU2, 
 										Kern::Printf("ContigMov fail offset %x moveRet %d addr %x carryImmov %x", 
@@ -1825 +2204 @@
 
 					if (addr == addrEnd)
 						{// Cleared all the required pages so allocate them.
 						// Return address of physical page at the start of the region.
 						aPhysAddr = addrBase;
-						MarkPagesAllocated(aPhysAddr, aNumPages, aType);
+						MarkPagesAllocated(aPhysAddr, aNumPages, EPageFixed);
 
 						__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
 #ifdef BTRACE_RAM_ALLOCATOR
-						BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, aType, aNumPages, aPhysAddr);
+						BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr);
 #endif
 						return KErrNone;
 						}
 					}
 				}
@@ -1842 +2221 @@
 			while (offset >= 0 && (TUint)offset < zone->iPhysPages);
 			}
 		}
 	return KErrNoMemory;
 	}
+#endif // !defined(__MEMMODEL_MULTIPLE__) && !defined(__MEMMODEL_MOVING__)
 
 
 /**
 Attempt to allocate the contiguous RAM from the specified zone.
 
@@ -1856 +2236 @@
 
 @param aZoneIdList 	An array of the IDs of the RAM zones to allocate from.
 @param aZoneIdCount	The number of the IDs listed by aZoneIdList.
 @param aSize 		The number of contiguous bytes to find
 @param aPhysAddr 	Will contain the base address of the contiguous run if found
-@param aType 		The page type of the memory to be allocated
 @param aAlign 		Alignment specified as the alignment shift
 
 @return KErrNone on success, KErrNoMemory if allocation couldn't succeed or 
 the RAM zone has the KRamZoneFlagNoAlloc flag set.  KErrArgument if a zone of
 aZoneIdList doesn't exist or if aSize is larger than the size of the zone.
 */
-TInt DRamAllocator::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign)
+TInt DRamAllocator::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam zones 0x%x size 0x%08x align %d",aZoneIdCount, aSize, aAlign));
 
 	M::RamAllocIsLocked();
-	__NK_ASSERT_DEBUG(aType == EPageFixed);
 
 
 	TUint numPages = (aSize + KPageSize - 1) >> KPageShift;
 	TInt carry = 0; // must be zero as this is always the start of a new run
 	TInt alignWrtPage = Max(aAlign - KPageShift, 0);
@@ -1928 +2306 @@
 		}
 
 	// Have found enough contiguous pages so mark the pages allocated and 
 	// return address of physical page at the start of the region.
 	aPhysAddr = TPhysAddr((base + offset - carry + alignmask) & ~alignmask) << KPageShift;
-	MarkPagesAllocated(aPhysAddr, numPages, aType);
+	MarkPagesAllocated(aPhysAddr, numPages, EPageFixed);
 
 	__KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam returns %08x",aPhysAddr));
 #ifdef BTRACE_RAM_ALLOCATOR
-	BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocZoneContiguousRam, aType, numPages, aPhysAddr);
+	BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocZoneContiguousRam, EPageFixed, numPages, aPhysAddr);
 #endif
 	return KErrNone;
 	}
 
 
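
Note: a worked example of the size and alignment arithmetic shared by these allocators: byte counts round up to whole pages, the alignment shift is re-expressed relative to the page shift, and a candidate page index is rounded up with a mask. Standalone sketch assuming 4KB pages (KPageShift of 12):

#include <algorithm>
#include <cstdint>

const unsigned KPageShift = 12;	// 4KB pages assumed

uint32_t PagesForBytes(uint32_t aBytes)
	{
	return (aBytes + (1u << KPageShift) - 1) >> KPageShift;	// round up
	}

uint32_t AlignCandidate(uint32_t aPageIndex, int aAlignShift)
	{
	int alignWrtPage = std::max(aAlignShift - (int)KPageShift, 0);
	uint32_t alignmask = (1u << alignWrtPage) - 1;
	return (aPageIndex + alignmask) & ~alignmask;	// round up to alignment
	}
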
@@ -2104 +2482 @@
 
 	__NK_ASSERT_DEBUG(aZone - iZones < (TInt)iNumZones);
 	// Makes things simpler for bma selection.
 	__NK_ASSERT_DEBUG(aType != EPageUnknown);
 
-	if (aOffset >= aZone->iPhysPages)
+	TUint zoneEndOffset = aZone->iPhysPages - 1;
+	if (aOffset > zoneEndOffset)
 		{// Starting point is outside the zone
 		return KErrArgument;
 		}
 
-	TUint offset = aOffset;
-	TUint endOffset = aZone->iPhysPages;
-	TUint endOffsetAligned = endOffset & KWordAlignMask;
+	TUint wordIndex = aOffset >> 5;
+	TUint endWordIndex = zoneEndOffset >> 5;
 
 	// Select the BMA to search, 
 	TUint bmaIndex = (aType == EPageTypes)? KBmaAllPages : aType;
-	TUint32* map = &(aZone->iBma[bmaIndex]->iMap[offset >> 5]);
-	TUint32 bits = *map++;
+	TUint32* map = &(aZone->iBma[bmaIndex]->iMap[wordIndex]);
+	TUint32* mapEnd = &(aZone->iBma[bmaIndex]->iMap[endWordIndex]);
+	TUint32 bits = *map;
 
 	// Set bits for pages before 'offset' (i.e. ones we want to ignore)...
-	bits |= ~(KMaxTUint32 >> (offset & ~KWordAlignMask));
+	bits |= ~(KMaxTUint32 >> (aOffset & ~KWordAlignMask));
 
 	// Find the first bit map word from aOffset in aZone with allocated pages
-	while (bits == KMaxTUint32 && offset < endOffsetAligned)
+	while (bits == KMaxTUint32 && map < mapEnd)
 		{
-		bits = *map++;
-		offset = (offset + 32) & KWordAlignMask;
+		bits = *++map;
 		}
 
-	if (offset >= endOffsetAligned && endOffset != endOffsetAligned)
+	if (map == mapEnd)
 		{// Have reached the last bit mask word so set the bits that are
 		//  outside of the zone so that they are ignored.
-		bits |= KMaxTUint32 >> (endOffset - endOffsetAligned);
+		bits |= (KMaxTUint32 >> (zoneEndOffset & ~KWordAlignMask)) >> 1;
 		}
 
 	if (bits == KMaxTUint32)
 		{// No allocated pages found after aOffset in aZone.
 		return KErrNotFound;
 		}
 
 	// Now we have bits with allocated pages in it so determine the exact 
 	// offset of the next allocated page
-	TUint32 mask = 0x80000000 >> (offset & ~KWordAlignMask);
-	while (bits & mask)
-		{
-		mask >>= 1;
-		offset++;
-		}
-
-	if (offset >= endOffset)
-		{// Reached the end of the zone without finding an allocated page after aOffset
-		return KErrNotFound;
-		}
-
-	// Should definitely have found an allocated page within aZone's pages
-	__NK_ASSERT_DEBUG(mask != 0 && !(bits & mask) && offset < aZone->iPhysPages);
-
-	aOffset = offset;
+	TInt msOne = __e32_find_ms1_32(~bits);
+	__NK_ASSERT_DEBUG(msOne >= 0);	// Must have at least one allocated page in the word.
+	TUint msOneOffset = 31 - msOne;
+	aOffset = ((map - aZone->iBma[bmaIndex]->iMap) << 5) + msOneOffset;
 	return KErrNone;
 	}
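
Note: the rewrite above swaps the bit-by-bit mask walk for pointer-bounded word scanning plus __e32_find_ms1_32 (find the most significant set bit). In the BMA a set bit means a free page, so a word of all ones holds no allocated page and the first allocated page in a word is its most significant zero bit. A compilable model, with the intrinsic replaced by a portable loop:

#include <cstdint>

// Index (31..0) of the most significant set bit, -1 if none; stands in for
// the __e32_find_ms1_32 intrinsic.
int FindMs1(uint32_t aWord)
	{
	for (int i = 31; i >= 0; i--)
		if (aWord & (1u << i))
			return i;
	return -1;
	}

// Next allocated page at or after aStartPage in a map of aWords words;
// pages map MSB-first within each word, 1 = free, 0 = allocated.
int NextAllocated(const uint32_t* aMap, unsigned aWords, unsigned aStartPage)
	{
	unsigned word = aStartPage >> 5;
	if (word >= aWords)
		return -1;
	uint32_t bits = aMap[word];
	bits |= ~(0xFFFFFFFFu >> (aStartPage & 31));	// ignore pages before the start
	while (bits == 0xFFFFFFFFu && ++word < aWords)
		bits = aMap[word];
	if (bits == 0xFFFFFFFFu)
		return -1;									// no allocated page remains
	int msOne = FindMs1(~bits);						// most significant 0 in bits
	return (int)(word << 5) + (31 - msOne);
	}
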
       
+
+
+/**
+Get the next run of pages in this zone that are allocated after aOffset.
+
+@param aZone	The zone to find the next allocated page in.
+@param aOffset	On entry this is the offset from which the next allocated
+				page in the zone should be found, on return it will be the offset 
+				of the next allocated page.
+@param aEndOffset The last offset within this RAM zone to check for allocated runs.
+@return The length of any run found.
+*/
+TInt DRamAllocator::NextAllocatedRun(SZone* aZone, TUint& aOffset, TUint aEndOffset, TZonePageType aType) const
+	{
+	const TUint KWordAlignMask = KMaxTUint32 << 5;
+
+	M::RamAllocIsLocked();
+
+	__NK_ASSERT_DEBUG(aZone - iZones < (TInt)iNumZones);
+	// Makes things simpler for bma selection.
+	__NK_ASSERT_DEBUG(aType != EPageUnknown);
+
+	if (aOffset > aEndOffset)
+		{// UnblockContiguous() has already searched the whole range for this page type.
+		return 0;
+		}
+
+	TUint wordIndex = aOffset >> 5;
+	TUint endWordIndex = aEndOffset >> 5;
+
+	// Select the BMA to search, 
+	TUint bmaIndex = (aType == EPageTypes)? KBmaAllPages : aType;
+	TUint32* map = &(aZone->iBma[bmaIndex]->iMap[wordIndex]);
+	TUint32* mapEnd = &(aZone->iBma[bmaIndex]->iMap[endWordIndex]);
+	TUint32 bits = *map;
+
+	// Set bits for pages before 'offset' (i.e. ones we want to ignore)...
+	bits |= ~(KMaxTUint32 >> (aOffset & ~KWordAlignMask));
+
+	// Find the first bit map word from aOffset in aZone with allocated pages
+	while (bits == KMaxTUint32 && map < mapEnd)
+		{
+		bits = *++map;
+		}
+
+	if (map == mapEnd)
+		{// Have reached the last bit mask word so set the bits that are
+		//  outside of the range so that they are ignored.
+		bits |= (KMaxTUint32 >> (aEndOffset & ~KWordAlignMask)) >> 1;
+		}
+
+	if (bits == KMaxTUint32)
+		{// No allocated pages found in the range.
+		aOffset = aEndOffset + 1;
+		return 0;
+		}
+
+	// Now we have bits with allocated pages in it so determine the exact 
+	// offset of the next allocated page
+	TInt msOne = __e32_find_ms1_32(~bits);
+	__NK_ASSERT_DEBUG(msOne >= 0);	// Must have at least one allocated page in the word.
+	TUint msOneOffset = 31 - msOne;
+	aOffset = ((map - aZone->iBma[bmaIndex]->iMap) << 5) + msOneOffset;
+	TUint32* runWord = map;
+
+	if (map < mapEnd && __e32_bit_count_32(~bits) == msOne + 1)
+		{// The whole of the region in this word is allocated.
+		// Find the next word which isn't completely allocated within the range.
+		do
+			{
+			bits = *++map;
+			}
+		while (!bits && map < mapEnd);
+		}
+
+	// Clear any bits before the run so we can get the next free page from __e32_find_ms1_32().
+	if (runWord == map)
+		bits &= KMaxTUint32 >> (aOffset & ~KWordAlignMask);
+	TInt msFree = __e32_find_ms1_32(bits);
+	__NK_ASSERT_DEBUG(msFree >= 0 || map == mapEnd);
+	TUint msFreeOffset = (msFree >= 0)? 31 - msFree : 32;
+	TUint endIndex = map - aZone->iBma[bmaIndex]->iMap;
+	TUint runEnd = (endIndex << 5) + msFreeOffset;
+	if (runEnd > aEndOffset + 1)	// Ensure we don't go past the range.
+		runEnd = aEndOffset + 1;
+	__NK_ASSERT_DEBUG(runEnd > aOffset);
+
+	return runEnd - aOffset;
+	}
+
 
 /**
 See if any of the least preferable RAM zones can be emptied.  If they can then 
 initialise the allocator for a general defragmentation operation.
 