kernel/eka/memmodel/epoc/flexible/mmu/mmapping.cpp
changeset 47:46fffbe7b5a7
parent    44:36bfc973b146
comparison of 46:0bf4040442f9 with 47:46fffbe7b5a7
@@ -61,10 +61,16 @@
 
 
 DMemoryMapping::~DMemoryMapping()
 	{
 	TRACE(("DMemoryMapping[0x%08x]::~DMemoryMapping()",this));
+	Destruct();
+	}
+
+
+void DMemoryMapping::Destruct()
+	{
 	__NK_ASSERT_DEBUG(!IsAttached());
 
 	// remove from address space...
 	TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
 	TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
@@ -1256,53 +1262,86 @@
 	__NK_ASSERT_DEBUG(IsAttached());
 	return GetPageTable(aLinAddr);
 	}
 
 
+//
+// DKernelPinMapping
+//
+DKernelPinMapping::DKernelPinMapping()
+	// : iReservePages(0)	// Allocated on the kernel heap so will already be 0.
+	{
+	Flags() |= EPhysicalPinningMapping | EPinned;
+	}
+
+
+TInt DKernelPinMapping::Construct(TUint aReserveMaxSize)
+	{
+	TInt r = KErrNone;
+	if (aReserveMaxSize)
+		{
+		// Should not call Construct() on a mapping that has already reserved resources.
+		__NK_ASSERT_DEBUG(!iReservePages);
+		r = DFineMapping::Construct(EMemoryAttributeStandard, 
+									EMappingCreateReserveAllResources, 
+									KKernelOsAsid, 
+									0, 
+									aReserveMaxSize, 
+									0);
+		if (r == KErrNone)
+			iReservePages = aReserveMaxSize >> KPageShift;
+		}
+	return r;
+	}
+
+
+TInt DKernelPinMapping::MapAndPin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions)
+	{
+	if (IsAttached())
+		{
+		return KErrInUse;
+		}
+
+	if (!iReservePages)
+		{
+		TInt r = DFineMapping::Construct(	EMemoryAttributeStandard, 
+											EMappingCreateDefault, 
+											KKernelOsAsid, 
+											0, 
+											aCount, 
+											0);
+		if (r != KErrNone)
+			return r;
+		}
+	// Map the memory, this will pin it first then map it.
+	TInt r = DFineMapping::Map(aMemory, aIndex, aCount, aPermissions);
+
+	if (r != KErrNone && !iReservePages)
+		{// Reset this mapping object so it can be reused but has freed its address space.
+		DMemoryMapping::Destruct();
+		}
+	return r;
+	}
+
+
+void DKernelPinMapping::UnmapAndUnpin()
+	{
+	DFineMapping::Unmap();
+	if (!iReservePages)
+		{// Reset this mapping object so it can be reused but has freed its address space.
+		DMemoryMapping::Destruct();
+		}
+	}
+
 
 //
 // DPhysicalPinMapping
 //
 
 DPhysicalPinMapping::DPhysicalPinMapping()
 	: DMemoryMappingBase(EPinned|EPhysicalPinningMapping)
 	{
-	}
-
-
-TInt DPhysicalPinMapping::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
-	{
-	__NK_ASSERT_ALWAYS(IsAttached());
-
-	__NK_ASSERT_ALWAYS(TUint(aIndex+aCount)>aIndex && TUint(aIndex+aCount)<=iSizeInPages);
-	aIndex += iStartIndex;
-
-	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we should only be called whilst memory is Pinned
-	TInt r = memory->PhysAddr(aIndex,aCount,aPhysicalAddress,aPhysicalPageList);
-	if(r!=KErrNone)
-		return r;
-
-	if(memory->IsDemandPaged() && !IsReadOnly())
-		{
-		// the memory is demand paged and writeable so we need to mark it as dirty
-		// as we have to assume that the memory will be modified via the physical
-		// addresses we return...
-		MmuLock::Lock();
-		TPhysAddr* pages = aPhysicalPageList;
-		TUint count = aCount;
-		while(count)
-			{
-			SPageInfo* pi = SPageInfo::FromPhysAddr(*(pages++));
-			pi->SetDirty();
-			if((count&(KMaxPageInfoUpdatesInOneGo-1))==0)
-				MmuLock::Flash(); // flash lock every KMaxPageInfoUpdatesInOneGo iterations of the loop
-			--count;
-			}
-		MmuLock::Unlock();
-		}
-
-	return KErrNone;
 	}
 
 
 TInt DPhysicalPinMapping::Pin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions)
 	{
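
The DKernelPinMapping methods added in the hunk above are intended to be driven as a unit: an optional Construct() to pre-reserve address space, then MapAndPin()/UnmapAndUnpin() around each access. The sketch below is illustrative only and is not part of the changeset; it assumes the caller already has a heap-allocated DKernelPinMapping and a suitable DMemoryObject, and that the flexible memory model's internal headers (e.g. mmapping.h) are in scope.

// Illustrative sketch only (not from this changeset): drive the new
// DKernelPinMapping API end to end. 'aMapping' is assumed to be allocated
// on the kernel heap, as the constructor comment above requires; the other
// arguments describe an existing memory object chosen by the caller.
TInt PinAndAccessExample(DKernelPinMapping& aMapping, DMemoryObject* aMemory,
						 TUint aIndex, TUint aCount, TMappingPermissions aPermissions)
	{
	// Pre-reserve address space and resources so the later MapAndPin() doesn't
	// have to construct a mapping on demand (iReservePages becomes non-zero).
	TInt r = aMapping.Construct(aCount << KPageShift);
	if (r != KErrNone)
		return r;

	// Pin the pages of aMemory and map them into the kernel address space.
	r = aMapping.MapAndPin(aMemory, aIndex, aCount, aPermissions);
	if (r != KErrNone)
		return r;

	// ... access the memory through the kernel mapping here ...

	// Unmap and unpin; because resources were reserved by Construct(), the
	// mapping keeps them and can be reused for another MapAndPin().
	aMapping.UnmapAndUnpin();
	return KErrNone;
	}
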
@@ -1712,10 +1751,45 @@
 		memory->iManager->QueueCleanup(memory,DMemoryManager::ECleanupDecommitted);
 		}
 	}
 
 
+TInt DMemoryMappingBase::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
+	{
+	__NK_ASSERT_ALWAYS(IsAttached() && IsPhysicalPinning());
+
+	__NK_ASSERT_ALWAYS(TUint(aIndex+aCount)>aIndex && TUint(aIndex+aCount)<=iSizeInPages);
+	aIndex += iStartIndex;
+
+	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we should only be called whilst memory is Pinned
+	TInt r = memory->PhysAddr(aIndex,aCount,aPhysicalAddress,aPhysicalPageList);
+	if(r!=KErrNone)
+		return r;
+
+	if(memory->IsDemandPaged() && !IsReadOnly())
+		{
+		// the memory is demand paged and writeable so we need to mark it as dirty
+		// as we have to assume that the memory will be modified via the physical
+		// addresses we return...
+		MmuLock::Lock();
+		TPhysAddr* pages = aPhysicalPageList;
+		TUint count = aCount;
+		while(count)
+			{
+			SPageInfo* pi = SPageInfo::FromPhysAddr(*(pages++));
+			pi->SetDirty();
+			if((count&(KMaxPageInfoUpdatesInOneGo-1))==0)
+				MmuLock::Flash(); // flash lock every KMaxPageInfoUpdatesInOneGo iterations of the loop
+			--count;
+			}
+		MmuLock::Unlock();
+		}
+
+	return KErrNone;
+	}
+
+
 
 //
 // Debug
 //
 
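
For completeness, a minimal sketch (again not part of the changeset) of calling the DMemoryMappingBase::PhysAddr() routine added above from a client that has already physically pinned some memory, for example through the DPhysicalPinMapping::Pin() signature shown earlier in this file. The function name, the zero start index and the caller-supplied page-list buffer are assumptions chosen for illustration.

// Illustrative sketch only (not from this changeset): look up the physical
// pages behind an already-pinned DPhysicalPinMapping. 'aPages' is assumed to
// have room for at least aCount entries.
TInt LookupPinnedPhysicalPages(DPhysicalPinMapping& aPinning, TUint aCount, TPhysAddr* aPages)
	{
	// PhysAddr() asserts that the mapping is attached and is a physical-pinning
	// mapping, so Pin() must have succeeded before this is called.
	TPhysAddr physStart = KPhysAddrInvalid;
	TInt r = aPinning.PhysAddr(0, aCount, physStart, aPages);
	if (r != KErrNone)
		return r;
	// On success aPages[] holds one physical address per page; for writeable
	// demand-paged memory PhysAddr() has also marked those pages dirty, as the
	// code above shows.
	return KErrNone;
	}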