kernel/eka/memmodel/epoc/flexible/mmu/mmapping.cpp
changeset 47 46fffbe7b5a7
parent 44 36bfc973b146
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mmapping.cpp	Fri Jan 22 11:03:55 2010 +0200
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mmapping.cpp	Tue Jan 26 13:13:38 2010 +0200
@@ -63,6 +63,15 @@
 DMemoryMapping::~DMemoryMapping()
 	{
 	TRACE(("DMemoryMapping[0x%08x]::~DMemoryMapping()",this));
+	Destruct();
+	}
+
+
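+// Free the address space and any other resources owned by this mapping.
+// Called from the destructor, and also directly by derived classes such as
+// DKernelPinMapping that reset a mapping so it can be reused.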
+void DMemoryMapping::Destruct()
+	{
 	__NK_ASSERT_DEBUG(!IsAttached());
 
 	// remove from address space...
@@ -1258,6 +1267,87 @@
 	}
 
 
+//
+// DKernelPinMapping
+//
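+// A fine-grained mapping used to map memory into the kernel's address space
+// and physically pin it there.  If Construct() reserves resources up front,
+// every MapAndPin() reuses them; otherwise MapAndPin() allocates the address
+// space itself and UnmapAndUnpin() frees it again.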
+DKernelPinMapping::DKernelPinMapping()
+	// : iReservePages(0)	// Allocated on the kernel heap so will already be 0.
+	{
+	Flags() |= EPhysicalPinningMapping | EPinned;
+	}
+
+
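+// Reserve the resources needed to map and pin up to aReserveMaxSize bytes,
+// so that later calls to MapAndPin() need not allocate any address space.
+// A zero aReserveMaxSize reserves nothing and leaves allocation to
+// MapAndPin() itself.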
+TInt DKernelPinMapping::Construct(TUint aReserveMaxSize)
+	{
+	TInt r = KErrNone;
+	if (aReserveMaxSize)
+		{
+		// Should not call Construct() on a mapping that has already reserved resources.
+		__NK_ASSERT_DEBUG(!iReservePages);
+		r = DFineMapping::Construct(EMemoryAttributeStandard, 
+									EMappingCreateReserveAllResources, 
+									KKernelOsAsid, 
+									0, 
+									aReserveMaxSize, 
+									0);
+		if (r == KErrNone)
+			iReservePages = aReserveMaxSize >> KPageShift;
+		}
+	return r;
+	}
+
+
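+// Pin aCount pages of aMemory, starting at page index aIndex, and map them
+// into the kernel address space with aPermissions.  Returns KErrInUse if the
+// mapping is already in use, otherwise one of the system-wide error codes.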
+TInt DKernelPinMapping::MapAndPin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions)
+	{
+	if (IsAttached())
+		{
+		return KErrInUse;
+		}
+
+	if (!iReservePages)
+		{
+		TInt r = DFineMapping::Construct(	EMemoryAttributeStandard, 
+											EMappingCreateDefault, 
+											KKernelOsAsid, 
+											0, 
+											aCount, 
+											0);
+		if (r != KErrNone)
+			return r;
+		}
+	// Map the memory; this pins the pages first and then maps them.
+	TInt r = DFineMapping::Map(aMemory, aIndex, aCount, aPermissions);
+
+	if (r != KErrNone && !iReservePages)
+		{// Reset this mapping object so it can be reused, freeing the address space it allocated.
+		DMemoryMapping::Destruct();
+		}
+	return r;
+	}
+
+
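+// Undo a successful MapAndPin(): unmap and unpin the memory and, if no
+// resources were reserved via Construct(), free the address space too.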
+void DKernelPinMapping::UnmapAndUnpin()
+	{
+	DFineMapping::Unmap();
+	if (!iReservePages)
+		{// Reset this mapping object so it can be reused, freeing the address space it allocated.
+		DMemoryMapping::Destruct();
+		}
+	}
+
 
 //
 // DPhysicalPinMapping
@@ -1269,41 +1359,6 @@
 	}
 
 
-TInt DPhysicalPinMapping::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
-	{
-	__NK_ASSERT_ALWAYS(IsAttached());
-
-	__NK_ASSERT_ALWAYS(TUint(aIndex+aCount)>aIndex && TUint(aIndex+aCount)<=iSizeInPages);
-	aIndex += iStartIndex;
-
-	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we should only be called whilst memory is Pinned
-	TInt r = memory->PhysAddr(aIndex,aCount,aPhysicalAddress,aPhysicalPageList);
-	if(r!=KErrNone)
-		return r;
-
-	if(memory->IsDemandPaged() && !IsReadOnly())
-		{
-		// the memory is demand paged and writeable so we need to mark it as dirty
-		// as we have to assume that the memory will be modified via the physical
-		// addresses we return...
-		MmuLock::Lock();
-		TPhysAddr* pages = aPhysicalPageList;
-		TUint count = aCount;
-		while(count)
-			{
-			SPageInfo* pi = SPageInfo::FromPhysAddr(*(pages++));
-			pi->SetDirty();
-			if((count&(KMaxPageInfoUpdatesInOneGo-1))==0)
-				MmuLock::Flash(); // flash lock every KMaxPageInfoUpdatesInOneGo iterations of the loop
-			--count;
-			}
-		MmuLock::Unlock();
-		}
-
-	return KErrNone;
-	}
-
-
 TInt DPhysicalPinMapping::Pin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions)
 	{
 	PteType() =	Mmu::PteType(aPermissions,true);
@@ -1714,6 +1769,45 @@
 	}
 
 
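+// Look up the physical addresses of the pages from aIndex to aIndex+aCount-1
+// of this mapping; the mapping must be attached and be a physical pinning
+// mapping.  Writeable demand-paged pages are marked dirty, since the caller
+// must be assumed to modify them via the physical addresses returned.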
+TInt DMemoryMappingBase::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
+	{
+	__NK_ASSERT_ALWAYS(IsAttached() && IsPhysicalPinning());
+
+	__NK_ASSERT_ALWAYS(TUint(aIndex+aCount)>aIndex && TUint(aIndex+aCount)<=iSizeInPages);
+	aIndex += iStartIndex;
+
+	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we should only be called whilst memory is Pinned
+	TInt r = memory->PhysAddr(aIndex,aCount,aPhysicalAddress,aPhysicalPageList);
+	if(r!=KErrNone)
+		return r;
+
+	if(memory->IsDemandPaged() && !IsReadOnly())
+		{
+		// The memory is demand paged and writeable, so mark it dirty; we must
+		// assume that it will be modified via the physical addresses we
+		// return...
+		MmuLock::Lock();
+		TPhysAddr* pages = aPhysicalPageList;
+		TUint count = aCount;
+		while(count)
+			{
+			SPageInfo* pi = SPageInfo::FromPhysAddr(*(pages++));
+			pi->SetDirty();
+			if((count&(KMaxPageInfoUpdatesInOneGo-1))==0)
+				MmuLock::Flash(); // flash lock every KMaxPageInfoUpdatesInOneGo iterations of the loop
+			--count;
+			}
+		MmuLock::Unlock();
+		}
+
+	return KErrNone;
+	}
+
+
 
 //
 // Debug