kernel/eka/memmodel/epoc/flexible/mmu/mmapping.cpp
branch anywhere
changeset 41 d32f34975bbf
parent 33 0173bcd7697c
child 39 5d2844f35677
comparing 40:04a1b74efd48 with 41:d32f34975bbf
 			return KErrArgument;
 		if((aCount|aIndex)&(KChunkMask>>KPageShift))
 			return KErrArgument;
 		}
 
-	TLinAddr base = iAllocatedLinAddrAndOsAsid&~KPageMask;
-#ifdef _DEBUG
-	TUint osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
-#endif
+	TLinAddr base = iAllocatedLinAddrAndOsAsid & ~KPageMask;
+	TLinAddr top = base + (aCount << KPageShift);
 
 	// check user/supervisor memory partitioning...
-	if(base<KUserMemoryLimit != (bool)(aPermissions&EUser))
-		return KErrAccessDenied;
-
-	// check mapping doesn't straddle KGlobalMemoryBase or KUserMemoryLimit...
-	__NK_ASSERT_DEBUG(TUint(KGlobalMemoryBase-base)==0 || TUint(KGlobalMemoryBase-base)>=TUint(aCount<<KPageShift));
-	__NK_ASSERT_DEBUG(TUint(KUserMemoryLimit-base)==0 || TUint(KUserMemoryLimit-base)>=TUint(aCount<<KPageShift));
+	if (aPermissions & EUser)
+		{
+		if (base > KUserMemoryLimit || top > KUserMemoryLimit)
+			return KErrAccessDenied;
+		}
+	else
+		{
+		if (base < KUserMemoryLimit || top < KUserMemoryLimit)
+			return KErrAccessDenied;
+		}
+
+	// check that mapping doesn't straddle KUserMemoryLimit or KGlobalMemoryBase ...
+	__NK_ASSERT_DEBUG((base < KUserMemoryLimit) == (top <= KUserMemoryLimit));
+	__NK_ASSERT_DEBUG((base < KGlobalMemoryBase) == (top <= KGlobalMemoryBase));
+
+	// check that only global memory is mapped into the kernel process
+	TBool global = base >= KGlobalMemoryBase;
+	__NK_ASSERT_DEBUG(global || (iAllocatedLinAddrAndOsAsid & KPageMask) != KKernelOsAsid);
 
 	// setup attributes...
-	TBool global = base>=KGlobalMemoryBase;
-	__NK_ASSERT_DEBUG(global || osAsid!=(TInt)KKernelOsAsid); // prevent non-global memory in kernel process
 	PteType() =	Mmu::PteType(aPermissions,global);
 	iBlankPte = Mmu::BlankPte(aMemory->Attributes(),PteType());
 
 	// setup base address... 
 	TUint colourOffset = ((aIndex&KPageColourMask)<<KPageShift);
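
This hunk replaces the old wrap-around-arithmetic checks with an explicit `top` bound: a user (EUser) mapping must lie wholly at or below KUserMemoryLimit, a supervisor mapping wholly at or above it, and the debug asserts then require that the range crosses neither KUserMemoryLimit nor KGlobalMemoryBase. A minimal standalone sketch of those checks, using plain C++ types and illustrative boundary values rather than the kernel's own definitions:

	#include <cassert>
	#include <cstdint>

	// Illustrative values only; the real constants are configured per memory model.
	static const uint32_t KUserMemoryLimit  = 0x80000000u;
	static const uint32_t KGlobalMemoryBase = 0xC0000000u;

	// Mirrors the new partition check: user mappings below the limit,
	// supervisor mappings at or above it.
	static bool PartitionOk(uint32_t base, uint32_t top, bool user)
		{
		if (user)
			return !(base > KUserMemoryLimit || top > KUserMemoryLimit);
		else
			return !(base < KUserMemoryLimit || top < KUserMemoryLimit);
		}

	// Mirrors the straddle asserts: for a non-empty range, "base below the
	// boundary" must coincide with "top at or below it", which rules out a
	// mapping crossing either boundary.
	static void AssertNoStraddle(uint32_t base, uint32_t top)
		{
		assert((base < KUserMemoryLimit) == (top <= KUserMemoryLimit));
		assert((base < KGlobalMemoryBase) == (top <= KGlobalMemoryBase));
		}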
 			__NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry);
 			}
 		else
 			{
 			TPde pde = Mmu::PageTablePhysAddr(pt)|iBlankPde;
+#ifdef	__USER_MEMORY_GUARDS_ENABLED__
+			if (IsUserMapping())
+				pde = PDE_IN_DOMAIN(pde, USER_MEMORY_DOMAIN);
+#endif
 			TRACE2(("!PDE %x=%x (was %x)",pPde,pde,*pPde));
 			if (Mmu::PdeMapsSection(*pPde))
 				{
 				// break previous section mapping...
 				__NK_ASSERT_DEBUG(*pPde==Mmu::PageToSectionEntry(pt[0],iBlankPde));
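
The added block tags the page-directory entry with the user memory domain when user-memory guards are compiled in, so the MMU's domain access control can distinguish accesses made through user mappings. The sketch below models only the tagging step; it assumes the ARMv6 first-level-descriptor layout (4-bit domain field in bits 5..8), and the real PDE_IN_DOMAIN definition lives in the ARM-specific MMU headers:

	#include <cstdint>

	// Assumed layout: domain number in bits 5..8 of the descriptor.
	static const uint32_t KDomainShift = 5;
	static const uint32_t KDomainMask  = 0xFu << KDomainShift;

	static uint32_t PdeInDomain(uint32_t pde, uint32_t domain)
		{
		// Clear the old domain field, then insert the requested domain.
		return (pde & ~KDomainMask) | ((domain & 0xFu) << KDomainShift);
		}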
 		TPte* pt = GetOrAllocatePageTable(aAddr);
 
 		if(pinnedPt && pinnedPt!=pt)
 			{
 			// previously pinned page table not needed...
-			PageTableAllocator::UnpinPageTable(pinnedPt,aPinArgs);
+			::PageTables.UnpinPageTable(pinnedPt,aPinArgs);
 
 			// make sure we have memory for next pin attempt...
 			MmuLock::Unlock();
 			aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);
-			MmuLock::Lock();
 			if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable)) // if out of memory...
 				{
 				// make sure we free any unneeded page table we allocated...
 				if(pt)
 					FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),aAddr));
+				MmuLock::Lock();
 				return 0;
 				}
+			MmuLock::Lock();
 			}
 
 		if(!pt)
 			return 0; // out of memory
 
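This fragment reorders the locking so that MmuLock stays dropped across both AllocReplacementPages() and the failure-path FreePageTable(), and is re-acquired on every way out, so the caller always gets the lock back held. A stand-in sketch of that shape, with stub functions and std::mutex in place of the kernel's MmuLock:

	#include <mutex>

	static std::mutex gMmuLock;                           // stands in for MmuLock

	static bool AllocReplacementPages() { return true; }  // stub: may fail under OOM
	static bool HaveSufficientPages()   { return true; }  // stub
	static void FreePageTable()         {}                // stub: runs with the lock dropped

	// Entered with gMmuLock held; keeps it dropped across allocation and
	// cleanup, and re-acquires it on every exit path.
	static void* RetryAfterUnpin(void* pt)
		{
		gMmuLock.unlock();
		AllocReplacementPages();
		if (!HaveSufficientPages())
			{
			if (pt)
				FreePageTable();
			gMmuLock.lock();
			return nullptr;
			}
		gMmuLock.lock();
		return pt;
		}
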
 		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
 		if(!pti->IsDemandPaged())
 			return pt;
 
 		// pin the page table...
+		if (::PageTables.PinPageTable(pt,aPinArgs) != KErrNone)
+			{// Couldn't pin the page table...
+			MmuLock::Unlock();
+			// make sure we free any unneeded page table we allocated...
+			FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),aAddr));
+			MmuLock::Lock();
+			return 0;
+			}
+
 		pinnedPt = pt;
-		PageTableAllocator::PinPageTable(pinnedPt,aPinArgs);
 		}
 	}
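
The second fragment replaces the old unconditional PageTableAllocator::PinPageTable() call with a checked pin through the global ::PageTables allocator: on failure the code frees the page table it may have speculatively allocated and returns null, which callers already treat as out-of-memory. A distilled sketch with hypothetical stand-in names:

	// Stand-in types and stubs; KErrNone is 0 in Symbian error conventions.
	struct TPageTable { int dummy; };

	static int  PinPageTable(TPageTable*)  { return 0; }  // stub: returns an error code
	static void FreePageTable(TPageTable*) {}             // stub

	static TPageTable* PinOrFail(TPageTable* pt)
		{
		const int KErrNone = 0;
		if (PinPageTable(pt) != KErrNone)
			{
			FreePageTable(pt);  // undo the speculative allocation
			return nullptr;     // caller treats null as out-of-memory
			}
		return pt;
		}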
 
 
 TInt DFineMapping::AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset)

	...
 			// setup new page table...
 			SPageTableInfo* pti = SPageTableInfo::FromPtPtr(newPt);
 			pti->SetFine(aAddr&~KChunkMask,iAllocatedLinAddrAndOsAsid&KPageMask);
 
 			TPde pde = Mmu::PageTablePhysAddr(newPt)|iBlankPde;
+#ifdef	__USER_MEMORY_GUARDS_ENABLED__
+			if (IsUserMapping())
+				pde = PDE_IN_DOMAIN(pde, USER_MEMORY_DOMAIN);
+#endif
 			TRACE2(("!PDE %x=%x",aPdeAddress,pde));
 			__NK_ASSERT_DEBUG(((*aPdeAddress^pde)&~KPdeMatchMask)==0 || *aPdeAddress==KPdeUnallocatedEntry);
 			*aPdeAddress = pde;
 			SinglePdeUpdated(aPdeAddress);
 
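Besides applying the same user-memory-guard tagging as the earlier hunk, this fragment keeps the debug check that a PDE write either fills an unallocated slot or agrees with the existing entry in every bit outside KPdeMatchMask. A standalone restatement of that predicate (the mask value here is an assumption for illustration; the real one comes from the ARM MMU headers):

	#include <cstdint>

	static const uint32_t KPdeUnallocatedEntry = 0;
	static const uint32_t KPdeMatchMask        = 0x000003FFu;  // assumed value

	// True when the write is safe under the debug rule: the slot was
	// unallocated, or old and new entries differ only inside the mask.
	static bool PdeWriteOk(uint32_t oldPde, uint32_t newPde)
		{
		return ((oldPde ^ newPde) & ~KPdeMatchMask) == 0
		    || oldPde == KPdeUnallocatedEntry;
		}
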
 	TPte** pPt = PageTableArray();
 	TPte** pPtEnd = pPt+iNumPinnedPageTables;
 
 	MmuLock::Lock();
 	while(pPt<pPtEnd)
-		PageTableAllocator::UnpinPageTable(*pPt++,aPinArgs);
+		::PageTables.UnpinPageTable(*pPt++,aPinArgs);
 	MmuLock::Unlock();
 	iNumPinnedPageTables = 0;
 
 	if(!iMaxCount)
 		FreePageTableArray();
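
The final hunk switches the unpin loop to the global ::PageTables allocator as well: every pinned page table is released during a single MmuLock hold, then the pinned count is reset. A loose model of the loop with stand-in types (std::vector in place of the raw pinned-table array):

	#include <mutex>
	#include <vector>

	static std::mutex gMmuLock;                 // stands in for MmuLock
	struct TPageTable { int dummy; };
	static void UnpinPageTable(TPageTable*) {}  // stub for ::PageTables.UnpinPageTable

	static void UnpinAll(std::vector<TPageTable*>& pinned)
		{
		gMmuLock.lock();
		for (TPageTable* pt : pinned)
			UnpinPageTable(pt);
		gMmuLock.unlock();
		pinned.clear();  // models iNumPinnedPageTables = 0
		}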