kernel/eka/memmodel/epoc/multiple/mmu.cpp
       
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\mmu.cpp
//
//

#include "memmodel.h"
#include <ramalloc.h>

_LIT(KLitGlobalDollarCode,"GLOBAL$CODE");

/*******************************************************************************
 * "Independent" MMU code
 *******************************************************************************/

void Mmu::Panic(TPanic aPanic)
	{
	Kern::Fault("MMU",aPanic);
	}

TPde* Mmu::LocalPageDir(TInt aOsAsid)
	{
	__ASSERT_DEBUG(TUint32(aOsAsid)<TUint32(iNumOsAsids),Panic(ELocalPageDirBadAsid));
	return (TPde*)(iPdeBase+(aOsAsid<<iGlobalPdShift));
	}

TPde* Mmu::GlobalPageDir(TInt aOsAsid)
	{
	__ASSERT_DEBUG(TUint32(aOsAsid)<TUint32(iNumOsAsids),Panic(EGlobalPageDirBadAsid));
	if (iAsidInfo[aOsAsid]&1)
		return (TPde*)(iPdeBase+(aOsAsid<<iGlobalPdShift));
	return (TPde*)iPdeBase;
	}
/*
TPde& Mmu::PDE(TLinAddr aAddr, TInt aOsAsid)
	{
	__ASSERT_DEBUG(TUint32(aOsAsid)<TUint32(iNumOsAsids),Panic(EPDEBadAsid));
	TPde* p=(TPde*)(iPdeBase+(aOsAsid<<iGlobalPdShift));
	if (aAddr>=iUserSharedEnd && (iAsidInfo[aOsAsid]&1))
		p=(TPde*)iPdeBase;
	p+=(aAddr>>iChunkShift);
	__KTRACE_OPT(KMMU,Kern::Printf("PDE(%08x,%d) at %08x",aAddr,aOsAsid,p));
	return *p;
	}
*/
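
// Editorial note (not part of the original source): LocalPageDir() and GlobalPageDir()
// index a contiguous array of page directories mapped at iPdeBase, one slot of
// 1<<iGlobalPdShift bytes per OS ASID; bit 0 of iAsidInfo[] records whether that ASID
// owns a separate global page directory, otherwise the shared directory at iPdeBase
// is used. The commented-out PDE() above extends the same lookup to a single entry:
/*
	TPde* pd=(TPde*)(iPdeBase+(osAsid<<iGlobalPdShift));	// directory for this ASID
	TPde* pde=pd+(linAddr>>iChunkShift);			// entry covering linAddr
	// (osAsid and linAddr are illustrative variable names)
*/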
       
TInt Mmu::NewOsAsid(TBool aSeparateGlobal)
//
// Allocate a new OS ASID and page directory.
// Map the page directory at the expected linear address and initialise it.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::NewOsAsid(%x)",aSeparateGlobal));
	TInt os_asid=iOsAsidAllocator->Alloc();
	if (os_asid<0)
		return KErrNoMemory;
	TPhysAddr pdPhys;
	TInt pdPages=0;
	TInt r=NewPageDirectory(os_asid,aSeparateGlobal,pdPhys,pdPages);
	__KTRACE_OPT(KMMU,Kern::Printf("NewPageDirectory: %d %08x %d",r,pdPhys,pdPages));
	if (r!=KErrNone)
		{
		iOsAsidAllocator->Free(os_asid);
		return KErrNoMemory;
		}
	TBool global=(pdPages<<iPageShift==iGlobalPdSize)?1:0;
	TLinAddr pdLin=iPdeBase+(os_asid<<iGlobalPdShift);
	if (((os_asid & iAsidGroupMask)==0) && (!iOsAsidAllocator->NotFree(os_asid+1,iAsidGroupMask)) )
		{
		// expand page directory mapping
		TInt xptid=AllocPageTable();
		if (xptid<0)
			{
			iRamPageAllocator->FreePhysicalRam(pdPhys,pdPages<<iPageShift);
			iOsAsidAllocator->Free(os_asid);
			return KErrNoMemory;
			}
		AssignPageTable(xptid, SPageTableInfo::EGlobal, NULL, pdLin, iPdPdePerm);	// map XPT
		}
	TInt i;
	for (i=0; i<pdPages; ++i)
		MapRamPage(pdLin+(i<<iPageShift), pdPhys+(i<<iPageShift), iPdPtePerm);
	InitPageDirectory(os_asid, global);
	iNumGlobalPageDirs+=global;
	iAsidInfo[os_asid]=global;
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::NewOsAsid returns %d (%d)",os_asid,global));
	return os_asid;
	}
       
void Mmu::FreeOsAsid(TInt aOsAsid)
//
// Free an OS ASID and the corresponding page directory.
// Assumes any local PDEs have already been unmapped.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeOsAsid(%d)",aOsAsid));
	__ASSERT_DEBUG(TUint32(aOsAsid)<TUint32(iNumOsAsids),Panic(EFreeOsAsidBadAsid));
	TBool global=iAsidInfo[aOsAsid]&1;
	iAsidInfo[aOsAsid]=0;
	iOsAsidAllocator->Free(aOsAsid);
	iNumGlobalPageDirs-=global;
	TLinAddr pdLin=iPdeBase+(aOsAsid<<iGlobalPdShift);
	TUint32 size=global?iGlobalPdSize:iLocalPdSize;
	UnmapAndFree(pdLin,size>>iPageShift);
#ifdef BTRACE_KERNEL_MEMORY
	BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, size);
	Epoc::KernelMiscPages -= size>>iPageShift;
#endif
	TInt asid_group=aOsAsid&~iAsidGroupMask;
	if (!iOsAsidAllocator->NotFree(asid_group,iAsidGroupSize))
		{
		// shrink page directory mapping
		TInt xptid=PageTableId(pdLin,0);
		DoUnassignPageTable(pdLin, GLOBAL_MAPPING);
		FreePageTable(xptid);
		}
	}
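
// Illustrative sketch (editorial addition, not part of the original source): how a
// caller might pair NewOsAsid() above with FreeOsAsid(). The helper names and
// parameters are assumptions; real callers sit in the process create/destroy paths.
/*
TInt ExampleAllocateAddressSpace(Mmu& aMmu, TBool aSeparateGlobal, TInt& aOsAsid)
	{
	TInt r=aMmu.NewOsAsid(aSeparateGlobal);		// an OS ASID (>=0) or KErrNoMemory
	if (r<0)
		return r;
	aOsAsid=r;
	return KErrNone;
	}

void ExampleReleaseAddressSpace(Mmu& aMmu, TInt aOsAsid)
	{
	// FreeOsAsid() assumes any local PDEs have already been unmapped.
	aMmu.FreeOsAsid(aOsAsid);
	}
*/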
       
TPhysAddr Mmu::LinearToPhysical(TLinAddr aLinAddr)
//
// Find the physical address corresponding to a given linear address
// Call with system locked
//
	{
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	return LinearToPhysical(aLinAddr, pP->iOsAsid);
	}

TInt Mmu::LinearToPhysical(TLinAddr aLinAddr, TInt aSize, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
	{
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	return LinearToPhysical(aLinAddr, aSize, aPhysicalAddress, aPhysicalPageList, pP->iOsAsid);
	}

TInt Mmu::PageTableId(TLinAddr aAddr)
	{
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	return PageTableId(aAddr, pP->iOsAsid);
	}
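
// Illustrative sketch (editorial addition, not part of the original source): the
// current-process LinearToPhysical() overload above must be called with the system
// lock held, as its header comment states. The helper name is an assumption:
/*
TPhysAddr ExampleLookupPhysical(Mmu& aMmu, TLinAddr aLinAddr)
	{
	NKern::LockSystem();
	TPhysAddr pa=aMmu.LinearToPhysical(aLinAddr);	// resolved in the current process's address space
	NKern::UnlockSystem();
	return pa;
	}
*/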
       
void Mmu::Init1()
	{
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init1"));
	iMaxPageTables=65535;		// possibly reduced when RAM size known
	memclr(iAsidInfo, iNumOsAsids*sizeof(TUint32));
	MmuBase::Init1();
	}

void Mmu::CreateUserGlobalSection(TLinAddr aBase, TLinAddr aEnd)
	{
	iUserGlobalSection=TLinearSection::New(aBase, aEnd);
	__ASSERT_ALWAYS(iUserGlobalSection,Panic(ECreateUserGlobalSectionFailed));
	}

void Mmu::DoInit2()
	{
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::DoInit2"));
	iSharedSection=TLinearSection::New(iUserSharedBase, iUserSharedEnd);
	__ASSERT_ALWAYS(iSharedSection,Panic(ECreateSharedSectionFailed));
	iOsAsidAllocator=TBitMapAllocator::New(iNumOsAsids,ETrue);
	__ASSERT_ALWAYS(iOsAsidAllocator,Panic(EOsAsidAllocCreateFailed));
	iOsAsidAllocator->Alloc(0,1);	// 0=kernel process
	DMemModelProcess* pP=(DMemModelProcess*)K::TheKernelProcess;
	if (iLocalPdSize)
		pP->iLocalPageDir=LinearToPhysical(TLinAddr(LocalPageDir(0)));
	pP->iGlobalPageDir=LinearToPhysical(TLinAddr(GlobalPageDir(0)));
	__KTRACE_OPT(KMMU,Kern::Printf("Kernel process: LPD=%08x GPD=%08x",pP->iLocalPageDir,pP->iGlobalPageDir));
	MM::UserCodeAllocator=TBitMapAllocator::New(iMaxUserCodeSize>>iAliasShift, ETrue);	// code is aligned to alias size
	__ASSERT_ALWAYS(MM::UserCodeAllocator,Panic(EUserCodeAllocatorCreateFailed));
	MM::DllDataAllocator=TBitMapAllocator::New(iMaxDllDataSize>>iPageShift, ETrue);
	__ASSERT_ALWAYS(MM::DllDataAllocator,Panic(EDllDataAllocatorCreateFailed));
	__ASSERT_ALWAYS(TheRomHeader().iUserDataAddress==iDllDataBase+iMaxDllDataSize,Panic(ERomUserDataAddressInvalid));
	__ASSERT_ALWAYS((TheRomHeader().iTotalUserDataSize&iPageMask)==0,Panic(ERomUserDataSizeInvalid));
	TInt rom_dll_pages=TheRomHeader().iTotalUserDataSize>>iPageShift;
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("UserCodeAllocator @ %08x DllDataAllocator @ %08x, %d ROM DLL Data Pages",
		MM::UserCodeAllocator, MM::DllDataAllocator, rom_dll_pages));
	if (rom_dll_pages)
		MM::DllDataAllocator->Alloc(0, rom_dll_pages);	// low bit numbers represent high addresses
	}

void Mmu::SetupInitialPageInfo(SPageInfo* aPageInfo, TLinAddr aChunkAddr, TInt aPdeIndex)
	{
	__ASSERT_ALWAYS(aChunkAddr>=iUserSharedEnd,Panic(EBadInitialPageAddr));
	TLinAddr addr=aChunkAddr+(aPdeIndex<<iPageShift);
	if (aPageInfo->Type()!=SPageInfo::EUnused)
		return;	// already set (page table)
	if (addr==(TLinAddr)iPtInfo)
		{
		aPageInfo->SetPtInfo(0);
		aPageInfo->Lock();
		}
	else if (addr>=iPdeBase && addr<iPdeBase+iGlobalPdSize)
		{
		aPageInfo->SetPageDir(0,aPdeIndex);
		aPageInfo->Lock();
		}
	else
		aPageInfo->SetFixed();
	}

void Mmu::SetupInitialPageTableInfo(TInt aId, TLinAddr aChunkAddr, TInt aNumPtes)
	{
	__ASSERT_ALWAYS(aChunkAddr>=iUserSharedEnd || aChunkAddr==0,Panic(EBadInitialPageAddr));
	SPageTableInfo& pti=PtInfo(aId);
	pti.iCount=aNumPtes;
	pti.SetGlobal(aChunkAddr>>iChunkShift);
	}
       
void Mmu::AssignPageTable(TInt aId, TInt aUsage, TAny* aObject, TLinAddr aAddr, TPde aPdePerm)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AssignPageTable id=%d, u=%08x, obj=%08x, addr=%08x, perm=%08x",
					aId, aUsage, aObject, aAddr, aPdePerm));
	const TAny* asids=GLOBAL_MAPPING;
	SPageTableInfo& pti=PtInfo(aId);
	switch (aUsage)
		{
		case SPageTableInfo::EChunk:
			{
			DMemModelChunk* pC=(DMemModelChunk*)aObject;
			TUint32 ccp=K::CompressKHeapPtr(pC);
			TUint32 offset=(aAddr-TLinAddr(pC->iBase))>>iChunkShift;
			pti.SetChunk(ccp,offset);
			if (pC->iOsAsids)
				asids=pC->iOsAsids;
			else if (pC->iOwningProcess)
				asids=(const TAny*)((DMemModelProcess*)pC->iOwningProcess)->iOsAsid;
			break;
			}
//		case SPageTableInfo::EHwChunk:
//			break;
		case SPageTableInfo::EGlobal:
			pti.SetGlobal(aAddr>>iChunkShift);
			break;
		default:
			Panic(EAssignPageTableInvalidUsage);
		}
	DoAssignPageTable(aId, aAddr, aPdePerm, asids);
	}

TInt Mmu::UnassignPageTable(TLinAddr aAddr)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::UnassignPageTable addr=%08x", aAddr));
	TInt id=PageTableId(aAddr, 0);
	if (id>=0)
		DoUnassignPageTable(aAddr, GLOBAL_MAPPING);
	return id;
	}
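
// Illustrative sketch (editorial addition, not part of the original source): pairing
// AssignPageTable()/UnassignPageTable() for a global mapping, mirroring the XPT
// handling in NewOsAsid()/FreeOsAsid() above. The helper name is an assumption and
// error handling is reduced to the minimum:
/*
TInt ExampleMapGlobalPageTable(Mmu& aMmu, TLinAddr aAddr, TPde aPdePerm)
	{
	TInt id=aMmu.AllocPageTable();
	if (id<0)
		return KErrNoMemory;
	aMmu.AssignPageTable(id, SPageTableInfo::EGlobal, NULL, aAddr, aPdePerm);
	// ... use the mapping ...
	TInt id2=aMmu.UnassignPageTable(aAddr);	// recover the id from the address
	if (id2>=0)
		aMmu.FreePageTable(id2);
	return KErrNone;
	}
*/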
       
TInt Mmu::CreateGlobalCodeChunk()
//
// Enter and return with neither system lock nor MMU mutex held
//
	{
	__KTRACE_OPT(KDLL,Kern::Printf("Mmu::CreateGlobalCodeChunk"));
	TInt maxsize=Min(TheSuperPage().iTotalRamSize/2, 0x01000000);
	SChunkCreateInfo c;
	c.iGlobal=ETrue;
	c.iAtt=TChunkCreate::EDisconnected;
	c.iForceFixed=EFalse;
	c.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
	c.iRunAddress=0;
	c.iPreallocated=0;
	c.iType=EDll;
	c.iMaxSize=maxsize;
	c.iName.Set(KLitGlobalDollarCode);
	c.iOwner=NULL;
	c.iInitialBottom=0;
	c.iInitialTop=0;
	TLinAddr runAddr;
	return K::TheKernelProcess->NewChunk((DChunk*&)iGlobalCode,c,runAddr);
	}