// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include "memmodel.h"
|
|
17 |
#include "mm.h"
|
|
18 |
#include "mmu.h"
|
|
19 |
|
|
20 |
#include "mpdalloc.h"
|
|
21 |
#include "mobject.h"
|
|
22 |
#include "cache_maintenance.inl"
|
|
23 |
|
|
24 |
|
|
25 |
// check enough space for page directories...
|
|
26 |
__ASSERT_COMPILE(KNumOsAsids <= (KPageDirectoryEnd-KPageDirectoryBase)/KPageDirectorySize);
|
|
27 |
|
|
28 |
|
|
29 |
PageDirectoryAllocator PageDirectories;
|
|
30 |
|
|
31 |
|
|
32 |
const TUint KLocalPdShift = KPageDirectoryShift > KPageShift ? KPageDirectoryShift-1 : KPageShift;
|
|
33 |
const TUint KLocalPdSize = 1<<KLocalPdShift;
|
|
34 |
const TUint KLocalPdPages = 1<<(KLocalPdShift-KPageShift);
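// Note: where a page directory is larger than an MMU page, only its first half
// (KLocalPdSize) needs to cover per-process (local) addresses; the remainder maps
// global addresses and can be shared with the kernel's page directory. Otherwise the
// 'local' part is the whole page directory and global entries must be copied into it.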


__ASSERT_COMPILE((KPageDirectoryBase&(31*KPageDirectorySize))==0); // following code assumes this alignment
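
// Called with the MmuLock held whenever a PDE in the global (kernel) part of the address
// space changes. When each OS ASID owns a full-sized page directory the global entries
// are duplicated in every one of them, so the updated PDE value must be copied into the
// corresponding slot of every currently allocated page directory.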
void PageDirectoryAllocator::GlobalPdeChanged(TPde* aPde)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(TLinAddr(aPde)>=KPageDirectoryBase);
	__NK_ASSERT_DEBUG(TLinAddr(aPde)<KPageDirectoryEnd);
	__NK_ASSERT_DEBUG(KLocalPdSize==(TUint)KPageDirectorySize); // shouldn't be called if we have separate global PDs

	TLinAddr addr = (TLinAddr(aPde)&KPageDirectoryMask)*(KChunkSize/sizeof(TPde));
	if(addr<KGlobalMemoryBase)
		return; // change was in local part of PD, so nothing to do
	if(addr-KIPCAlias<KIPCAliasAreaSize)
		return; // change was in IPC alias area, so nothing to do
	if(!iAllocator)
		return; // not yet initialised

	TRACE2(("PageDirectoryAllocator::GlobalPdeChanged(0x%08x)",aPde));
	TPde pde = *aPde;
	TLinAddr pPde = KPageDirectoryBase+(TLinAddr(aPde)&KPageDirectoryMask); // first page directory

	// copy PDE to all allocated page directories
	pPde -= KPageDirectorySize; // start off at PD minus one
	TLinAddr lastPd = KPageDirectoryBase+(KNumOsAsids-1)*KPageDirectorySize;
	TUint32* ptr = iAllocator->iMap;
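	// each word of iAllocator->iMap covers 32 OS ASIDs, with a bit set while that ASID is
	// free; complementing the word therefore leaves set bits for the allocated page
	// directories which need the updated PDE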
	do
		{
		TUint32 bits = ~*ptr++;
		do
			{
			pPde += KPageDirectorySize; // step to next page directory
			if(bits&0x80000000u)
				{
				TRACE2(("!PDE %x=%x",pPde,pde));
				*(TPde*)pPde = pde;
				CacheMaintenance::SinglePdeUpdated(pPde);
				}
			}
		while(bits<<=1);
		pPde |= 31*KPageDirectorySize; // step to next group of 32 PDs
		}
	while(pPde<lastPd);
	}
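

// Second-phase initialisation: create the fixed memory object covering the page directory
// region, record the kernel's own (pre-existing) page directory, and construct the bitmap
// allocator of OS ASIDs with the kernel's entry already marked as allocated.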
void PageDirectoryAllocator::Init2()
	{
	TRACEB(("PageDirectoryAllocator::Init2()"));

	// construct memory object for page directories...
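	// the page directories are mapped uncached unless the CPU can safely walk page tables
	// held in the cache (__CPU_PAGE_TABLES_FULLY_CACHED), so that PDE updates are seen by
	// the MMU without additional cache maintenance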
#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
	TMemoryAttributes memAttr = EMemoryAttributeStandard;
#else
	TMemoryAttributes memAttr = (TMemoryAttributes)(EMemoryAttributeNormalUncached|EMemoryAttributeDefaultShareable);
#endif
	TInt r = MM::InitFixedKernelMemory(iPageDirectoryMemory, KPageDirectoryBase, KPageDirectoryEnd, KPageDirectorySize, EMemoryObjectHardware, EMemoryCreateNoWipe, memAttr, EMappingCreateFixedVirtual);
	__NK_ASSERT_ALWAYS(r==KErrNone);

	// initialise kernel page directory...
	TPhysAddr kernelPd = Mmu::LinearToPhysical((TLinAddr)Mmu::PageDirectory(KKernelOsAsid));
	iKernelPageDirectory = kernelPd;
	((DMemModelProcess*)K::TheKernelProcess)->iPageDir = kernelPd;
	AssignPages(KKernelOsAsid*(KPageDirectorySize>>KPageShift),KPageDirectorySize>>KPageShift,kernelPd);

	// construct allocator...
	iAllocator = TBitMapAllocator::New(KNumOsAsids,ETrue);
	__NK_ASSERT_ALWAYS(iAllocator);
	iAllocator->Alloc(KKernelOsAsid,1); // kernel page directory already allocated

	TRACEB(("PageDirectoryAllocator::Init2 done"));
	}
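

// Record, in the SPageInfo of each physical page starting at aPhysAddr, that the page is
// owned by the page directory memory object at page index aIndex onwards. aCount must be
// small enough (KMaxPageInfoUpdatesInOneGo) to perform the whole update under one MmuLock hold.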
void PageDirectoryAllocator::AssignPages(TUint aIndex, TUint aCount, TPhysAddr aPhysAddr)
	{
	__NK_ASSERT_DEBUG(aCount<=KMaxPageInfoUpdatesInOneGo);
	MmuLock::Lock();
	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
	SPageInfo* piEnd = pi+aCount;
	while(pi<piEnd)
		{
		pi->SetPhysAlloc(iPageDirectoryMemory,aIndex);
		++pi;
		++aIndex;
		}
	MmuLock::Unlock();
	}
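

// Allocate the page directory for OS ASID aOsAsid and return its physical address in
// aPageDirectory. The local (per-process) part is allocated as physically contiguous RAM
// and cleared; the global part is then either shared with, or copied from, the kernel's
// page directory. Returns KErrNone on success or a system-wide error code.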
TInt PageDirectoryAllocator::Alloc(TUint aOsAsid, TPhysAddr& aPageDirectory)
	{
	TRACE(("PageDirectoryAllocator::Alloc(%d)",aOsAsid));

	// get memory for local page directory...
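	// the local page directory must be physically contiguous and aligned to its own size
	// (hence the KLocalPdShift-KPageShift alignment request below), so that its physical
	// address can be used directly as this ASID's translation table base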
	Mmu& m = TheMmu;
	TUint offset = aOsAsid*KPageDirectorySize;
	TPhysAddr pdPhys;
	RamAllocLock::Lock();
	TInt r = m.AllocContiguousRam(pdPhys, KLocalPdPages, KLocalPdShift-KPageShift, iPageDirectoryMemory->RamAllocFlags());
	if(r==KErrNone)
		AssignPages(offset>>KPageShift,KLocalPdPages,pdPhys);
	RamAllocLock::Unlock();

	if(r==KErrNone)
		{
		TRACE(("PageDirectoryAllocator::Alloc pdPhys = 0x%08x",pdPhys));

		// map local page directory...
		r = MM::MemoryAddContiguous(iPageDirectoryMemory,MM::BytesToPages(offset),KLocalPdPages,pdPhys);
		if(r!=KErrNone)
			{
			RamAllocLock::Lock();
			m.FreeContiguousRam(pdPhys,KLocalPdPages);
			RamAllocLock::Unlock();
			}
		else
			{
			aPageDirectory = pdPhys;

			TPde* pd = Mmu::PageDirectory(aOsAsid);
			const TUint globalOffset = (KGlobalMemoryBase>>KChunkShift)*sizeof(TPde); // start of global part

			// clear local entries in page directory...
			memclr(pd,globalOffset);
			CacheMaintenance::PdesInitialised((TLinAddr)pd,globalOffset);
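
			// populate the global part of the new page directory: if the local part is only
			// half a page directory, map the kernel page directory's global pages after it
			// (shared); otherwise copy the kernel's global entries, which GlobalPdeChanged()
			// then keeps up to date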
			if(KLocalPdSize<(TUint)KPageDirectorySize)
				{
				// map global page directory after local part...
				__NK_ASSERT_DEBUG(KLocalPdSize==globalOffset);
				r = MM::MemoryAddContiguous(iPageDirectoryMemory, MM::BytesToPages(offset+KLocalPdSize),
											(KPageDirectorySize-KLocalPdSize)/KPageSize, iKernelPageDirectory+KLocalPdSize);
				__NK_ASSERT_DEBUG(r==KErrNone); // can't fail
				MmuLock::Lock(); // need lock because allocator not otherwise atomic
				iAllocator->Alloc(aOsAsid,1);
				MmuLock::Unlock();
				}
			else
				{
				// copy global entries to local page directory...
				TPde* globalPd = Mmu::PageDirectory(KKernelOsAsid);
				MmuLock::Lock(); // need lock because allocator not otherwise atomic, also to make sure GlobalPdeChanged() only accesses extant PDs
				memcpy((TUint8*)pd+globalOffset,(TUint8*)globalPd+globalOffset,KPageDirectorySize-globalOffset);
				iAllocator->Alloc(aOsAsid,1);
				MmuLock::Unlock();
				CacheMaintenance::PdesInitialised((TLinAddr)((TUint8*)pd+globalOffset),KPageDirectorySize-globalOffset);
				}
			}
		}
	TRACE(("PageDirectoryAllocator::Alloc returns %d",r));
	return r;
	}
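

// Release the page directory belonging to OS ASID aOsAsid: mark the ASID free again (so
// GlobalPdeChanged() stops updating its page directory), remove the pages from the page
// directory memory object, then free the RAM owned by this ASID.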
void PageDirectoryAllocator::Free(TUint aOsAsid)
	{
	TRACE(("PageDirectoryAllocator::Free(%d)",aOsAsid));

	MmuLock::Lock(); // need lock because allocator not otherwise atomic, also to make sure GlobalPdeChanged() only accesses extant PDs
	iAllocator->Free(aOsAsid, 1);
	MmuLock::Unlock();

	const TUint KPageDirectoryPageCount = KPageDirectorySize>>KPageShift;
	TPhysAddr pages[KPageDirectoryPageCount];
	TUint n = MM::MemoryRemovePages(iPageDirectoryMemory,aOsAsid*KPageDirectoryPageCount,KPageDirectoryPageCount,pages);
	(void)n;
	__NK_ASSERT_DEBUG(n==KPageDirectoryPageCount);

	RamAllocLock::Lock();
	Mmu& m = TheMmu;
	// Page directories are fixed.
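	// only the first KLocalPdPages pages were allocated by Alloc() for this ASID; when the
	// global part of the page directory is shared, the remaining pages belong to the
	// kernel's page directory and must not be freed here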
	m.FreeRam(pages, KLocalPdPages, EPageFixed);
	RamAllocLock::Unlock();
	}