// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\mchunk.cpp
//
//

#include "memmodel.h"
#include "cache_maintenance.h"
#include <mmubase.inl>
#include <ramalloc.h>

DMemModelChunk::DMemModelChunk()
	{
	}

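// Return the linear address section from which this chunk's address range is
// allocated, as selected by the chunk's address-range attribute. Fixed-address
// chunks are not allocated from a section, so NULL is returned for them.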
TLinearSection* DMemModelChunk::LinearSection()
	{
	Mmu& m=Mmu::Get();
	TInt ar=(iAttributes&EAddressRangeMask);
	switch (ar)
		{
		case EAddressLocal: return ((DMemModelProcess*)iOwningProcess)->iLocalSection;
		case EAddressFixed: return NULL;
		case EAddressShared: return m.iSharedSection;
		case EAddressUserGlobal: return m.iUserGlobalSection;
		case EAddressKernel: return m.iKernelSection;
		}
	MM::Panic(MM::EChunkBadAddressRange);
	return NULL;
	}

void DMemModelChunk::Destruct()
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("DMemModelChunk destruct %O",this));
	if (iPageTables)
		{
#ifdef _DEBUG
		TInt r;
#define SET_R_IF_DEBUG(x) r = (x)
#else
#define SET_R_IF_DEBUG(x) (void)(x)
#endif
		if (iAttributes & EDisconnected)
			SET_R_IF_DEBUG(Decommit(0,iMaxSize));
		else if (iAttributes & EDoubleEnded)
			SET_R_IF_DEBUG(AdjustDoubleEnded(0,0));
		else
			SET_R_IF_DEBUG(Adjust(0));
		__ASSERT_DEBUG(r==KErrNone, MM::Panic(MM::EDecommitFailed));
#ifdef _DEBUG
		// check all page tables have been freed...
		Mmu& m=Mmu::Get();
		TInt nPdes=(iMaxSize+m.iChunkMask)>>m.iChunkShift;
		for(TInt i=0; i<nPdes; i++)
			{
			__NK_ASSERT_DEBUG(iPageTables[i]==0xffff);
			}
#endif
		}
	if (iBase)
		{
		TLinearSection* s=LinearSection();
		if(s)
			{
			Mmu::Wait();
			__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::~DMemModelChunk remove region"));
			Mmu& m=Mmu::Get();
			s->iAllocator.Free( (TLinAddr(iBase)-s->iBase)>>m.iChunkShift, iMaxSize>>m.iChunkShift);
			Mmu::Signal();
			}
		}
	delete iOsAsids;
	Kern::Free(iPageTables);
	delete iPageBitMap;
	delete iPermanentPageBitMap;

	if(iKernelMirror)
		iKernelMirror->Close(NULL);

	TDfc* dfc = iDestroyedDfc;
	if (dfc)
		dfc->QueueOnIdle();

	__KTRACE_OPT(KMEMTRACE, {Mmu::Wait(); Kern::Printf("MT:D %d %x %O",NTickCount(),this,this);Mmu::Signal();});
#ifdef BTRACE_CHUNKS
	BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);
#endif
	}

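// Close a reference on the chunk. If aPtr is non-NULL it identifies the process
// from whose address space the chunk mapping should be removed (for local chunks
// the owning process is used instead). Returns EObjectDeleted if this call
// released the final reference.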
TInt DMemModelChunk::Close(TAny* aPtr)
	{
	if (aPtr)
		{
		DMemModelProcess* pP=(DMemModelProcess*)aPtr;
		if ((iAttributes&EMapTypeMask)==EMapTypeLocal)
			pP=(DMemModelProcess*)iOwningProcess;
		pP->RemoveChunk(this);
		}
	TInt r=Dec();
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Close %d %O",r,this));
	__NK_ASSERT_DEBUG(r > 0); // Should never be negative.
	if (r==1)
		{
		K::ObjDelete(this);
		return EObjectDeleted;
		}
	return 0;
	}


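// On this memory model a chunk has a single base address (iBase) regardless of
// which process maps it, so the process parameter is unused.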
TUint8* DMemModelChunk::Base(DProcess* aProcess)
	{
	return iBase;
	}


TInt DMemModelChunk::DoCreate(SChunkCreateInfo& aInfo)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O DoCreate att=%08x",this,iAttributes));

	__ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));

	if (aInfo.iMaxSize<=0)
		return KErrArgument;

	if (iKernelMirror)
		{
		iKernelMirror->iAttributes |= iAttributes|EMemoryNotOwned;
		TInt r=iKernelMirror->DoCreate(aInfo);
		if(r!=KErrNone)
			return r;
		}

	Mmu& m=Mmu::Get();
	TInt nPdes=(aInfo.iMaxSize+m.iChunkMask)>>m.iChunkShift;
	iMaxSize=nPdes<<m.iChunkShift;
	iMapAttr = aInfo.iMapAttr;
	SetupPermissions();
	TInt mapType=iAttributes & EMapTypeMask;
	if (mapType==EMapTypeShared)
		{
		iOsAsids=TBitMapAllocator::New(m.iNumOsAsids,ETrue);
		if (!iOsAsids)
			return KErrNoMemory;
		}
	TInt maxpages=iMaxSize>>m.iPageShift;
	if (iAttributes & EDisconnected)
		{
		TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pM)
			return KErrNoMemory;
		iPageBitMap=pM;
		__KTRACE_OPT(KMMU,Kern::Printf("PageBitMap at %08x, MaxPages %d",pM,maxpages));
		}
	if(iChunkType==ESharedKernelSingle || iChunkType==ESharedKernelMultiple)
		{
		TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pM)
			return KErrNoMemory;
		iPermanentPageBitMap = pM;
		}
	iPageTables=(TUint16*)Kern::Alloc(nPdes*sizeof(TUint16));
	if (!iPageTables)
		return KErrNoMemory;
	memset(iPageTables,0xff,nPdes*sizeof(TUint16));
	MmuBase::Wait();
	TInt r=AllocateAddress();
	__KTRACE_OPT(KMEMTRACE,Kern::Printf("MT:C %d %x %O",NTickCount(),this,this));
	MmuBase::Signal();
#ifdef BTRACE_CHUNKS
	TKName nameBuf;
	Name(nameBuf);
	BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
	if(iOwningProcess)
		BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
	BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
#endif
	return r;
	}

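// Take ownership of pages already mapped in the chunk's initial region (mapped
// before this chunk object existed, e.g. by the bootstrap): record each page
// table ID, point the page table and page info records at this chunk, and
// count the pages mapped by each page table.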
void DMemModelChunk::ClaimInitialPages()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O ClaimInitialPages()",this));
	Mmu& m=Mmu::Get();
	TInt offset=0;
	TUint32 ccp=K::CompressKHeapPtr(this);
	NKern::LockSystem();
	while(offset<iSize)
		{
		TInt ptid=m.PageTableId(TLinAddr(iBase)+offset);
		__ASSERT_ALWAYS(ptid>=0,MM::Panic(MM::EClaimInitialPagesBadPageTable));
		__KTRACE_OPT(KMMU,Kern::Printf("Offset %x PTID=%d",offset,ptid));
		iPageTables[offset>>m.iChunkShift]=ptid;
		SPageTableInfo& ptinfo = m.PtInfo(ptid);
		ptinfo.SetChunk(ccp,offset>>m.iChunkShift);
		TPte* pPte=(TPte*)m.PageTableLinAddr(ptid);
		TInt i;
		TInt np = 0;
		TInt flashCount = MM::MaxPagesInOneGo;
		for (i=0; i<m.iChunkSize>>m.iPageShift; ++i, offset+=m.iPageSize)
			{
			if(--flashCount<=0)
				{
				flashCount = MM::MaxPagesInOneGo;
				NKern::FlashSystem();
				}
			TPte pte=pPte[i];
			if (m.PteIsPresent(pte))
				{
				++np;
				TPhysAddr phys=m.PtePhysAddr(pte, i);
				__KTRACE_OPT(KMMU,Kern::Printf("Offset %x phys %08x",offset,phys));
				SPageInfo* info = SPageInfo::SafeFromPhysAddr(phys);
				if(info)
					{
					info->SetChunk(this,offset>>m.iPageShift);
#ifdef BTRACE_KERNEL_MEMORY
					--Epoc::KernelMiscPages; // page now owned by chunk, and is not 'miscellaneous'
#endif
					}
				}
			}
		ptinfo.iCount = np;
		__KTRACE_OPT(KMMU,Kern::Printf("Offset %x PTID %d NP %d", offset, ptid, np));
		}
	NKern::UnlockSystem();
	}

void DMemModelChunk::SetFixedAddress(TLinAddr aAddr, TInt aInitialSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O SetFixedAddress %08x size %08x",this,aAddr,aInitialSize));
	iBase=(TUint8*)aAddr;
	iSize=Mmu::RoundToPageSize(aInitialSize);
	ClaimInitialPages();
	}

TInt DMemModelChunk::Reserve(TInt aInitialSize)
//
// Reserve home section address space for a chunk
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O Reserve() size %08x",this,aInitialSize));
	iSize=Mmu::RoundToPageSize(aInitialSize);
	ClaimInitialPages();
	return KErrNone;
	}

TInt DMemModelChunk::Adjust(TInt aNewSize)
//
// Adjust a standard chunk.
//
	{

	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust %08x",aNewSize));
	if (iAttributes & (EDoubleEnded|EDisconnected))
		return KErrGeneral;
	if (aNewSize<0 || aNewSize>iMaxSize)
		return KErrArgument;

	TInt r=KErrNone;
	TInt newSize=Mmu::RoundToPageSize(aNewSize);
	if (newSize!=iSize)
		{
		Mmu::Wait();
		if (newSize>iSize)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust growing"));
			r=DoCommit(iSize,newSize-iSize);
			}
		else if (newSize<iSize)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust shrinking"));
			DoDecommit(newSize,iSize-newSize);
			}
		Mmu::Signal();
		}
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x base %08x",this,iSize,iBase));
	return r;
	}

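// Translate a chunk offset to the kernel (mirror) linear address of the region
// [aOffset,aOffset+aSize). Only valid for shared kernel chunks, and only if
// every page in the range has been permanently committed.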
TInt DMemModelChunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
	{
	if(!iPermanentPageBitMap)
		return KErrAccessDenied;
	if(TUint(aOffset)>=TUint(iMaxSize))
		return KErrArgument;
	if(TUint(aOffset+aSize)>TUint(iMaxSize))
		return KErrArgument;
	if(aSize<=0)
		return KErrArgument;
	TInt pageShift = Mmu::Get().iPageShift;
	TInt start = aOffset>>pageShift;
	TInt size = ((aOffset+aSize-1)>>pageShift)-start+1;
	if(iPermanentPageBitMap->NotAllocated(start,size))
		return KErrNotFound;
	aKernelAddress = (TLinAddr)iKernelMirror->iBase+aOffset;
	return KErrNone;
	}

TInt DMemModelChunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
	{
	TInt r=Address(aOffset,aSize,aKernelAddress);
	if(r!=KErrNone)
		return r;

	return Mmu::Get().LinearToPhysical(aKernelAddress,aSize,aPhysicalAddress,aPhysicalPageList);
	}

void DMemModelChunk::Substitute(TInt aOffset, TPhysAddr aOldAddr, TPhysAddr aNewAddr)
	{
	// Substitute the page mapping at aOffset with aNewAddr.
	// Enter and leave with system locked.
	// This is sometimes called with interrupts disabled and should leave them alone.
	Mmu& m = Mmu::Get();
	__ASSERT_ALWAYS(iKernelMirror==NULL,MM::Panic(MM::EChunkRemapUnsupported));

	TInt ptid=iPageTables[aOffset>>m.iChunkShift];
	if(ptid==0xffff)
		MM::Panic(MM::EChunkRemapNoPageTable);

	// Permissions for global code will have been overwritten with ApplyPermissions
	// so we can't trust iPtePermissions for those chunk types
	TPte perms;
	if(iChunkType==EKernelCode)
		perms = m.iKernelCodePtePerm;
	else if(iChunkType==EDll)
		perms = m.iGlobalCodePtePerm;
	else
		perms = iPtePermissions;

	m.RemapPage(ptid, (TLinAddr)iBase+aOffset, aOldAddr, aNewAddr, perms, iOwningProcess);
	}

/**
Get the movability type of the chunk's pages
@return How movable the chunk's pages are
*/
TZonePageType DMemModelChunk::GetPageType()
	{
	// Shared chunks have their physical addresses available
	if (iChunkType == ESharedKernelSingle ||
		iChunkType == ESharedKernelMultiple ||
		iChunkType == ESharedIo ||
		iChunkType == ESharedKernelMirror ||
		iChunkType == EKernelMessage ||
		iChunkType == EKernelData)	// Don't move kernel heap pages as DMA may be accessing them.
		{
		return EPageFixed;
		}
	// All other types of chunk are movable
	return EPageMovable;
	}

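// aCommitType selects where the memory comes from: freshly allocated
// discontiguous pages, a freshly allocated contiguous block, caller-supplied
// physical pages (discontiguous or contiguous), or no memory at all
// (ECommitVirtual, ARM only). For ECommitContiguous, aExtraArg returns the
// physical base address of the allocated block; for the physical-commit types
// it supplies the page list or base address to map.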
TInt DMemModelChunk::DoCommit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
	{
	// Commit more RAM to a chunk at a specified offset
	// enter and leave with system unlocked
	// must hold RamAlloc mutex before calling this function
	__ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCommit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
	TInt offset=aOffset;
	TInt endOffset=offset+aSize;
	TInt newPtId=-1;
	Mmu& m = Mmu::Get();
	DRamAllocator& a = *m.iRamPageAllocator;
	TInt r=KErrNone;
	TPhysAddr pageList[KMaxPages];
	TPhysAddr* pPageList=0;	// In case of discontiguous commit it points to the list of physical pages.
	TPhysAddr nextPage=0;	// In case of contiguous commit, it points to the physical address to commit
	SPageInfo::TType type = SPageInfo::EChunk;

	// Set flag to indicate if RAM should be cleared before being committed.
	// Note, EDll, EUserCode are covered in the code segment, in order not to clear
	// the region overwritten by the loader
	TBool clearRam = iChunkType==EUserData
		|| iChunkType==EDllData
		|| iChunkType==EUserSelfModCode
		|| iChunkType==ESharedKernelSingle
		|| iChunkType==ESharedKernelMultiple
		|| iChunkType==ESharedIo
		|| iChunkType==ERamDrive;

	TBool ownsMemory = !(iAttributes&EMemoryNotOwned);
	TBool physicalCommit = aCommitType&DChunk::ECommitPhysicalMask;
	if(ownsMemory)
		{
		if(physicalCommit)
			return KErrNotSupported;
		}
	else
		{
		if(!physicalCommit && aCommitType != DChunk::ECommitVirtual)
			return KErrNotSupported;
		type = SPageInfo::EInvalid;	// to indicate page info not to be updated
		}

	switch(aCommitType)
		{
		case DChunk::ECommitDiscontiguous:
			// No setup to do
			break;

		case DChunk::ECommitContiguous:
			{
			// Allocate a block of contiguous RAM from the free pool
			TInt numPages=(endOffset-offset)>>m.iPageShift;
			r=m.AllocContiguousRam(numPages<<m.iPageShift, nextPage, GetPageType(), 0);
			if (r!=KErrNone)
				return r;
			if(clearRam)
				m.ClearPages(numPages, (TPhysAddr*)(nextPage|1), iClearByte);	// clear RAM if required
			*aExtraArg = nextPage;	// store physical address of RAM as return argument
			}
			break;

		case DChunk::ECommitDiscontiguousPhysical:
			{
			pPageList = aExtraArg;	// use the pages given to us

			// Check address of pages are multiples of page size...
			TInt numPages=(endOffset-offset)>>m.iPageShift;
			TUint32* ptr = aExtraArg;
			TUint32* endPtr = aExtraArg+numPages;
			if(ptr>=endPtr)
				return KErrNone;	// Zero size commit is OK
			TPhysAddr pageBits = 0;
			do
				pageBits |= *ptr++;
			while(ptr<endPtr);
			if(pageBits&(m.iPageSize-1))
				return KErrArgument;	// all addresses must be multiple of page size
			}
			break;

		case DChunk::ECommitContiguousPhysical:
			nextPage = (TPhysAddr)aExtraArg;	// we have been given the physical address to use
			if(nextPage&(m.iPageSize-1))
				return KErrArgument;	// address must be multiple of page size
			break;

		case DChunk::ECommitVirtual:
#ifndef __MARM__
			return KErrNotSupported;
#endif
			break;

		default:
			return KErrNotSupported;
		}

	while(offset<endOffset)
		{
		TInt np=(endOffset-offset)>>m.iPageShift;	// pages remaining to satisfy request
		TInt npEnd=(m.iChunkSize-(offset&m.iChunkMask))>>m.iPageShift;	// number of pages to end of page table
		if (np>npEnd)
			np=npEnd;	// limit to single page table
		if (np>MM::MaxPagesInOneGo)
			np=MM::MaxPagesInOneGo;	// limit
		TInt ptid=iPageTables[offset>>m.iChunkShift];
		newPtId=-1;
		if (ptid==0xffff)
			{
			// need to allocate a new page table
			newPtId=m.AllocPageTable();
			if (newPtId<0)
				{
				r=KErrNoMemory;
				break;	// Exit the loop. Below, we'll free all RAM
						// allocated in the previous loop passes.
				}
			ptid=newPtId;
			}

		if(aCommitType==DChunk::ECommitDiscontiguous)
			{
			pPageList = pageList;
			r=m.AllocRamPages(pPageList,np, GetPageType());	// try to allocate pages
			if (r!=KErrNone)	// If fail, clean up what was allocated in this loop.
				{
				if (newPtId>=0)
					m.FreePageTable(newPtId);
				break;	// Exit the loop. Below, we'll free all RAM
						// allocated in the previous loop passes.
				}
			if(clearRam)
				m.ClearPages(np, pPageList, iClearByte);	// clear RAM if required
			}

		TInt commitSize = np<<m.iPageShift;

		// In shared chunks (visible to both user and kernel side), the kernel side
		// is always mapped first; decommitting goes in the reverse order.
		if(iKernelMirror)
			{
			// Map the same memory into the kernel mirror chunk
			if(pPageList)
				r = iKernelMirror->DoCommit(offset,commitSize,ECommitDiscontiguousPhysical,pPageList);
			else
				r = iKernelMirror->DoCommit(offset,commitSize,ECommitContiguousPhysical,(TUint32*)nextPage);
			__KTRACE_OPT(KMMU,Kern::Printf("iKernelMirror->DoCommit returns %d",r));
			if(r!=KErrNone)	// If fail, clean up what was allocated in this loop.
				{
				if(aCommitType==DChunk::ECommitDiscontiguous)
					m.FreePages(pPageList,np,EPageFixed);
				if (newPtId>=0)
					m.FreePageTable(newPtId);

				break;	// Exit the loop. Below, we'll free all RAM
						// allocated in the previous loop passes.
				}
			}

		// Commit the memory.
		NKern::LockSystem();	// lock the system while we change the MMU mappings
		iSize += commitSize;	// update committed size
		if (aCommitType==DChunk::ECommitVirtual)
			m.MapVirtual(ptid, np);
		else if(pPageList)
			{
			m.MapRamPages(ptid, type, this, offset, pPageList, np, iPtePermissions);
			pPageList += np;
			}
		else
			{
			m.MapPhysicalPages(ptid, type, this, offset, nextPage, np, iPtePermissions);
			nextPage += commitSize;
			}
		NKern::UnlockSystem();

		if (newPtId>=0)
			{
			// We have allocated a new page table, now we must assign it
			iPageTables[offset>>m.iChunkShift]=ptid;
			TLinAddr addr=(TLinAddr)iBase+offset;	// current address
			m.AssignPageTable(ptid, SPageTableInfo::EChunk, this, addr, iPdePermissions);
			newPtId = -1;
			}
		__KTRACE_OPT(KMEMTRACE,Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this));
#ifdef BTRACE_CHUNKS
		BTraceContext12(BTrace::EChunks,ownsMemory?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,offset,commitSize);
#endif

		offset += commitSize;	// update offset
		}

	if (r==KErrNone)
		{
		if(iPermanentPageBitMap)
			iPermanentPageBitMap->Alloc(aOffset>>m.iPageShift,aSize>>m.iPageShift);
		}
	else
		{
		// We ran out of memory somewhere.
		// Free any memory we succeeded in allocating in the loops before the one that failed
		if (iChunkType != ESharedKernelMirror)	// Kernel mirror chunk will be decommitted alongside the main chunk.
			{
			DChunk::TDecommitType decommitType = aCommitType==DChunk::ECommitVirtual ?
				DChunk::EDecommitVirtual : DChunk::EDecommitNormal;
			DoDecommit(aOffset,offset-aOffset,decommitType);
			}

		if(aCommitType==DChunk::ECommitContiguous)
			{
			// Free the pages we allocated but didn't get around to committing.
			// This must go page by page: FreePhysicalRam can't be used here because
			// the originally allocated contiguous region is already partly freed (in DoDecommit).
			TPhysAddr last = nextPage + ((endOffset-offset)>>m.iPageShift<<m.iPageShift);
			while(nextPage<last)
				{
				a.FreeRamPage(nextPage, GetPageType());
				nextPage += m.iPageSize;
				}
			*aExtraArg = KPhysAddrInvalid;	// return invalid physical address
			}

		m.iAllocFailed=ETrue;
		}
	return r;
	}

void DMemModelChunk::DoDecommit(TInt aOffset, TInt aSize, TDecommitType aDecommitType)
	{
	// Decommit RAM from a chunk at a specified offset
	// enter and leave with system unlocked
	// must hold RamAlloc mutex before calling this function
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoDecommit %x+%x",aOffset,aSize));

	TBool ownsMemory = !(iAttributes&EMemoryNotOwned);

	TInt deferred=0;
	TInt offset=aOffset;
	TInt endOffset=offset+aSize;
	Mmu& m = Mmu::Get();
	DRamAllocator& a = *m.iRamPageAllocator;
	TPhysAddr pageList[KMaxPages];
	TLinAddr linearPageList[KMaxPages];
	const TAny* asids=GLOBAL_MAPPING;
	if (iOsAsids)
		asids=iOsAsids;
	else if (iOwningProcess)
		asids=(const TAny*)((DMemModelProcess*)iOwningProcess)->iOsAsid;
	TUint size_in_pages = (TUint)(Min(aSize,iSize)>>m.iPageShift);
	TBool sync_decommit = (size_in_pages<m.iDecommitThreshold);
	TInt total_freed=0;
	while(offset<endOffset)
		{
		TInt np=(endOffset-offset)>>m.iPageShift;	// number of pages remaining to decommit
		TInt pdeEnd=(offset+m.iChunkSize)&~m.iChunkMask;
		TInt npEnd=(pdeEnd-offset)>>m.iPageShift;	// number of pages to end of page table
		if (np>npEnd)
			np=npEnd;	// limit to single page table
		if (np>MM::MaxPagesInOneGo)
			np=MM::MaxPagesInOneGo;	// limit
		TLinAddr addr=(TLinAddr)iBase+offset;	// current address
		TInt ptid=iPageTables[offset>>m.iChunkShift];	// get page table ID if a page table is already assigned here
		if (ptid!=0xffff)
			{
			TInt nPtes=0;
			TInt nUnmapped=0;

#ifdef BTRACE_CHUNKS
			TUint oldFree = m.FreeRamInBytes();
#endif
			// Unmap the pages, clear the PTEs and place the physical addresses of the now-free RAM pages in
			// pageList. Return nPtes=number of pages placed in list, remain=number of PTEs remaining in page table.
			// Bit 31 of return value is set if TLB flush may be incomplete
			NKern::LockSystem();
			TInt remain;
			if (ownsMemory)
				{
				if (aDecommitType == EDecommitVirtual)
					remain=m.UnmapVirtual(ptid,addr,np,pageList,ETrue,nPtes,nUnmapped,iOwningProcess);
				else
					remain=m.UnmapPages(ptid,addr,np,pageList,ETrue,nPtes,nUnmapped,iOwningProcess);
				}
			else
				{
				if (aDecommitType == EDecommitVirtual)
					remain=m.UnmapUnownedVirtual(ptid,addr,np,pageList,linearPageList,nPtes,nUnmapped,iOwningProcess);
				else
					remain=m.UnmapUnownedPages(ptid,addr,np,pageList,linearPageList,nPtes,nUnmapped,iOwningProcess);
				}
			TInt nFree = ownsMemory ? nUnmapped : 0;	// the number of pages to free
			deferred |= remain;
			TInt decommitSize=nPtes<<m.iPageShift;
			iSize-=decommitSize;	// reduce the committed size
			NKern::UnlockSystem();

			__KTRACE_OPT(KMEMTRACE,Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this));
#ifdef BTRACE_CHUNKS
			TUint reclaimed = (oldFree-m.FreeRamInBytes())>>m.iPageShift;	// number of 'unlocked' pages reclaimed from ram cache
			if(nFree-reclaimed)
				BTraceContext12(BTrace::EChunks,ownsMemory?BTrace::EChunkMemoryDeallocated:BTrace::EChunkMemoryRemoved,this,offset,(nFree-reclaimed)<<m.iPageShift);
#endif

			if (sync_decommit && (remain & KUnmapPagesTLBFlushDeferred))
				{
				// must ensure DTLB flushed before doing cache purge on decommit
				m.GenericFlush(Mmu::EFlushDTLB);
				}

			// if page table is now completely empty, unassign it and update chunk PDE info
			remain &= KUnmapPagesCountMask;
			if (remain==0)
				{
				m.DoUnassignPageTable(addr, asids);
				m.FreePageTable(ptid);
				iPageTables[offset>>m.iChunkShift]=0xffff;
				}

			// Physical memory not owned by the chunk must be flushed from the cache(s)
			// with its contents preserved, as the owner may still be using it.
			if(!ownsMemory)
				{
				// If the chunk has a kernel mirror, it is sufficient to do this just once.
				if (!iKernelMirror)
					{
					TInt i;
					for (i=0;i<nUnmapped;i++)
						m.CacheMaintenanceOnPreserve(pageList[i], KPageSize, linearPageList[i], iMapAttr);
					}
				}
			else if (nFree)
				{
				// We can now return the decommitted pages to the free page list and sort out caching.
				total_freed+=nFree;
				if (sync_decommit)	// purge cache if the size is below decommit threshold
					m.CacheMaintenanceOnDecommit(pageList, nFree);
				a.FreeRamPages(pageList,nFree, GetPageType());
				}

			offset+=(np<<m.iPageShift);
			}
		else
			{
			__KTRACE_OPT(KMMU,Kern::Printf("No page table at %08x",addr));
			if ((iAttributes&EDisconnected)==0)
				MM::Panic(MM::EChunkDecommitNoPageTable);
			offset=pdeEnd;	// disconnected chunk - step on to next PDE
			}
		}
	if (deferred & KUnmapPagesTLBFlushDeferred)
		m.GenericFlush( (iAttributes&ECode) ? Mmu::EFlushDTLB|Mmu::EFlushITLB : Mmu::EFlushDTLB );

	if (total_freed && !sync_decommit)	// flush the entire cache if the size exceeds the decommit threshold
		CacheMaintenance::SyncPhysicalCache_All();	// on ARMv6, this deals with both L1 & L2 cache

	// The kernel-mapped part of the chunk is removed last. At this point no user-side
	// mapping remains, which ensures that evicting the data from the cache will succeed.
	if(iKernelMirror)
		iKernelMirror->DoDecommit(aOffset,aSize);
	}


TInt DMemModelChunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
//
// Adjust a double-ended chunk.
//
	{

	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
		return KErrGeneral;
	if (aTop<0 || aBottom<0 || aTop<aBottom || aTop>iMaxSize)
		return KErrArgument;
	Mmu& m = Mmu::Get();
	aBottom &= ~m.iPageMask;
	aTop=(aTop+m.iPageMask)&~m.iPageMask;
	TInt newSize=aTop-aBottom;
	if (newSize>iMaxSize)
		return KErrArgument;

	Mmu::Wait();
	TInt initBottom=iStartPos;
	TInt initTop=iStartPos+iSize;
	TInt nBottom=Max(aBottom,iStartPos);	// intersection bottom
	TInt nTop=Min(aTop,iStartPos+iSize);	// intersection top
	TInt r=KErrNone;
	if (nBottom<nTop)
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions intersect"));
		if (initBottom<nBottom)
			{
			iStartPos=aBottom;
			DoDecommit(initBottom,nBottom-initBottom);
			}
		if (initTop>nTop)
			DoDecommit(nTop,initTop-nTop);	// this changes iSize
		if (aBottom<nBottom)
			{
			r=DoCommit(aBottom,nBottom-aBottom);
			if (r==KErrNone)
				{
				if (aTop>nTop)
					r=DoCommit(nTop,aTop-nTop);
				if (r==KErrNone)
					iStartPos=aBottom;
				else
					DoDecommit(aBottom,nBottom-aBottom);
				}
			}
		else if (aTop>nTop)
			r=DoCommit(nTop,aTop-nTop);
		}
	else
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions disjoint"));
		if (iSize)
			DoDecommit(initBottom,iSize);
		iStartPos=aBottom;
		if (newSize)
			r=DoCommit(iStartPos,newSize);
		}
	Mmu::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x+%x base %08x",this,iStartPos,iSize,iBase));
	return r;
	}

TInt DMemModelChunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
//
// Commit to a disconnected chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0)
		return KErrArgument;
	if (aSize==0)
		return KErrNone;
	Mmu& m = Mmu::Get();
	aSize+=(aOffset & m.iPageMask);
	aOffset &= ~m.iPageMask;
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	if ((aOffset+aSize)>iMaxSize)
		return KErrArgument;

	Mmu::Wait();
	TInt r=KErrNone;
	TInt i=aOffset>>m.iPageShift;
	TInt n=aSize>>m.iPageShift;
	if (iPageBitMap->NotFree(i,n))
		r=KErrAlreadyExists;
	else
		{
		r=DoCommit(aOffset,aSize,aCommitType,aExtraArg);
		if (r==KErrNone)
			iPageBitMap->Alloc(i,n);
		}
	Mmu::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

TInt DMemModelChunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
//
// Allocate offset and commit to a disconnected chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate %x %x %d",aSize,aGuard,aAlign));

	// Only allow this to be called on disconnected chunks and not disconnected
	// cache chunks: when guard pages exist the bit map can't be used to determine
	// the size of disconnected cache chunks, as is required by Decommit().
	if ((iAttributes & (EDoubleEnded|EDisconnected|ECache))!=EDisconnected)
		return KErrGeneral;

	if (aSize<=0 || aGuard<0)
		return KErrArgument;
	Mmu& m = Mmu::Get();
	aAlign=Max(aAlign-m.iPageShift,0);
	TInt base=TInt(TLinAddr(iBase)>>m.iPageShift);
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	aGuard=(aGuard+m.iPageMask)&~m.iPageMask;
	if ((aSize+aGuard)>iMaxSize)
		return KErrArgument;

	Mmu::Wait();
	TInt r=KErrNone;
	TInt n=(aSize+aGuard)>>m.iPageShift;
	TInt i=iPageBitMap->AllocAligned(n,aAlign,base,EFalse);	// allocate the offset
	if (i<0)
		r=KErrNoMemory;	// run out of reserved space for this chunk
	else
		{
		TInt offset=i<<m.iPageShift;
		__KTRACE_OPT(KMMU,Kern::Printf("Offset %x allocated",offset));
		r=DoCommit(offset+aGuard,aSize);
		if (r==KErrNone)
			{
			iPageBitMap->Alloc(i,n);
			r=offset;	// if operation successful, return allocated offset
			}
		}
	Mmu::Signal();
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate returns %x",r));
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

TInt DMemModelChunk::Decommit(TInt aOffset, TInt aSize)
//
// Decommit from a disconnected chunk.
//
	{
	return Decommit(aOffset, aSize, EDecommitNormal);
	}

TInt DMemModelChunk::Decommit(TInt aOffset, TInt aSize, TDecommitType aDecommitType)
//
// Decommit from a disconnected chunk
//
// @param aDecommitType Used to indicate whether area was originally committed with the
// ECommitVirtual type
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",aOffset,aSize));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0)
		return KErrArgument;
	if (aSize==0)
		return KErrNone;
#ifndef __MARM__
	if (aDecommitType == EDecommitVirtual)
		return KErrNotSupported;
#endif
	Mmu& m = Mmu::Get();
	aSize+=(aOffset & m.iPageMask);
	aOffset &= ~m.iPageMask;
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	if ((aOffset+aSize)>iMaxSize)
		return KErrArgument;

	Mmu::Wait();

	// limit the range to the home region range
	__KTRACE_OPT(KMMU,Kern::Printf("Rounded and clipped range %x+%x",aOffset,aSize));

	TInt i=aOffset>>m.iPageShift;
	TInt n=aSize>>m.iPageShift;

	__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));
	TUint oldAvail = iPageBitMap->iAvail;
	TUint oldSize = iSize;

	// Free those positions which are still committed and also any guard pages,
	// i.e. pages that are reserved in this chunk but not committed.
	iPageBitMap->SelectiveFree(i,n);

	DoDecommit(aOffset,aSize,aDecommitType);

	if (iAttributes & ECache)
		{
		// If this is the file server cache chunk then adjust the size based
		// on the bit map size because:-
		// - Unlocked and reclaimed pages will be unmapped without updating
		//   iSize or the bit map.
		// - DoDecommit() only decommits the mapped pages.
		// For all other chunks what is mapped is what is committed to the
		// chunk so iSize is accurate.
		TUint actualFreedPages = iPageBitMap->iAvail - oldAvail;
		iSize = oldSize - (actualFreedPages << KPageShift);
		}

	Mmu::Signal();
	__DEBUG_EVENT(EEventUpdateChunk, this);
	return KErrNone;
	}

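// Unlock: make the given committed pages available for reclamation by the RAM
// cache when memory is low; their contents are preserved until actually
// reclaimed. Lock() below reverses this, and if any page was reclaimed in the
// meantime it decommits the whole range and returns an error.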
TInt DMemModelChunk::Unlock(TInt aOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Unlock %x+%x",aOffset,aSize));
	if (!(iAttributes&ECache))
		return KErrGeneral;
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;

	// Mark this as the file server cache chunk. This is safe as it is only the
	// file server that can invoke this function.
	iAttributes |= ECache;

	if (aOffset<0 || aSize<0)
		return KErrArgument;
	if (aSize==0)
		return KErrNone;
	Mmu& m = Mmu::Get();
	aSize+=(aOffset & m.iPageMask);
	aOffset &= ~m.iPageMask;
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	if ((aOffset+aSize)>iMaxSize)
		return KErrArgument;

	Mmu::Wait();
	TInt r=KErrNone;
	TInt i=aOffset>>m.iPageShift;
	TInt n=aSize>>m.iPageShift;
	if (iPageBitMap->NotAllocated(i,n))
		r=KErrNotFound;
	else
		{
#ifdef BTRACE_CHUNKS
		TUint oldFree = m.FreeRamInBytes();
#endif
		r=m.UnlockRamCachePages((TLinAddr)(iBase+aOffset),n,iOwningProcess);
#ifdef BTRACE_CHUNKS
		if(r==KErrNone)
			{
			TUint unlocked = m.FreeRamInBytes()-oldFree;	// size of memory unlocked
			if(unlocked)
				BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryDeallocated,this,aOffset,unlocked);
			}
#endif
		}

	Mmu::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

TInt DMemModelChunk::Lock(TInt aOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Lock %x+%x",aOffset,aSize));
	if (!(iAttributes&ECache))
		return KErrGeneral;
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0)
		return KErrArgument;
	if (aSize==0)
		return KErrNone;
	Mmu& m = Mmu::Get();
	aSize+=(aOffset & m.iPageMask);
	aOffset &= ~m.iPageMask;
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	if ((aOffset+aSize)>iMaxSize)
		return KErrArgument;

	Mmu::Wait();
	TInt r=KErrNone;
	TInt i=aOffset>>m.iPageShift;
	TInt n=aSize>>m.iPageShift;
	if (iPageBitMap->NotAllocated(i,n))
		r=KErrNotFound;
	else
		{
#ifdef BTRACE_CHUNKS
		TUint oldFree = m.FreeRamInBytes();
#endif
		r=m.LockRamCachePages((TLinAddr)(iBase+aOffset),n,iOwningProcess);
#ifdef BTRACE_CHUNKS
		if(r==KErrNone)
			{
			TUint locked = oldFree-m.FreeRamInBytes();
			if(locked)
				BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryAllocated,this,aOffset,locked);
			}
#endif
		}
	if(r!=KErrNone)
		{
		// decommit memory on error...
		__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));
		TUint oldAvail = iPageBitMap->iAvail;
		iPageBitMap->SelectiveFree(i,n);	// free those positions which are actually allocated
		TUint oldSize = iSize;

		DoDecommit(aOffset,aSize);

		// Use the bit map to adjust the size of the chunk as unlocked and reclaimed pages
		// will have been unmapped but not removed from the bit map as DoDecommit() only
		// decommits the mapped pages.
		TUint actualFreedPages = iPageBitMap->iAvail - oldAvail;
		iSize = oldSize - (actualFreedPages << KPageShift);
		}

	Mmu::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

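// Allocate the chunk's base address from the appropriate linear section.
// Chunks with a fixed, preallocated address have no section and need no
// allocation.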
TInt DMemModelChunk::AllocateAddress()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O AllocateAddress()",this));
	TLinearSection* s=LinearSection();
	if (!s)
		return KErrNone;	// chunk has fixed preallocated address

	Mmu& m=Mmu::Get();
	TUint32 required=iMaxSize>>m.iChunkShift;
	__KTRACE_OPT(KMMU,Kern::Printf("Searching from low to high addresses"));
	TInt r=s->iAllocator.AllocConsecutive(required, EFalse);
	if (r<0)
		return KErrNoMemory;
	s->iAllocator.Alloc(r, required);
	iBase=(TUint8*)(s->iBase + (r<<m.iChunkShift));
	__KTRACE_OPT(KMMU,Kern::Printf("Address %08x allocated",iBase));
	return KErrNone;
	}

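// Rewrite the PTE permissions for every mapped page in [aOffset,aOffset+aSize),
// working one page table at a time and skipping ranges with no page table.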
void DMemModelChunk::ApplyPermissions(TInt aOffset, TInt aSize, TPte aPtePerm)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O ApplyPermissions(%x+%x,%08x)",this,aOffset,aSize,aPtePerm));
	__ASSERT_ALWAYS(aOffset>=0 && aSize>=0, MM::Panic(MM::EChunkApplyPermissions1));
	if (aSize==0)
		return;
	Mmu& m=Mmu::Get();
	aOffset &= ~m.iPageMask;
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	TInt endOffset=aOffset+aSize;
	__ASSERT_ALWAYS(endOffset<=iMaxSize, MM::Panic(MM::EChunkApplyPermissions2));

	Mmu::Wait();
	while(aOffset<endOffset)
		{
		TInt ptid=iPageTables[aOffset>>m.iChunkShift];
		TInt pdeEnd=(aOffset+m.iChunkSize)&~m.iChunkMask;
		if (ptid==0xffff)
			{
			aOffset=pdeEnd;
			continue;
			}
		TInt np=(endOffset-aOffset)>>m.iPageShift;	// number of pages remaining to process
		TInt npEnd=(pdeEnd-aOffset)>>m.iPageShift;	// number of pages to end of page table
		if (np>npEnd)
			np=npEnd;	// limit to single page table
		if (np>MM::MaxPagesInOneGo)
			np=MM::MaxPagesInOneGo;	// limit
		m.ApplyPagePermissions(ptid, (aOffset&m.iChunkMask)>>m.iPageShift, np, aPtePerm);
		aOffset+=(np<<m.iPageShift);
		}
	Mmu::Signal();
	}

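// Close a reference on a hardware chunk. On the final close the mapping is
// removed, its linear address is released, and the physical region is flushed
// from the cache(s) with contents preserved, since the driver may still own
// and use the memory.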
TInt DMemModelChunkHw::Close(TAny*)
	{
	__KTRACE_OPT(KOBJECT,Kern::Printf("DMemModelChunkHw::Close %d %O",AccessCount(),this));
	TInt r=Dec();
	if (r==1)
		{
		if (iLinAddr)
			{
			// Save data for cache maintenance before being destroyed by DeallocateLinearAddress
			TPhysAddr pa = iPhysAddr;
			TLinAddr la = iLinAddr;
			TInt size = iSize;
			TUint attr = iAttribs;

			MmuBase& m=*MmuBase::TheMmu;
			MmuBase::Wait();
			m.Unmap(iLinAddr,iSize);
			MmuBase::Signal();
			DeallocateLinearAddress();

			// Physical memory has to be evicted from cache(s).
			// Must be preserved as it can still be in use by the driver.
			MmuBase::Wait();
			m.CacheMaintenanceOnPreserve(pa, size, la, attr);
			MmuBase::Signal();
			}
		K::ObjDelete(this);
		}
	return r;
	}

TInt DMemModelChunk::CheckAccess()
	{
	DProcess* pP=TheCurrentThread->iOwningProcess;
	if (iAttributes&EPrivate)
		{
		if (iOwningProcess && iOwningProcess!=pP && pP!=K::TheKernelProcess)
			return KErrAccessDenied;
		}
	return KErrNone;
	}


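// Re-emit BTrace events describing this chunk's currently committed regions,
// so a trace session started after chunk creation sees a complete picture.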
void DMemModelChunk::BTracePrime(TInt aCategory)
	{
	DChunk::BTracePrime(aCategory);

#ifdef BTRACE_CHUNKS
	if (aCategory == BTrace::EChunks || aCategory == -1)
		{
		MmuBase::Wait();

		TBool memoryOwned = !(iAttributes&EMemoryNotOwned);
		MmuBase& m=*MmuBase::TheMmu;
		TInt committedBase = -1;

		// look at each page table in this chunk...
		TUint chunkEndIndex = iMaxSize>>KChunkShift;
		for(TUint chunkIndex=0; chunkIndex<chunkEndIndex; ++chunkIndex)
			{
			TInt ptid = iPageTables[chunkIndex];
			if(ptid==0xffff)
				{
				// no page table...
				if(committedBase!=-1)
					{
					TUint committedEnd = chunkIndex*KChunkSize;
					BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase);
					committedBase = -1;
					}
				continue;
				}

			TPte* pPte=(TPte*)m.PageTableLinAddr(ptid);

			// look at each page in page table...
			NKern::LockSystem();
			for(TUint pageIndex=0; pageIndex<KChunkSize/KPageSize; ++pageIndex)
				{
				TBool committed = false;
				TPhysAddr phys = m.PtePhysAddr(pPte[pageIndex], pageIndex);
				if(phys!=KPhysAddrInvalid)
					{
					// we have a page...
					if(!memoryOwned)
						committed = true;
					else
						{
						// make sure we own the page...
						SPageInfo* pi = SPageInfo::SafeFromPhysAddr(phys);
						if(pi && pi->Type()==SPageInfo::EChunk && pi->Owner()==this)
							committed = true;
						}
					}

				if(committed)
					{
					if(committedBase==-1)
						committedBase = chunkIndex*KChunkSize+pageIndex*KPageSize;	// start of new region
					}
				else
					{
					if(committedBase!=-1)
						{
						// generate trace for region...
						NKern::FlashSystem();
						TUint committedEnd = chunkIndex*KChunkSize+pageIndex*KPageSize;
						BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase);
						committedBase = -1;
						}
					}

				if((pageIndex&15)==0)
					NKern::FlashSystem();
				}

			NKern::UnlockSystem();
			}

		if(committedBase!=-1)
			{
			TUint committedEnd = chunkEndIndex*KChunkSize;
			BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase);
			}

		MmuBase::Signal();
		}
#endif
	}