author      | Pat Downey <patd@symbian.org>
date        | Wed, 01 Sep 2010 12:34:56 +0100
branch      | RCL_3
changeset   | 44:3e88ff8f41d5
parent      | 26:c734af59ce98
permissions | -rw-r--r--
1 |
// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies). |
2 |
// All rights reserved. |
|
3 |
// This component and the accompanying materials are made available |
|
4 |
// under the terms of the License "Eclipse Public License v1.0" |
|
5 |
// which accompanies this distribution, and is available |
|
6 |
// at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 |
// |
|
8 |
// Initial Contributors: |
|
9 |
// Nokia Corporation - initial contribution. |
|
10 |
// |
|
11 |
// Contributors: |
|
12 |
// |
|
13 |
// Description: |
|
14 |
// e32\memmodel\epoc\multiple\mchunk.cpp |
|
15 |
// |
|
16 |
// |
|
17 |
||
18 |
#include "memmodel.h" |
|
19 |
#include "cache_maintenance.h" |
|
20 |
#include <mmubase.inl> |
|
21 |
#include <ramalloc.h> |
|
22 |
||
23 |
DMemModelChunk::DMemModelChunk() |
|
24 |
{ |
|
25 |
} |
|
26 |
||
27 |
TLinearSection* DMemModelChunk::LinearSection() |
|
28 |
{ |
|
29 |
Mmu& m=Mmu::Get(); |
|
30 |
TInt ar=(iAttributes&EAddressRangeMask); |
|
31 |
switch (ar) |
|
32 |
{ |
|
33 |
case EAddressLocal: return ((DMemModelProcess*)iOwningProcess)->iLocalSection; |
|
34 |
case EAddressFixed: return NULL; |
|
35 |
case EAddressShared: return m.iSharedSection; |
|
36 |
case EAddressUserGlobal: return m.iUserGlobalSection; |
|
37 |
case EAddressKernel: return m.iKernelSection; |
|
38 |
} |
|
39 |
MM::Panic(MM::EChunkBadAddressRange); |
|
40 |
return NULL; |
|
41 |
} |
|
42 |
||
43 |
void DMemModelChunk::Destruct() |
|
44 |
{ |
|
45 |
__KTRACE_OPT(KTHREAD,Kern::Printf("DMemModelChunk destruct %O",this)); |
|
46 |
if (iPageTables) |
|
47 |
{ |
|
48 |
#ifdef _DEBUG |
|
49 |
TInt r; |
|
50 |
#define SET_R_IF_DEBUG(x) r = (x) |
|
51 |
#else |
|
52 |
#define SET_R_IF_DEBUG(x) (void)(x) |
|
53 |
#endif |
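// Editorial note: the SET_R_IF_DEBUG macro pair above captures the return code only in
// debug builds so the __ASSERT_DEBUG below can check that the decommit/adjust succeeded;
// release builds simply evaluate the expression and discard the result.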
|
54 |
if (iAttributes & EDisconnected) |
|
55 |
SET_R_IF_DEBUG(Decommit(0,iMaxSize)); |
|
56 |
else if (iAttributes & EDoubleEnded) |
|
57 |
SET_R_IF_DEBUG(AdjustDoubleEnded(0,0)); |
|
58 |
else |
|
59 |
SET_R_IF_DEBUG(Adjust(0)); |
|
60 |
__ASSERT_DEBUG(r==KErrNone, MM::Panic(MM::EDecommitFailed)); |
|
61 |
#ifdef _DEBUG |
|
62 |
// check all page tables have been freed... |
|
63 |
Mmu& m=Mmu::Get(); |
|
64 |
TInt nPdes=(iMaxSize+m.iChunkMask)>>m.iChunkShift; |
|
65 |
for(TInt i=0; i<nPdes; i++) |
|
66 |
{ |
|
67 |
__NK_ASSERT_DEBUG(iPageTables[i]==0xffff); |
|
68 |
} |
|
69 |
#endif |
|
70 |
} |
|
71 |
if (iBase) |
|
72 |
{ |
|
73 |
TLinearSection* s=LinearSection(); |
|
74 |
if(s) |
|
75 |
{ |
|
76 |
Mmu::Wait(); |
|
77 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::~DMemModelChunk remove region")); |
|
78 |
Mmu& m=Mmu::Get(); |
|
79 |
s->iAllocator.Free( (TLinAddr(iBase)-s->iBase)>>m.iChunkShift, iMaxSize>>m.iChunkShift); |
|
80 |
Mmu::Signal(); |
|
81 |
} |
|
82 |
} |
|
83 |
delete iOsAsids; |
|
84 |
Kern::Free(iPageTables); |
|
85 |
delete iPageBitMap; |
|
86 |
delete iPermanentPageBitMap; |
|
87 |
||
88 |
if(iKernelMirror) |
|
89 |
iKernelMirror->Close(NULL); |
|
90 |
||
91 |
TDfc* dfc = iDestroyedDfc; |
|
92 |
if (dfc) |
|
93 |
dfc->QueueOnIdle(); |
|
94 |
||
95 |
__KTRACE_OPT(KMEMTRACE, {Mmu::Wait(); Kern::Printf("MT:D %d %x %O",NTickCount(),this,this);Mmu::Signal();}); |
|
96 |
#ifdef BTRACE_CHUNKS |
|
97 |
BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this); |
|
98 |
#endif |
|
99 |
} |
|
100 |
||
101 |
TInt DMemModelChunk::Close(TAny* aPtr) |
|
102 |
{ |
|
103 |
if (aPtr) |
|
104 |
{ |
|
105 |
DMemModelProcess* pP=(DMemModelProcess*)aPtr; |
|
106 |
if ((iAttributes&EMapTypeMask)==EMapTypeLocal) |
|
107 |
pP=(DMemModelProcess*)iOwningProcess; |
|
108 |
pP->RemoveChunk(this); |
|
109 |
} |
|
110 |
TInt r=Dec(); |
|
111 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Close %d %O",r,this)); |
|
112 |
__NK_ASSERT_DEBUG(r > 0); // Should never be negative. |
|
113 |
if (r==1) |
|
114 |
{ |
|
115 |
K::ObjDelete(this); |
|
116 |
return EObjectDeleted; |
|
117 |
} |
|
118 |
return 0; |
|
119 |
} |
|
120 |
||
121 |
||
122 |
TUint8* DMemModelChunk::Base(DProcess* aProcess) |
|
123 |
{ |
|
124 |
return iBase; |
|
125 |
} |
|
126 |
||
127 |
||
128 |
TInt DMemModelChunk::DoCreate(SChunkCreateInfo& aInfo) |
|
129 |
{ |
|
130 |
__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O DoCreate att=%08x",this,iAttributes)); |
|
131 |
||
132 |
__ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask)); |
|
133 |
||
134 |
if (aInfo.iMaxSize<=0) |
|
135 |
return KErrArgument; |
|
136 |
||
137 |
if (iKernelMirror) |
|
138 |
{ |
|
139 |
iKernelMirror->iAttributes |= iAttributes|EMemoryNotOwned; |
|
140 |
TInt r=iKernelMirror->DoCreate(aInfo); |
|
141 |
if(r!=KErrNone) |
|
142 |
return r; |
|
143 |
} |
|
144 |
||
145 |
Mmu& m=Mmu::Get(); |
|
146 |
TInt nPdes=(aInfo.iMaxSize+m.iChunkMask)>>m.iChunkShift; |
|
147 |
iMaxSize=nPdes<<m.iChunkShift; |
|
148 |
iMapAttr = aInfo.iMapAttr; |
|
149 |
SetupPermissions(); |
|
150 |
TInt mapType=iAttributes & EMapTypeMask; |
|
151 |
if (mapType==EMapTypeShared) |
|
152 |
{ |
|
153 |
iOsAsids=TBitMapAllocator::New(m.iNumOsAsids,ETrue); |
|
154 |
if (!iOsAsids) |
|
155 |
return KErrNoMemory; |
|
156 |
} |
|
157 |
TInt maxpages=iMaxSize>>m.iPageShift; |
|
158 |
if (iAttributes & EDisconnected) |
|
159 |
{ |
|
160 |
TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue); |
|
161 |
if (!pM) |
|
162 |
return KErrNoMemory; |
|
163 |
iPageBitMap=pM; |
|
164 |
__KTRACE_OPT(KMMU,Kern::Printf("PageBitMap at %08x, MaxPages %d",pM,maxpages)); |
|
165 |
} |
|
166 |
if(iChunkType==ESharedKernelSingle || iChunkType==ESharedKernelMultiple) |
|
167 |
{ |
|
168 |
TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue); |
|
169 |
if (!pM) |
|
170 |
return KErrNoMemory; |
|
171 |
iPermanentPageBitMap = pM; |
|
172 |
} |
|
173 |
iPageTables=(TUint16*)Kern::Alloc(nPdes*sizeof(TUint16)); |
|
174 |
if (!iPageTables) |
|
175 |
return KErrNoMemory; |
|
176 |
memset(iPageTables,0xff,nPdes*sizeof(TUint16)); |
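// Note: 0xffff in an iPageTables slot is the sentinel meaning "no page table assigned
// for this page-directory-sized region"; DoCommit/DoDecommit below test against it
// before touching any mappings.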
|
177 |
MmuBase::Wait(); |
|
178 |
TInt r=AllocateAddress(); |
|
179 |
__KTRACE_OPT(KMEMTRACE,Kern::Printf("MT:C %d %x %O",NTickCount(),this,this)); |
|
180 |
MmuBase::Signal(); |
|
181 |
#ifdef BTRACE_CHUNKS |
|
182 |
TKName nameBuf; |
|
183 |
Name(nameBuf); |
|
184 |
BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size()); |
|
185 |
if(iOwningProcess) |
|
186 |
BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess); |
|
187 |
BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes); |
|
188 |
#endif |
|
189 |
return r; |
|
190 |
} |
|
191 |
||
192 |
void DMemModelChunk::ClaimInitialPages() |
|
193 |
{ |
|
194 |
__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O ClaimInitialPages()",this)); |
|
195 |
Mmu& m=Mmu::Get(); |
|
196 |
TInt offset=0; |
|
197 |
TUint32 ccp=K::CompressKHeapPtr(this); |
|
198 |
NKern::LockSystem(); |
|
199 |
while(offset<iSize) |
|
200 |
{ |
|
201 |
TInt ptid=m.PageTableId(TLinAddr(iBase)+offset); |
|
202 |
__ASSERT_ALWAYS(ptid>=0,MM::Panic(MM::EClaimInitialPagesBadPageTable)); |
|
203 |
__KTRACE_OPT(KMMU,Kern::Printf("Offset %x PTID=%d",offset,ptid)); |
|
204 |
iPageTables[offset>>m.iChunkShift]=ptid; |
|
205 |
SPageTableInfo& ptinfo = m.PtInfo(ptid); |
|
206 |
ptinfo.SetChunk(ccp,offset>>m.iChunkShift); |
|
207 |
TPte* pPte=(TPte*)m.PageTableLinAddr(ptid); |
|
208 |
TInt i; |
|
209 |
TInt np = 0; |
|
210 |
TInt flashCount = MM::MaxPagesInOneGo; |
|
211 |
for (i=0; i<m.iChunkSize>>m.iPageShift; ++i, offset+=m.iPageSize) |
|
212 |
{ |
|
213 |
if(--flashCount<=0) |
|
214 |
{ |
|
215 |
flashCount = MM::MaxPagesInOneGo; |
|
216 |
NKern::FlashSystem(); |
|
217 |
} |
|
218 |
TPte pte=pPte[i]; |
|
219 |
if (m.PteIsPresent(pte)) |
|
220 |
{ |
|
221 |
++np; |
|
222 |
TPhysAddr phys=m.PtePhysAddr(pte, i); |
|
223 |
__KTRACE_OPT(KMMU,Kern::Printf("Offset %x phys %08x",offset,phys)); |
|
224 |
SPageInfo* info = SPageInfo::SafeFromPhysAddr(phys); |
|
225 |
if(info) |
|
226 |
{ |
|
227 |
info->SetChunk(this,offset>>m.iPageShift); |
|
228 |
#ifdef BTRACE_KERNEL_MEMORY |
|
229 |
--Epoc::KernelMiscPages; // page now owned by chunk, and is not 'miscellaneous' |
|
230 |
#endif |
|
231 |
} |
|
232 |
} |
|
233 |
} |
|
234 |
ptinfo.iCount = np; |
|
235 |
__KTRACE_OPT(KMMU,Kern::Printf("Offset %x PTID %d NP %d", offset, ptid, np)); |
|
236 |
} |
|
237 |
NKern::UnlockSystem(); |
|
238 |
} |
|
239 |
||
240 |
void DMemModelChunk::SetFixedAddress(TLinAddr aAddr, TInt aInitialSize) |
|
241 |
{ |
|
242 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O SetFixedAddress %08x size %08x",this,aAddr,aInitialSize)); |
|
243 |
iBase=(TUint8*)aAddr; |
|
244 |
iSize=Mmu::RoundToPageSize(aInitialSize); |
|
245 |
ClaimInitialPages(); |
|
246 |
} |
|
247 |
||
248 |
TInt DMemModelChunk::Reserve(TInt aInitialSize) |
|
249 |
// |
|
250 |
// Reserve home section address space for a chunk |
|
251 |
// |
|
252 |
{ |
|
253 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O Reserve() size %08x",this,aInitialSize)); |
|
254 |
iSize=Mmu::RoundToPageSize(aInitialSize); |
|
255 |
ClaimInitialPages(); |
|
256 |
return KErrNone; |
|
257 |
} |
|
258 |
||
259 |
TInt DMemModelChunk::Adjust(TInt aNewSize) |
|
260 |
// |
|
261 |
// Adjust a standard chunk. |
|
262 |
// |
|
263 |
{ |
|
264 |
||
265 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust %08x",aNewSize)); |
|
266 |
if (iAttributes & (EDoubleEnded|EDisconnected)) |
|
267 |
return KErrGeneral; |
|
268 |
if (aNewSize<0 || aNewSize>iMaxSize) |
|
269 |
return KErrArgument; |
|
270 |
||
271 |
TInt r=KErrNone; |
|
272 |
TInt newSize=Mmu::RoundToPageSize(aNewSize); |
|
273 |
if (newSize!=iSize) |
|
274 |
{ |
|
275 |
Mmu::Wait(); |
|
276 |
if (newSize>iSize) |
|
277 |
{ |
|
278 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust growing")); |
|
279 |
r=DoCommit(iSize,newSize-iSize); |
|
280 |
} |
|
281 |
else if (newSize<iSize) |
|
282 |
{ |
|
283 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust shrinking")); |
|
284 |
DoDecommit(newSize,iSize-newSize); |
|
285 |
} |
|
286 |
Mmu::Signal(); |
|
287 |
} |
|
288 |
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this); |
|
289 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x base %08x",this,iSize,iBase)); |
|
290 |
return r; |
|
291 |
} |
|
292 |
||
293 |
TInt DMemModelChunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress) |
|
294 |
{ |
|
295 |
if(!iPermanentPageBitMap) |
|
296 |
return KErrAccessDenied; |
|
297 |
if(TUint(aOffset)>=TUint(iMaxSize)) |
|
298 |
return KErrArgument; |
|
299 |
if(TUint(aOffset+aSize)>TUint(iMaxSize)) |
|
300 |
return KErrArgument; |
|
301 |
if(aSize<=0) |
|
302 |
return KErrArgument; |
|
303 |
TInt pageShift = Mmu::Get().iPageShift; |
|
304 |
TInt start = aOffset>>pageShift; |
|
305 |
TInt size = ((aOffset+aSize-1)>>pageShift)-start+1; |
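// Worked example (assuming 4KB pages): aOffset=0x1800, aSize=0x2000 gives start=1 and
// size=3, i.e. the request spans pages 1..3 even though it is only two pages long.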
|
306 |
if(iPermanentPageBitMap->NotAllocated(start,size)) |
|
307 |
return KErrNotFound; |
|
308 |
aKernelAddress = (TLinAddr)iKernelMirror->iBase+aOffset; |
|
309 |
return KErrNone; |
|
310 |
} |
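// Usage sketch (editorial, not part of this file): a driver owning a shared chunk
// normally reaches Address() via the exported kernel API, roughly as follows
// (the Kern::ChunkAddress signature is assumed from the DDK headers):
//
//   TLinAddr kernAddr;
//   TInt r = Kern::ChunkAddress(theChunk, offset, size, kernAddr);
//   if (r == KErrNone)
//       {
//       // kernAddr..kernAddr+size stays mapped for the lifetime of the chunk
//       }
//
// KErrNotFound here means the requested range has never been committed to the chunk.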
|
311 |
||
312 |
TInt DMemModelChunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList) |
|
313 |
{ |
|
314 |
TInt r=Address(aOffset,aSize,aKernelAddress); |
|
315 |
if(r!=KErrNone) |
|
316 |
return r; |
|
317 |
||
318 |
return Mmu::Get().LinearToPhysical(aKernelAddress,aSize,aPhysicalAddress,aPhysicalPageList); |
|
319 |
} |
|
320 |
||
321 |
void DMemModelChunk::Substitute(TInt aOffset, TPhysAddr aOldAddr, TPhysAddr aNewAddr) |
|
322 |
{ |
|
323 |
// Substitute the page mapping at aOffset with aNewAddr. |
|
324 |
// Enter and leave with system locked. |
|
325 |
// This is sometimes called with interrupts disabled and should leave them alone. |
|
326 |
Mmu& m = Mmu::Get(); |
|
327 |
__ASSERT_ALWAYS(iKernelMirror==NULL,MM::Panic(MM::EChunkRemapUnsupported)); |
|
328 |
||
329 |
TInt ptid=iPageTables[aOffset>>m.iChunkShift]; |
|
330 |
if(ptid==0xffff) |
|
331 |
MM::Panic(MM::EChunkRemapNoPageTable); |
|
332 |
||
333 |
// Permissions for global code will have been overwritten with ApplyPermissions |
|
334 |
// so we can't trust iPtePermissions for those chunk types |
|
335 |
TPte perms; |
|
336 |
if(iChunkType==EKernelCode) |
|
337 |
perms = m.iKernelCodePtePerm; |
|
338 |
else if(iChunkType==EDll) |
|
339 |
perms = m.iGlobalCodePtePerm; |
|
340 |
else |
|
341 |
perms = iPtePermissions; |
|
342 |
||
343 |
m.RemapPage(ptid, (TLinAddr)iBase+aOffset, aOldAddr, aNewAddr, perms, iOwningProcess); |
|
344 |
} |
|
345 |
||
346 |
/** |
|
347 |
Get the movability type of the chunk's pages |
|
348 |
@return How movable the chunk's pages are |
|
349 |
*/ |
|
350 |
TZonePageType DMemModelChunk::GetPageType() |
|
351 |
{ |
|
352 |
// Shared chunks have their physical addresses available |
|
353 |
if (iChunkType == ESharedKernelSingle || |
|
354 |
iChunkType == ESharedKernelMultiple || |
|
355 |
iChunkType == ESharedIo || |
|
356 |
iChunkType == ESharedKernelMirror || |
|
357 |
iChunkType == EKernelMessage || |
|
358 |
iChunkType == EKernelData) // Don't move kernel heap pages as DMA may be accessing them. |
|
359 |
{ |
|
360 |
return EPageFixed; |
|
361 |
} |
|
362 |
// All other types of chunk are movable |
|
363 |
return EPageMovable; |
|
364 |
} |
|
365 |
||
366 |
TInt DMemModelChunk::DoCommit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg) |
|
367 |
{ |
|
368 |
// Commit more RAM to a chunk at a specified offset |
|
369 |
// enter and leave with system unlocked |
|
370 |
// must hold RamAlloc mutex before calling this function |
|
371 |
__ASSERT_MUTEX(MmuBase::RamAllocatorMutex); |
|
372 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCommit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg)); |
|
373 |
TInt offset=aOffset; |
|
374 |
TInt endOffset=offset+aSize; |
|
375 |
TInt newPtId=-1; |
|
376 |
Mmu& m = Mmu::Get(); |
|
377 |
DRamAllocator& a = *m.iRamPageAllocator; |
|
378 |
TInt r=KErrNone; |
|
379 |
TPhysAddr pageList[KMaxPages]; |
|
380 |
TPhysAddr* pPageList=0; // In case of discontiguous commit it points to the list of physical pages. |
|
381 |
TPhysAddr nextPage=0; // In case of contiguous commit, it points to the physical address to commit |
|
382 |
SPageInfo::TType type = SPageInfo::EChunk; |
|
383 |
||
384 |
// Set flag to indicate if RAM should be cleared before being committed. |
|
385 |
// Note: EDll and EUserCode are handled by the code segment code so that regions |
|
386 |
// already written by the loader are not cleared. |
|
387 |
TBool clearRam = iChunkType==EUserData |
|
388 |
|| iChunkType==EDllData |
|
389 |
|| iChunkType==EUserSelfModCode |
|
390 |
|| iChunkType==ESharedKernelSingle |
|
391 |
|| iChunkType==ESharedKernelMultiple |
|
392 |
|| iChunkType==ESharedIo |
|
393 |
|| iChunkType==ERamDrive; |
|
394 |
||
395 |
||
396 |
TBool ownsMemory = !(iAttributes&EMemoryNotOwned); |
|
397 |
TBool physicalCommit = aCommitType&DChunk::ECommitPhysicalMask; |
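// Chunks that own their memory only accept the plain allocating commit types; chunks
// marked EMemoryNotOwned must be given the memory (physical or virtual commit), and for
// those the SPageInfo records are deliberately left untouched (type=EInvalid below).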
|
398 |
if(ownsMemory) |
|
399 |
{ |
|
400 |
if(physicalCommit) |
|
401 |
return KErrNotSupported; |
|
402 |
} |
|
403 |
else |
|
404 |
{ |
|
405 |
if(!physicalCommit && aCommitType != DChunk::ECommitVirtual) |
|
406 |
return KErrNotSupported; |
|
407 |
type = SPageInfo::EInvalid; // to indicate page info not to be updated |
|
408 |
} |
|
409 |
||
410 |
switch(aCommitType) |
|
411 |
{ |
|
412 |
case DChunk::ECommitDiscontiguous: |
|
413 |
// No setup to do |
|
414 |
break; |
|
415 |
||
416 |
case DChunk::ECommitContiguous: |
|
417 |
{ |
|
418 |
// Allocate a block of contiguous RAM from the free pool |
|
419 |
TInt numPages=(endOffset-offset)>>m.iPageShift; |
|
420 |
__NK_ASSERT_DEBUG(EPageFixed == GetPageType()); |
|
421 |
r=m.AllocContiguousRam(numPages<<m.iPageShift, nextPage, 0); |
|
422 |
if (r!=KErrNone) |
423 |
return r; |
|
424 |
if(clearRam) |
|
425 |
m.ClearPages(numPages, (TPhysAddr*)(nextPage|1), iClearByte); // clear RAM if required |
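// Editorial note (assumption about the Mmu::ClearPages contract): setting bit 0 of the
// page-list argument, as done above, is taken to mean "this is the base address of a
// physically contiguous run" rather than a pointer to a list of page addresses.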
|
426 |
*aExtraArg = nextPage; // store physical address of RAM as return argument |
|
427 |
} |
|
428 |
break; |
|
429 |
||
430 |
case DChunk::ECommitDiscontiguousPhysical: |
|
431 |
{ |
|
432 |
pPageList = aExtraArg; // use the pages given to us |
|
433 |
||
434 |
// Check address of pages are multiples of page size... |
|
435 |
TInt numPages=(endOffset-offset)>>m.iPageShift; |
|
436 |
TUint32* ptr = aExtraArg; |
|
437 |
TUint32* endPtr = aExtraArg+numPages; |
|
438 |
if(ptr>=endPtr) |
|
439 |
return KErrNone; // Zero size commit is OK |
|
440 |
TPhysAddr pageBits = 0; |
|
441 |
do |
|
442 |
pageBits |= *ptr++; |
|
443 |
while(ptr<endPtr); |
|
444 |
if(pageBits&(m.iPageSize-1)) |
|
445 |
return KErrArgument; // all addresses must be multiple of page size |
|
446 |
} |
|
447 |
break; |
|
448 |
||
449 |
case DChunk::ECommitContiguousPhysical: |
|
450 |
nextPage = (TPhysAddr)aExtraArg; // we have been given the physical address to use |
|
451 |
if(nextPage&(m.iPageSize-1)) |
|
452 |
return KErrArgument; // address must be multiple of page size |
|
453 |
break; |
|
454 |
||
455 |
case DChunk::ECommitVirtual: |
|
456 |
#ifndef __MARM__ |
|
457 |
return KErrNotSupported; |
|
458 |
#endif |
|
459 |
break; |
|
460 |
||
461 |
default: |
|
462 |
return KErrNotSupported; |
|
463 |
} |
|
464 |
||
465 |
while(offset<endOffset) |
|
466 |
{ |
|
467 |
TInt np=(endOffset-offset)>>m.iPageShift; // pages remaining to satisfy request |
|
468 |
TInt npEnd=(m.iChunkSize-(offset&m.iChunkMask))>>m.iPageShift;// number of pages to end of page table |
|
469 |
if (np>npEnd) |
|
470 |
np=npEnd; // limit to single page table |
|
471 |
if (np>MM::MaxPagesInOneGo) |
|
472 |
np=MM::MaxPagesInOneGo; // limit |
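// Illustrative example (assuming 4KB pages and 1MB page-table coverage, the usual ARM
// settings here): committing 0x25000 bytes starting at offset 0xFF000 first handles the
// single page up to the next 1MB boundary under one page table, then the remaining
// 0x24000 bytes under the next page table, in batches of at most MM::MaxPagesInOneGo.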
|
473 |
TInt ptid=iPageTables[offset>>m.iChunkShift]; |
|
474 |
newPtId=-1; |
|
475 |
if (ptid==0xffff) |
|
476 |
{ |
|
477 |
// need to allocate a new page table |
|
478 |
newPtId=m.AllocPageTable(); |
|
479 |
if (newPtId<0) |
|
480 |
{ |
|
481 |
r=KErrNoMemory; |
|
482 |
break; // Exit the loop. Below, we'll free all ram |
|
483 |
// that is allocated in the previous loop passes. |
|
484 |
} |
|
485 |
ptid=newPtId; |
|
486 |
} |
|
487 |
||
488 |
if(aCommitType==DChunk::ECommitDiscontiguous) |
|
489 |
{ |
|
490 |
pPageList = pageList; |
|
491 |
r=m.AllocRamPages(pPageList,np, GetPageType()); // try to allocate pages |
|
492 |
if (r!=KErrNone) //If fail, clean up what was allocated in this loop. |
|
493 |
{ |
|
494 |
if (newPtId>=0) |
|
495 |
m.FreePageTable(newPtId); |
|
496 |
break; // Exit the loop. Below, we'll free all ram |
|
497 |
// that is allocated in the previous loop passes. |
|
498 |
} |
|
499 |
if(clearRam) |
|
500 |
m.ClearPages(np, pPageList, iClearByte); // clear RAM if required |
|
501 |
} |
|
502 |
||
503 |
TInt commitSize = np<<m.iPageShift; |
|
504 |
||
505 |
||
506 |
// In shared chunks (visible to both user and kernel side), the kernel side is |
|
507 |
// always mapped first; decommitting goes in the reverse order. |
|
508 |
if(iKernelMirror) |
|
509 |
{ |
|
510 |
// Map the same memory into the kernel mirror chunk |
|
511 |
if(pPageList) |
|
512 |
r = iKernelMirror->DoCommit(offset,commitSize,ECommitDiscontiguousPhysical,pPageList); |
|
513 |
else |
|
514 |
r = iKernelMirror->DoCommit(offset,commitSize,ECommitContiguousPhysical,(TUint32*)nextPage); |
|
515 |
__KTRACE_OPT(KMMU,Kern::Printf("iKernelMirror->DoCommit returns %d",r)); |
|
516 |
if(r!=KErrNone) //If fail, clean up what was allocated in this loop. |
|
517 |
{ |
|
518 |
if(aCommitType==DChunk::ECommitDiscontiguous) |
|
519 |
m.FreePages(pPageList,np,EPageFixed); |
|
520 |
if (newPtId>=0) |
|
521 |
m.FreePageTable(newPtId); |
|
522 |
||
523 |
break; // Exit the loop. Below, we'll free all ram |
|
524 |
// that is allocated in the previous loop passes. |
|
525 |
} |
|
526 |
} |
|
527 |
||
528 |
// Commit the memory. |
|
529 |
NKern::LockSystem(); // lock the system while we change the MMU mappings |
|
530 |
iSize += commitSize; // update committed size |
|
531 |
if (aCommitType==DChunk::ECommitVirtual) |
|
532 |
m.MapVirtual(ptid, np); |
|
533 |
else if(pPageList) |
|
534 |
{ |
|
535 |
m.MapRamPages(ptid, type, this, offset, pPageList, np, iPtePermissions); |
|
536 |
pPageList += np; |
|
537 |
} |
|
538 |
else |
|
539 |
{ |
|
540 |
m.MapPhysicalPages(ptid, type, this, offset, nextPage, np, iPtePermissions); |
|
541 |
nextPage += commitSize; |
|
542 |
} |
|
543 |
NKern::UnlockSystem(); |
|
544 |
||
545 |
if (newPtId>=0) |
|
546 |
{ |
|
547 |
// We have allocated a new page table, now we must assign it |
|
548 |
iPageTables[offset>>m.iChunkShift]=ptid; |
|
549 |
TLinAddr addr=(TLinAddr)iBase+offset; // current address |
|
550 |
m.AssignPageTable(ptid, SPageTableInfo::EChunk, this, addr, iPdePermissions); |
|
551 |
newPtId = -1; |
|
552 |
} |
|
553 |
__KTRACE_OPT(KMEMTRACE,Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this)); |
|
554 |
#ifdef BTRACE_CHUNKS |
|
555 |
BTraceContext12(BTrace::EChunks,ownsMemory?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,offset,commitSize); |
|
556 |
#endif |
|
557 |
||
558 |
offset += commitSize; // update offset |
|
559 |
} |
|
560 |
||
561 |
if (r==KErrNone) |
|
562 |
{ |
|
563 |
if(iPermanentPageBitMap) |
|
564 |
iPermanentPageBitMap->Alloc(aOffset>>m.iPageShift,aSize>>m.iPageShift); |
|
565 |
} |
|
566 |
else |
|
567 |
{ |
|
568 |
// We ran out of memory somewhere. |
|
569 |
// Free any memory we succeeded in allocating in the loops before the one that failed |
|
570 |
if (iChunkType != ESharedKernelMirror) //Kernel mirror chunk will be decommited alongside the main chunk. |
|
571 |
{ |
|
572 |
DChunk::TDecommitType decommitType = aCommitType==DChunk::ECommitVirtual ? |
|
573 |
DChunk::EDecommitVirtual : DChunk::EDecommitNormal; |
|
574 |
DoDecommit(aOffset,offset-aOffset,decommitType); |
|
575 |
} |
|
576 |
||
577 |
if(aCommitType==DChunk::ECommitContiguous) |
|
578 |
{ |
|
579 |
// Free the pages we allocated but didn't get around to committing. |
|
580 |
// This has to go page by page; we cannot use FreePhysicalRam here because part of |
|
581 |
// the originally allocated contiguous memory has already been freed (in DoDecommit). |
|
582 |
TPhysAddr last = nextPage + ((endOffset-offset)>>m.iPageShift<<m.iPageShift); |
|
583 |
while(nextPage<last) |
|
584 |
{ |
|
585 |
a.FreeRamPage(nextPage, GetPageType()); |
|
586 |
nextPage += m.iPageSize; |
|
587 |
} |
|
588 |
*aExtraArg = KPhysAddrInvalid; // return invalid physical address |
|
589 |
} |
|
590 |
||
591 |
m.iAllocFailed=ETrue; |
|
592 |
} |
|
593 |
return r; |
|
594 |
} |
|
595 |
||
596 |
void DMemModelChunk::DoDecommit(TInt aOffset, TInt aSize, TDecommitType aDecommitType) |
|
597 |
{ |
|
598 |
// Decommit RAM from a chunk at a specified offset |
|
599 |
// enter and leave with system unlocked |
|
600 |
// must hold RamAlloc mutex before calling this function |
|
601 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoDecommit %x+%x",aOffset,aSize)); |
|
602 |
||
603 |
TBool ownsMemory = !(iAttributes&EMemoryNotOwned); |
|
604 |
||
605 |
TInt deferred=0; |
|
606 |
TInt offset=aOffset; |
|
607 |
TInt endOffset=offset+aSize; |
|
608 |
Mmu& m = Mmu::Get(); |
|
609 |
DRamAllocator& a = *m.iRamPageAllocator; |
|
610 |
TPhysAddr pageList[KMaxPages]; |
|
611 |
TLinAddr linearPageList[KMaxPages]; |
|
612 |
const TAny* asids=GLOBAL_MAPPING; |
|
613 |
if (iOsAsids) |
|
614 |
asids=iOsAsids; |
|
615 |
else if (iOwningProcess) |
|
616 |
asids=(const TAny*)((DMemModelProcess*)iOwningProcess)->iOsAsid; |
|
617 |
TUint size_in_pages = (TUint)(Min(aSize,iSize)>>m.iPageShift); |
|
618 |
TBool sync_decommit = (size_in_pages<m.iDecommitThreshold); |
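// Small decommits (below iDecommitThreshold) do cache maintenance page by page as the
// pages are freed; larger ones defer to a single full cache flush after the loop (see
// the SyncPhysicalCache_All() call near the end of this function).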
|
619 |
TInt total_freed=0; |
|
620 |
while(offset<endOffset) |
|
621 |
{ |
|
622 |
TInt np=(endOffset-offset)>>m.iPageShift; // number of pages remaining to decommit |
|
623 |
TInt pdeEnd=(offset+m.iChunkSize)&~m.iChunkMask; |
|
624 |
TInt npEnd=(pdeEnd-offset)>>m.iPageShift; // number of pages to end of page table |
|
625 |
if (np>npEnd) |
|
626 |
np=npEnd; // limit to single page table |
|
627 |
if (np>MM::MaxPagesInOneGo) |
|
628 |
np=MM::MaxPagesInOneGo; // limit |
|
629 |
TLinAddr addr=(TLinAddr)iBase+offset; // current address |
|
630 |
TInt ptid=iPageTables[offset>>m.iChunkShift]; // get page table ID if a page table is already assigned here |
|
631 |
if (ptid!=0xffff) |
|
632 |
{ |
|
633 |
TInt nPtes=0; |
|
634 |
TInt nUnmapped=0; |
|
635 |
||
636 |
#ifdef BTRACE_CHUNKS |
|
637 |
TUint oldFree = m.FreeRamInBytes(); |
|
638 |
#endif |
|
639 |
// Unmap the pages, clear the PTEs and place the physical addresses of the now-free RAM pages in |
|
640 |
// pageList. Return nPtes=number of pages placed in list, remain=number of PTEs remaining in page table |
|
641 |
// Bit 31 of return value is set if TLB flush may be incomplete |
|
642 |
NKern::LockSystem(); |
|
643 |
TInt remain; |
|
644 |
if (ownsMemory) |
|
645 |
{ |
|
646 |
if (aDecommitType == EDecommitVirtual) |
|
647 |
remain=m.UnmapVirtual(ptid,addr,np,pageList,ETrue,nPtes,nUnmapped,iOwningProcess); |
|
648 |
else |
|
649 |
remain=m.UnmapPages(ptid,addr,np,pageList,ETrue,nPtes,nUnmapped,iOwningProcess); |
|
650 |
} |
|
651 |
else |
|
652 |
{ |
|
653 |
if (aDecommitType == EDecommitVirtual) |
|
654 |
remain=m.UnmapUnownedVirtual(ptid,addr,np,pageList,linearPageList,nPtes,nUnmapped,iOwningProcess); |
|
655 |
else |
|
656 |
remain=m.UnmapUnownedPages(ptid,addr,np,pageList,linearPageList,nPtes,nUnmapped,iOwningProcess); |
|
657 |
} |
|
658 |
TInt nFree = ownsMemory ? nUnmapped : 0; //The number of pages to free |
|
659 |
deferred |= remain; |
|
660 |
TInt decommitSize=nPtes<<m.iPageShift; |
|
661 |
iSize-=decommitSize; // reduce the committed size |
|
662 |
NKern::UnlockSystem(); |
|
663 |
||
664 |
||
665 |
||
666 |
__KTRACE_OPT(KMEMTRACE,Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this)); |
|
667 |
#ifdef BTRACE_CHUNKS |
|
668 |
TUint reclaimed = (oldFree-m.FreeRamInBytes())>>m.iPageShift; // number of 'unlocked' pages reclaimed from ram cache |
|
669 |
if(nFree-reclaimed) |
|
670 |
BTraceContext12(BTrace::EChunks,ownsMemory?BTrace::EChunkMemoryDeallocated:BTrace::EChunkMemoryRemoved,this,offset,(nFree-reclaimed)<<m.iPageShift); |
|
671 |
#endif |
|
672 |
||
673 |
if (sync_decommit && (remain & KUnmapPagesTLBFlushDeferred)) |
|
674 |
{ |
|
675 |
// must ensure DTLB flushed before doing cache purge on decommit |
|
676 |
m.GenericFlush(Mmu::EFlushDTLB); |
|
677 |
} |
|
678 |
||
679 |
// if page table is now completely empty, unassign it and update chunk PDE info |
|
680 |
remain &= KUnmapPagesCountMask; |
|
681 |
if (remain==0) |
|
682 |
{ |
|
683 |
m.DoUnassignPageTable(addr, asids); |
|
684 |
m.FreePageTable(ptid); |
|
685 |
iPageTables[offset>>m.iChunkShift]=0xffff; |
|
686 |
} |
|
687 |
||
688 |
// Physical memory not owned by the chunk must have its cached contents preserved (written back), as the owner may still be using it. |
|
689 |
if(!ownsMemory) |
|
690 |
{ |
|
691 |
// If the chunk has a kernel mirror, it is enough to do this once, when the mirror itself is decommitted. |
|
692 |
if (!iKernelMirror) |
|
693 |
{ |
|
694 |
TInt i; |
|
695 |
for (i=0;i<nUnmapped;i++) |
|
696 |
m.CacheMaintenanceOnPreserve(pageList[i], KPageSize, linearPageList[i], iMapAttr); |
|
697 |
} |
|
698 |
} |
|
699 |
else if (nFree) |
|
700 |
{ |
|
701 |
// We can now return the decommitted pages to the free page list and sort out caching. |
|
702 |
total_freed+=nFree; |
|
703 |
if (sync_decommit) //Purge cache if the size is below decommit threshold |
|
704 |
m.CacheMaintenanceOnDecommit(pageList, nFree); |
|
705 |
a.FreeRamPages(pageList,nFree, GetPageType()); |
|
706 |
} |
|
707 |
||
708 |
offset+=(np<<m.iPageShift); |
|
709 |
} |
|
710 |
else |
|
711 |
{ |
|
712 |
__KTRACE_OPT(KMMU,Kern::Printf("No page table at %08x",addr)); |
|
713 |
if ((iAttributes&EDisconnected)==0) |
|
714 |
MM::Panic(MM::EChunkDecommitNoPageTable); |
|
715 |
offset=pdeEnd; // disconnected chunk - step on to next PDE |
|
716 |
} |
|
717 |
} |
|
718 |
if (deferred & KUnmapPagesTLBFlushDeferred) |
|
719 |
m.GenericFlush( (iAttributes&ECode) ? Mmu::EFlushDTLB|Mmu::EFlushITLB : Mmu::EFlushDTLB ); |
|
720 |
||
721 |
if (total_freed && !sync_decommit) //Flash entire cache if the size exceeds decommit threshold |
|
722 |
CacheMaintenance::SyncPhysicalCache_All(); //On ARMv6, this deals with both L1 & L2 cache |
|
723 |
||
724 |
// The kernel-mapped part of the chunk is removed last. By this point no user-side |
|
725 |
// mapping remains, which ensures that evicting the data from the cache will succeed. |
|
726 |
if(iKernelMirror) |
|
727 |
iKernelMirror->DoDecommit(aOffset,aSize); |
|
728 |
} |
|
729 |
||
730 |
||
731 |
TInt DMemModelChunk::AdjustDoubleEnded(TInt aBottom, TInt aTop) |
|
732 |
// |
|
733 |
// Adjust a double-ended chunk. |
|
734 |
// |
|
735 |
{ |
|
736 |
||
737 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AdjustDoubleEnded %x-%x",aBottom,aTop)); |
|
738 |
if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded) |
|
739 |
return KErrGeneral; |
|
740 |
if (aTop<0 || aBottom<0 || aTop<aBottom || aTop>iMaxSize) |
|
741 |
return KErrArgument; |
|
742 |
Mmu& m = Mmu::Get(); |
|
743 |
aBottom &= ~m.iPageMask; |
|
744 |
aTop=(aTop+m.iPageMask)&~m.iPageMask; |
|
745 |
TInt newSize=aTop-aBottom; |
|
746 |
if (newSize>iMaxSize) |
|
747 |
return KErrArgument; |
|
748 |
||
749 |
Mmu::Wait(); |
|
750 |
TInt initBottom=iStartPos; |
|
751 |
TInt initTop=iStartPos+iSize; |
|
752 |
TInt nBottom=Max(aBottom,iStartPos); // intersection bottom |
|
753 |
TInt nTop=Min(aTop,iStartPos+iSize); // intersection top |
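// Worked example (offsets assumed page aligned): existing region [0x3000,0x8000) and
// request [0x1000,0x5000) give nBottom=0x3000, nTop=0x5000; the code below decommits
// [0x5000,0x8000), commits [0x1000,0x3000) and finally moves iStartPos to 0x1000.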
|
754 |
TInt r=KErrNone; |
|
755 |
if (nBottom<nTop) |
|
756 |
{ |
|
757 |
__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions intersect")); |
|
758 |
if (initBottom<nBottom) |
|
759 |
{ |
|
760 |
iStartPos=aBottom; |
|
761 |
DoDecommit(initBottom,nBottom-initBottom); |
|
762 |
} |
|
763 |
if (initTop>nTop) |
|
764 |
DoDecommit(nTop,initTop-nTop); // this changes iSize |
|
765 |
if (aBottom<nBottom) |
|
766 |
{ |
|
767 |
r=DoCommit(aBottom,nBottom-aBottom); |
|
768 |
if (r==KErrNone) |
|
769 |
{ |
|
770 |
if (aTop>nTop) |
|
771 |
r=DoCommit(nTop,aTop-nTop); |
|
772 |
if (r==KErrNone) |
|
773 |
iStartPos=aBottom; |
|
774 |
else |
|
775 |
DoDecommit(aBottom,nBottom-aBottom); |
|
776 |
} |
|
777 |
} |
|
778 |
else if (aTop>nTop) |
|
779 |
r=DoCommit(nTop,aTop-nTop); |
|
780 |
} |
|
781 |
else |
|
782 |
{ |
|
783 |
__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions disjoint")); |
|
784 |
if (iSize) |
|
785 |
DoDecommit(initBottom,iSize); |
|
786 |
iStartPos=aBottom; |
|
787 |
if (newSize) |
|
788 |
r=DoCommit(iStartPos,newSize); |
|
789 |
} |
|
790 |
Mmu::Signal(); |
|
791 |
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this); |
|
792 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x+%x base %08x",this,iStartPos,iSize,iBase)); |
|
793 |
return r; |
|
794 |
} |
|
795 |
||
796 |
TInt DMemModelChunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg) |
|
797 |
// |
|
798 |
// Commit to a disconnected chunk. |
|
799 |
// |
|
800 |
{ |
|
801 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg)); |
|
802 |
if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected) |
|
803 |
return KErrGeneral; |
|
804 |
if (aOffset<0 || aSize<0) |
|
805 |
return KErrArgument; |
|
806 |
if (aSize==0) |
|
807 |
return KErrNone; |
|
808 |
Mmu& m = Mmu::Get(); |
|
809 |
aSize+=(aOffset & m.iPageMask); |
|
810 |
aOffset &= ~m.iPageMask; |
|
811 |
aSize=(aSize+m.iPageMask)&~m.iPageMask; |
|
812 |
if ((aOffset+aSize)>iMaxSize) |
|
813 |
return KErrArgument; |
|
814 |
||
815 |
Mmu::Wait(); |
|
816 |
TInt r=KErrNone; |
|
817 |
TInt i=aOffset>>m.iPageShift; |
|
818 |
TInt n=aSize>>m.iPageShift; |
|
819 |
if (iPageBitMap->NotFree(i,n)) |
|
820 |
r=KErrAlreadyExists; |
|
821 |
else |
|
822 |
{ |
|
823 |
r=DoCommit(aOffset,aSize,aCommitType,aExtraArg); |
|
824 |
if (r==KErrNone) |
|
825 |
iPageBitMap->Alloc(i,n); |
|
826 |
} |
|
827 |
Mmu::Signal(); |
|
828 |
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this); |
|
829 |
return r; |
|
830 |
} |
|
831 |
||
832 |
TInt DMemModelChunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign) |
|
833 |
// |
|
834 |
// Allocate offset and commit to a disconnected chunk. |
|
835 |
// |
|
836 |
{ |
|
837 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate %x %x %d",aSize,aGuard,aAlign)); |
|
838 |
||
839 |
// Only allow this to be called on disconnected chunks and not disconnected |
|
840 |
// cache chunks, as when guard pages exist the bit map can't be used to determine |
|
841 |
// the size of disconnected cache chunks as is required by Decommit(). |
|
842 |
if ((iAttributes & (EDoubleEnded|EDisconnected|ECache))!=EDisconnected) |
|
843 |
return KErrGeneral; |
|
844 |
||
845 |
if (aSize<=0 || aGuard<0) |
|
846 |
return KErrArgument; |
|
847 |
Mmu& m = Mmu::Get(); |
|
848 |
aAlign=Max(aAlign-m.iPageShift,0); |
|
849 |
TInt base=TInt(TLinAddr(iBase)>>m.iPageShift); |
|
850 |
aSize=(aSize+m.iPageMask)&~m.iPageMask; |
|
851 |
aGuard=(aGuard+m.iPageMask)&~m.iPageMask; |
|
852 |
if ((aSize+aGuard)>iMaxSize) |
|
853 |
return KErrArgument; |
|
854 |
||
855 |
Mmu::Wait(); |
|
856 |
TInt r=KErrNone; |
|
857 |
TInt n=(aSize+aGuard)>>m.iPageShift; |
|
858 |
TInt i=iPageBitMap->AllocAligned(n,aAlign,base,EFalse); // allocate the offset |
|
859 |
if (i<0) |
|
860 |
r=KErrNoMemory; // run out of reserved space for this chunk |
|
861 |
else |
|
862 |
{ |
|
863 |
TInt offset=i<<m.iPageShift; |
|
864 |
__KTRACE_OPT(KMMU,Kern::Printf("Offset %x allocated",offset)); |
|
865 |
r=DoCommit(offset+aGuard,aSize); |
|
866 |
if (r==KErrNone) |
|
867 |
{ |
|
868 |
iPageBitMap->Alloc(i,n); |
|
869 |
r=offset; // if operation successful, return allocated offset |
|
870 |
} |
|
871 |
} |
|
872 |
Mmu::Signal(); |
|
873 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate returns %x",r)); |
|
874 |
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this); |
|
875 |
return r; |
|
876 |
} |
|
877 |
||
878 |
TInt DMemModelChunk::Decommit(TInt aOffset, TInt aSize) |
|
879 |
// |
|
880 |
// Decommit from a disconnected chunk. |
|
881 |
// |
|
882 |
{ |
|
883 |
return Decommit(aOffset, aSize, EDecommitNormal); |
|
884 |
} |
|
885 |
||
886 |
TInt DMemModelChunk::Decommit(TInt aOffset, TInt aSize, TDecommitType aDecommitType) |
|
887 |
// |
|
888 |
// Decommit from a disconnected chunk |
|
889 |
// |
|
890 |
// @param aDecommitType Used to indicate whether area was originally committed with the |
|
891 |
// ECommitVirtual type |
|
892 |
// |
|
893 |
{ |
|
894 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",aOffset,aSize)); |
|
895 |
if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected) |
|
896 |
return KErrGeneral; |
|
897 |
if (aOffset<0 || aSize<0) |
|
898 |
return KErrArgument; |
|
899 |
if (aSize==0) |
|
900 |
return KErrNone; |
|
901 |
#ifndef __MARM__ |
|
902 |
if (aDecommitType == EDecommitVirtual) |
|
903 |
return KErrNotSupported; |
|
904 |
#endif |
|
905 |
Mmu& m = Mmu::Get(); |
|
906 |
aSize+=(aOffset & m.iPageMask); |
|
907 |
aOffset &= ~m.iPageMask; |
|
908 |
aSize=(aSize+m.iPageMask)&~m.iPageMask; |
|
909 |
if ((aOffset+aSize)>iMaxSize) |
|
910 |
return KErrArgument; |
|
911 |
||
912 |
Mmu::Wait(); |
|
913 |
||
914 |
// limit the range to the home region range |
|
915 |
__KTRACE_OPT(KMMU,Kern::Printf("Rounded and Clipped range %x+%x",aOffset,aSize)); |
|
916 |
||
917 |
TInt i=aOffset>>m.iPageShift; |
|
918 |
TInt n=aSize>>m.iPageShift; |
|
919 |
||
920 |
__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n)); |
|
921 |
TUint oldAvail = iPageBitMap->iAvail; |
|
922 |
TUint oldSize = iSize; |
|
923 |
||
924 |
// Free those positions which are still committed and also any guard pages, |
|
925 |
// i.e. pages that are reserved in this chunk but which are not committed. |
|
926 |
iPageBitMap->SelectiveFree(i,n); |
|
927 |
||
928 |
DoDecommit(aOffset,aSize,aDecommitType); |
|
929 |
||
930 |
if (iAttributes & ECache) |
|
931 |
{// If this is the file server cache chunk then adjust the size based |
|
932 |
// on the bit map size because:- |
|
933 |
// - Unlocked and reclaimed pages will be unmapped without updating |
|
934 |
// iSize or the bit map. |
|
935 |
// - DoDecommit() only decommits the mapped pages. |
|
936 |
// For all other chunks what is mapped is what is committed to the |
|
937 |
// chunk so iSize is accurate. |
|
938 |
TUint actualFreedPages = iPageBitMap->iAvail - oldAvail; |
|
939 |
iSize = oldSize - (actualFreedPages << KPageShift); |
|
940 |
} |
|
941 |
||
942 |
Mmu::Signal(); |
|
943 |
__DEBUG_EVENT(EEventUpdateChunk, this); |
|
944 |
return KErrNone; |
|
945 |
} |
|
946 |
||
947 |
TInt DMemModelChunk::Unlock(TInt aOffset, TInt aSize) |
|
948 |
{ |
|
949 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Unlock %x+%x",aOffset,aSize)); |
|
950 |
if (!(iAttributes&ECache)) |
|
951 |
return KErrGeneral; |
|
952 |
if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected) |
|
953 |
return KErrGeneral; |
|
954 |
||
955 |
// Mark this as the file server cache chunk. This is safe as it is only the |
|
956 |
// file server that can invoke this function. |
|
957 |
iAttributes |= ECache; |
|
958 |
||
959 |
if (aOffset<0 || aSize<0) |
|
960 |
return KErrArgument; |
|
961 |
if (aSize==0) |
|
962 |
return KErrNone; |
|
963 |
Mmu& m = Mmu::Get(); |
|
964 |
aSize+=(aOffset & m.iPageMask); |
|
965 |
aOffset &= ~m.iPageMask; |
|
966 |
aSize=(aSize+m.iPageMask)&~m.iPageMask; |
|
967 |
if ((aOffset+aSize)>iMaxSize) |
|
968 |
return KErrArgument; |
|
969 |
||
970 |
Mmu::Wait(); |
|
971 |
TInt r=KErrNone; |
|
972 |
TInt i=aOffset>>m.iPageShift; |
|
973 |
TInt n=aSize>>m.iPageShift; |
|
974 |
if (iPageBitMap->NotAllocated(i,n)) |
|
975 |
r=KErrNotFound; |
|
976 |
else |
|
977 |
{ |
|
978 |
#ifdef BTRACE_CHUNKS |
|
979 |
TUint oldFree = m.FreeRamInBytes(); |
|
980 |
#endif |
|
981 |
r=m.UnlockRamCachePages((TLinAddr)(iBase+aOffset),n,iOwningProcess); |
|
982 |
#ifdef BTRACE_CHUNKS |
|
983 |
if(r==KErrNone) |
|
984 |
{ |
|
985 |
TUint unlocked = m.FreeRamInBytes()-oldFree; // size of memory unlocked |
|
986 |
if(unlocked) |
|
987 |
BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryDeallocated,this,aOffset,unlocked); |
|
988 |
} |
|
989 |
#endif |
|
990 |
} |
|
991 |
||
992 |
Mmu::Signal(); |
|
993 |
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this); |
|
994 |
return r; |
|
995 |
} |
|
996 |
||
997 |
TInt DMemModelChunk::Lock(TInt aOffset, TInt aSize) |
|
998 |
{ |
|
999 |
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Lock %x+%x",aOffset,aSize)); |
|
1000 |
if (!(iAttributes&ECache)) |
|
1001 |
return KErrGeneral; |
|
1002 |
if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected) |
|
1003 |
return KErrGeneral; |
|
1004 |
if (aOffset<0 || aSize<0) |
|
1005 |
return KErrArgument; |
|
1006 |
if (aSize==0) |
|
1007 |
return KErrNone; |
|
1008 |
Mmu& m = Mmu::Get(); |
|
1009 |
aSize+=(aOffset & m.iPageMask); |
|
1010 |
aOffset &= ~m.iPageMask; |
|
1011 |
aSize=(aSize+m.iPageMask)&~m.iPageMask; |
|
1012 |
if ((aOffset+aSize)>iMaxSize) |
|
1013 |
return KErrArgument; |
|
1014 |
||
1015 |
Mmu::Wait(); |
|
1016 |
TInt r=KErrNone; |
|
1017 |
TInt i=aOffset>>m.iPageShift; |
|
1018 |
TInt n=aSize>>m.iPageShift; |
|
1019 |
if (iPageBitMap->NotAllocated(i,n)) |
|
1020 |
r=KErrNotFound; |
|
1021 |
else |
|
1022 |
{ |
|
1023 |
#ifdef BTRACE_CHUNKS |
|
1024 |
TUint oldFree = m.FreeRamInBytes(); |
|
1025 |
#endif |
|
1026 |
r=m.LockRamCachePages((TLinAddr)(iBase+aOffset),n,iOwningProcess); |
|
1027 |
#ifdef BTRACE_CHUNKS |
|
1028 |
if(r==KErrNone) |
|
1029 |
{ |
|
1030 |
TUint locked = oldFree-m.FreeRamInBytes(); |
|
1031 |
if(locked) |
|
1032 |
BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryAllocated,this,aOffset,locked); |
|
1033 |
} |
|
1034 |
#endif |
|
1035 |
} |
|
1036 |
if(r!=KErrNone) |
|
1037 |
{ |
|
1038 |
// decommit memory on error... |
|
1039 |
__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n)); |
|
1040 |
TUint oldAvail = iPageBitMap->iAvail; |
|
1041 |
iPageBitMap->SelectiveFree(i,n); // free those positions which are actually allocated |
|
1042 |
TUint oldSize = iSize; |
|
1043 |
||
1044 |
DoDecommit(aOffset,aSize); |
|
1045 |
||
1046 |
// Use the bit map to adjust the size of the chunk as unlocked and reclaimed pages |
|
1047 |
// will have been unmapped but not removed from the bit map as DoDecommit() only |
|
1048 |
// decommits the mapped pages. |
|
1049 |
TUint actualFreedPages = iPageBitMap->iAvail - oldAvail; |
|
1050 |
iSize = oldSize - (actualFreedPages << KPageShift); |
|
1051 |
} |
|
1052 |
||
1053 |
Mmu::Signal(); |
|
1054 |
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this); |
|
1055 |
return r; |
|
1056 |
} |
|
1057 |
||
1058 |
TInt DMemModelChunk::AllocateAddress() |
|
1059 |
{ |
|
1060 |
__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O AllocateAddress()",this)); |
|
1061 |
TLinearSection* s=LinearSection(); |
|
1062 |
if (!s) |
|
1063 |
return KErrNone; // chunk has fixed preallocated address |
|
1064 |
||
1065 |
Mmu& m=Mmu::Get(); |
|
1066 |
TUint32 required=iMaxSize>>m.iChunkShift; |
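// e.g. with the usual 1MB section granularity a chunk whose iMaxSize is 4MB needs four
// consecutive free slots in the owning TLinearSection's allocator.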
|
1067 |
__KTRACE_OPT(KMMU,Kern::Printf("Searching from low to high addresses")); |
|
1068 |
TInt r=s->iAllocator.AllocConsecutive(required, EFalse); |
|
1069 |
if (r<0) |
|
1070 |
return KErrNoMemory; |
|
1071 |
s->iAllocator.Alloc(r, required); |
|
1072 |
iBase=(TUint8*)(s->iBase + (r<<m.iChunkShift)); |
|
1073 |
__KTRACE_OPT(KMMU,Kern::Printf("Address %08x allocated",iBase)); |
|
1074 |
return KErrNone; |
|
1075 |
} |
|
1076 |
||
1077 |
void DMemModelChunk::ApplyPermissions(TInt aOffset, TInt aSize, TPte aPtePerm) |
|
1078 |
{ |
|
1079 |
__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O ApplyPermissions(%x+%x,%08x)",this,aOffset,aSize,aPtePerm)); |
|
1080 |
__ASSERT_ALWAYS(aOffset>=0 && aSize>=0, MM::Panic(MM::EChunkApplyPermissions1)); |
|
1081 |
if (aSize==0) |
|
1082 |
return; |
|
1083 |
Mmu& m=Mmu::Get(); |
|
1084 |
aOffset &= ~m.iPageMask; |
|
1085 |
aSize=(aSize+m.iPageMask)&~m.iPageMask; |
|
1086 |
TInt endOffset=aOffset+aSize; |
|
1087 |
__ASSERT_ALWAYS(endOffset<=iMaxSize, MM::Panic(MM::EChunkApplyPermissions2)); |
|
1088 |
||
1089 |
Mmu::Wait(); |
|
1090 |
while(aOffset<endOffset) |
|
1091 |
{ |
|
1092 |
TInt ptid=iPageTables[aOffset>>m.iChunkShift]; |
|
1093 |
TInt pdeEnd=(aOffset+m.iChunkSize)&~m.iChunkMask; |
|
1094 |
if (ptid==0xffff) |
|
1095 |
{ |
|
1096 |
aOffset=pdeEnd; |
|
1097 |
continue; |
|
1098 |
} |
|
1099 |
TInt np=(endOffset-aOffset)>>m.iPageShift; // number of pages remaining to process |
|
1100 |
TInt npEnd=(pdeEnd-aOffset)>>m.iPageShift; // number of pages to end of page table |
|
1101 |
if (np>npEnd) |
|
1102 |
np=npEnd; // limit to single page table |
|
1103 |
if (np>MM::MaxPagesInOneGo) |
|
1104 |
np=MM::MaxPagesInOneGo; // limit |
|
1105 |
m.ApplyPagePermissions(ptid, (aOffset&m.iChunkMask)>>m.iPageShift, np, aPtePerm); |
|
1106 |
aOffset+=(np<<m.iPageShift); |
|
1107 |
} |
|
1108 |
Mmu::Signal(); |
|
1109 |
} |
|
1110 |
||
1111 |
TInt DMemModelChunkHw::Close(TAny*) |
|
1112 |
{ |
|
1113 |
__KTRACE_OPT(KOBJECT,Kern::Printf("DMemModelChunkHw::Close %d %O",AccessCount(),this)); |
|
1114 |
TInt r=Dec(); |
|
1115 |
if (r==1) |
|
1116 |
{ |
|
1117 |
if (iLinAddr) |
|
1118 |
{ |
|
1119 |
// Save data for cache maintenance before it is destroyed by DeallocateLinearAddress |
|
1120 |
TPhysAddr pa = iPhysAddr; |
|
1121 |
TLinAddr la = iLinAddr; |
|
1122 |
TInt size = iSize; |
|
1123 |
TUint attr = iAttribs; |
|
1124 |
||
1125 |
MmuBase& m=*MmuBase::TheMmu; |
|
1126 |
MmuBase::Wait(); |
|
1127 |
m.Unmap(iLinAddr,iSize); |
|
1128 |
MmuBase::Signal(); |
|
1129 |
DeallocateLinearAddress(); |
|
1130 |
||
1131 |
// Physical memory has to be evicted from cache(s). |
|
1132 |
// Must be preserved as it can still be in use by the driver. |
|
1133 |
MmuBase::Wait(); |
|
1134 |
m.CacheMaintenanceOnPreserve(pa, size ,la ,attr); |
|
1135 |
MmuBase::Signal(); |
|
1136 |
} |
|
1137 |
K::ObjDelete(this); |
|
1138 |
} |
|
1139 |
return r; |
|
1140 |
} |
|
1141 |
||
1142 |
TInt DMemModelChunk::CheckAccess() |
|
1143 |
{ |
|
1144 |
DProcess* pP=TheCurrentThread->iOwningProcess; |
|
1145 |
if (iAttributes&EPrivate) |
|
1146 |
{ |
|
1147 |
if (iOwningProcess && iOwningProcess!=pP && pP!=K::TheKernelProcess) |
|
1148 |
return KErrAccessDenied; |
|
1149 |
} |
|
1150 |
return KErrNone; |
|
1151 |
} |
|
1152 |
||
1153 |
||
1154 |
void DMemModelChunk::BTracePrime(TInt aCategory) |
|
1155 |
{ |
|
1156 |
DChunk::BTracePrime(aCategory); |
|
1157 |
||
1158 |
#ifdef BTRACE_CHUNKS |
|
1159 |
if (aCategory == BTrace::EChunks || aCategory == -1) |
|
1160 |
{ |
|
1161 |
MmuBase::Wait(); |
|
1162 |
||
1163 |
TBool memoryOwned = !(iAttributes&EMemoryNotOwned); |
|
1164 |
MmuBase& m=*MmuBase::TheMmu; |
|
1165 |
TInt committedBase = -1; |
|
1166 |
||
1167 |
// look at each page table in this chunk... |
|
1168 |
TUint chunkEndIndex = iMaxSize>>KChunkShift; |
|
1169 |
for(TUint chunkIndex=0; chunkIndex<chunkEndIndex; ++chunkIndex) |
|
1170 |
{ |
|
1171 |
TInt ptid = iPageTables[chunkIndex]; |
|
1172 |
if(ptid==0xffff) |
|
1173 |
{ |
|
1174 |
// no page table... |
|
1175 |
if(committedBase!=-1) |
|
1176 |
{ |
|
1177 |
TUint committedEnd = chunkIndex*KChunkSize; |
|
1178 |
BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase); |
|
1179 |
committedBase = -1; |
|
1180 |
} |
|
1181 |
continue; |
|
1182 |
} |
|
1183 |
||
1184 |
TPte* pPte=(TPte*)m.PageTableLinAddr(ptid); |
|
1185 |
||
1186 |
// look at each page in page table... |
|
1187 |
NKern::LockSystem(); |
|
1188 |
for(TUint pageIndex=0; pageIndex<KChunkSize/KPageSize; ++pageIndex) |
|
1189 |
{ |
|
1190 |
TBool committed = false; |
|
1191 |
TPhysAddr phys = m.PtePhysAddr(pPte[pageIndex], pageIndex); |
|
1192 |
if(phys!=KPhysAddrInvalid) |
|
1193 |
{ |
|
1194 |
// we have a page... |
|
1195 |
if(!memoryOwned) |
|
1196 |
committed = true; |
|
1197 |
else |
|
1198 |
{ |
|
1199 |
// make sure we own the page... |
|
1200 |
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(phys); |
|
1201 |
if(pi && pi->Type()==SPageInfo::EChunk && pi->Owner()==this) |
|
1202 |
committed = true; |
|
1203 |
} |
|
1204 |
} |
|
1205 |
||
1206 |
if(committed) |
|
1207 |
{ |
|
1208 |
if(committedBase==-1) |
|
1209 |
committedBase = chunkIndex*KChunkSize+pageIndex*KPageSize; // start of new region |
|
1210 |
} |
|
1211 |
else |
|
1212 |
{ |
|
1213 |
if(committedBase!=-1) |
|
1214 |
{ |
|
1215 |
// generate trace for region... |
|
1216 |
NKern::FlashSystem(); |
|
1217 |
TUint committedEnd = chunkIndex*KChunkSize+pageIndex*KPageSize; |
|
1218 |
BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase); |
|
1219 |
committedBase = -1; |
|
1220 |
} |
|
1221 |
} |
|
1222 |
||
1223 |
if((pageIndex&15)==0) |
|
1224 |
NKern::FlashSystem(); |
|
1225 |
} |
|
1226 |
||
1227 |
NKern::UnlockSystem(); |
|
1228 |
} |
|
1229 |
||
1230 |
if(committedBase!=-1) |
|
1231 |
{ |
|
1232 |
TUint committedEnd = chunkEndIndex*KChunkSize; |
|
1233 |
BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase); |
|
1234 |
} |
|
1235 |
||
1236 |
MmuBase::Signal(); |
|
1237 |
} |
|
1238 |
#endif |
|
1239 |
} |