// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\moving\mchunk.cpp
//
//

#include "memmodel.h"
#include "cache_maintenance.h"
#include <mmubase.inl>
#include <ramalloc.h>

DMemModelChunk::DMemModelChunk()
	{
	}

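// Tear down a chunk: decommit any remaining memory according to the chunk
// type, release the home-section address range if one was allocated, free
// the PDE and page bitmaps, and queue the destruction DFC if one is attached.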
void DMemModelChunk::Destruct()
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("DMemModelChunk destruct %O",this));
	Mmu& m = Mmu::Get();
	TInt nPdes=iMaxSize>>m.iChunkShift;
	if (nPdes<=32 || iPdeBitMap!=NULL)
		{
		if ((iAttributes & EDisconnected) && iPageBitMap!=NULL)
			Decommit(0,iMaxSize);
		else if (iAttributes & EDoubleEnded)
			AdjustDoubleEnded(0,0);
		else
			Adjust(0);
		}

	if ((iAttributes&EFixedAddress) && iHomeRegionBase>=m.iKernelSection->iBase)
		{
		Mmu::Wait();
		__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::~DMemModelChunk remove region"));
		if (TLinAddr(iBase)==iHomeBase)
			iBase=NULL;
		DeallocateHomeAddress();	// unlink from home section queue
		iHomeRegionBase=0;
		iHomeBase=0;
		Mmu::Signal();
		}
	if ((iMaxSize>>m.iChunkShift) > 32)
		{
		TAny* pM = __e32_atomic_swp_ord_ptr(&iPdeBitMap, 0);
		Kern::Free(pM);
		}
	TBitMapAllocator* pM = (TBitMapAllocator*)__e32_atomic_swp_ord_ptr(&iPageBitMap, 0);
	delete pM;
	pM = (TBitMapAllocator*)__e32_atomic_swp_ord_ptr(&iPermanentPageBitMap, 0);
	delete pM;

	TDfc* dfc = (TDfc*)__e32_atomic_swp_ord_ptr(&iDestroyedDfc, 0);
	if(dfc)
		dfc->Enque();

	__KTRACE_OPT(KMEMTRACE, {Mmu::Wait(); Kern::Printf("MT:D %d %x %O",NTickCount(),this,this);Mmu::Signal();});
#ifdef BTRACE_CHUNKS
	BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);
#endif
	}

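// Drop a reference on the chunk. If aPtr is non-NULL it identifies the
// process whose address space the chunk should first be removed from.
// The chunk is deleted when the last reference goes.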
TInt DMemModelChunk::Close(TAny* aPtr)
	{
	if (aPtr)
		{
		DMemModelProcess* pP=(DMemModelProcess*)aPtr;
		pP->RemoveChunk(this);
		}
	TInt r=Dec();
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Close %d %O",r,this));
	__NK_ASSERT_DEBUG(r > 0); // Should never be negative.
	if (r==1)
		{
		K::ObjDelete(this);
		return EObjectDeleted;
		}
	return 0;
	}

TUint8* DMemModelChunk::Base(DProcess* aProcess)
	{
	return iBase;
	}

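// Second-phase construction: round the requested maximum size up to PDE
// (chunk) granularity, then allocate the bookkeeping structures - a PDE
// bitmap when the chunk spans more than 32 PDEs, a page bitmap for
// disconnected chunks, and a permanent page bitmap for shared kernel
// chunks (used later by Address() and PhysicalAddress()).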
TInt DMemModelChunk::DoCreate(SChunkCreateInfo& aInfo)
	{
	__ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));

	if (aInfo.iMaxSize<=0)
		return KErrArgument;
	Mmu& m=Mmu::Get();
	TInt nPdes=(aInfo.iMaxSize+m.iChunkMask)>>m.iChunkShift;
	iMaxSize=nPdes<<m.iChunkShift;
	iMapAttr = aInfo.iMapAttr;
	SetupPermissions();
	if (nPdes>32)
		{
		TInt words=(nPdes+31)>>5;
		iPdeBitMap=(TUint32*)Kern::Alloc(words*sizeof(TUint32));
		if (!iPdeBitMap)
			return KErrNoMemory;
		memclr(iPdeBitMap, words*sizeof(TUint32));
		}
	else
		iPdeBitMap=NULL;

	TInt maxpages=iMaxSize>>m.iPageShift;
	if (iAttributes & EDisconnected)
		{
		TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pM)
			return KErrNoMemory;
		iPageBitMap=pM;
		__KTRACE_OPT(KMMU,Kern::Printf("PageBitMap at %08x, MaxPages %d",pM,maxpages));
		}
	if(iChunkType==ESharedKernelSingle || iChunkType==ESharedKernelMultiple)
		{
		TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pM)
			return KErrNoMemory;
		iPermanentPageBitMap = pM;
		}
	__KTRACE_OPT(KMEMTRACE, {Mmu::Wait();Kern::Printf("MT:C %d %x %O",NTickCount(),this,this);Mmu::Signal();});
#ifdef BTRACE_CHUNKS
	TKName nameBuf;
	Name(nameBuf);
	BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
	if(iOwningProcess)
		BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
	BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
#endif
	return KErrNone;
	}

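// Take ownership of pages already mapped for the initial committed region
// of the chunk (e.g. by the bootstrap): walk each page table covering
// [0,iSize), record it as belonging to this chunk, and update the SPageInfo
// of every present page. The system lock is flashed periodically so this
// doesn't hold off the rest of the system.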
void DMemModelChunk::ClaimInitialPages()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O ClaimInitialPages()",this));
	Mmu& m=Mmu::Get();
	TInt offset=0;
	TUint32 ccp=K::CompressKHeapPtr(this);
	NKern::LockSystem();
	while(offset<iSize)
		{
		TInt ptid=m.GetPageTableId(TLinAddr(iBase)+offset);
		__ASSERT_ALWAYS(ptid>=0,MM::Panic(MM::EClaimInitialPagesBadPageTable));
		__KTRACE_OPT(KMMU,Kern::Printf("Offset %x PTID=%d",offset,ptid));
		AddPde(offset);
		SPageTableInfo& ptinfo = m.PtInfo(ptid);
		ptinfo.SetChunk(ccp,offset>>m.iChunkShift);
		TPte* pPte=(TPte*)m.PageTableLinAddr(ptid);
		TInt i;
		TInt np = 0;
		TInt flashCount = MM::MaxPagesInOneGo;
		for (i=0; i<m.iChunkSize>>m.iPageShift; ++i, offset+=m.iPageSize)
			{
			if(--flashCount<=0)
				{
				flashCount = MM::MaxPagesInOneGo;
				NKern::FlashSystem();
				}
			TPte pte=pPte[i];
			if (m.PteIsPresent(pte))
				{
				++np;
				TPhysAddr phys=m.PtePhysAddr(pte, i);
				__KTRACE_OPT(KMMU,Kern::Printf("Offset %x phys %08x",offset,phys));
				SPageInfo* pi = SPageInfo::SafeFromPhysAddr(phys);
				if (pi)
					{
					pi->SetChunk(this,offset>>m.iPageShift);
#ifdef BTRACE_KERNEL_MEMORY
					--Epoc::KernelMiscPages; // page now owned by chunk, so no longer 'miscellaneous'
#endif
					}
				}
			}
		ptinfo.iCount = np;
		__KTRACE_OPT(KMMU,Kern::Printf("Offset %x PTID %d NP %d", offset, ptid, np));
		}
	NKern::UnlockSystem();
	__KTRACE_OPT(KMMU,Kern::Printf("nPdes=%d, Pdes=%08x, HomePdes=%08x",iNumPdes,iPdes,iHomePdes));
	}

void DMemModelChunk::SetFixedAddress(TLinAddr aAddr, TInt aInitialSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O SetFixedAddress %08X size %08X",this,aAddr,aInitialSize));
	iHomeRegionOffset=0;
	iHomeRegionBase=aAddr;
	iHomeBase=aAddr;
	iBase=(TUint8*)aAddr;
	iHomeRegionSize=iMaxSize;
	iAttributes|=EFixedAddress;
	iSize=Mmu::RoundToPageSize(aInitialSize);
	ClaimInitialPages();
	}

TInt DMemModelChunk::Reserve(TInt aInitialSize)
//
// Reserve home section address space for a chunk
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O Reserve() size %08x",this,aInitialSize));
	iHomeRegionOffset=0;
	if (!K::Initialising)
		Mmu::Wait();
	iHomeRegionBase=AllocateHomeAddress(iMaxSize);
	if (!K::Initialising)
		Mmu::Signal();
	iHomeBase=iHomeRegionBase;
	iBase=(TUint8*)iHomeRegionBase;
	if (iHomeRegionBase==0)
		return KErrNoMemory;
	iSize=Mmu::RoundToPageSize(aInitialSize);
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O address %08x",this,iHomeRegionBase));
	ClaimInitialPages();
	return KErrNone;
	}

TInt DMemModelChunk::Adjust(TInt aNewSize)
//
// Adjust a standard chunk.
//
	{

	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust %08x",aNewSize));
	if (iAttributes & (EDoubleEnded|EDisconnected))
		return KErrGeneral;
	if (aNewSize<0 || aNewSize>iMaxSize)
		return KErrArgument;

	TInt r=KErrNone;
	TInt newSize=Mmu::RoundToPageSize(aNewSize);
	if (newSize!=iSize)
		{
		Mmu::Wait();
		if (newSize>iSize)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust growing"));
			r=DoCommit(iSize,newSize-iSize);
			}
		else if (newSize<iSize)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust shrinking"));
			DoDecommit(newSize,iSize-newSize);
			}
		Mmu::Signal();
		}
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x base %08x home %08x",this,iSize,iBase,iHomeRegionBase));
	return r;
	}

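// In the moving memory model every moving chunk has a 'home' address in
// the kernel section as well as the 'run' address at which it is mapped
// while its owning process is current. ExpandHomeRegion() grows the home
// address range so that it can cover extra RAM about to be committed,
// relocating the chunk's PDEs if the home region has to move.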
TInt DMemModelChunk::ExpandHomeRegion(TInt aOffset, TInt aSize)
	{
	// Ensure that the chunk's home region is big enough to accommodate extra RAM being committed
	__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O ExpandHomeRegion(%x,%x)",this,aOffset,aSize));
	Mmu& m = Mmu::Get();
	TBool lowerLimitOk=(aOffset>=iHomeRegionOffset && aOffset<=iHomeRegionOffset+iHomeRegionSize);
	TBool upperLimitOk=(aOffset+aSize>=iHomeRegionOffset && aOffset+aSize<=iHomeRegionOffset+iHomeRegionSize);
	if (lowerLimitOk && upperLimitOk)
		return KErrNone;	// no change required
	TInt newLowerLimit;
	TInt newUpperLimit;
	if (iHomeRegionSize)
		{
		newLowerLimit=Min(iHomeRegionOffset,aOffset);
		newUpperLimit=Max(iHomeRegionOffset+iHomeRegionSize,aOffset+aSize);
		}
	else
		{
		newLowerLimit=aOffset;
		newUpperLimit=aOffset+aSize;
		}
	newLowerLimit &= ~m.iChunkMask;
	newUpperLimit = (newUpperLimit+m.iChunkMask)&~m.iChunkMask;
	TInt newHomeRegionSize=newUpperLimit-newLowerLimit;
	__KTRACE_OPT(KMMU,Kern::Printf("newLowerLimit=%x, newUpperLimit=%x",newLowerLimit,newUpperLimit));
	if (newHomeRegionSize>iMaxSize)
		return KErrArgument;
	TLinAddr newHomeRegionBase;
	if (iHomeRegionSize==0)
		newHomeRegionBase=AllocateHomeAddress(newHomeRegionSize);
	else
		newHomeRegionBase=ReallocateHomeAddress(newHomeRegionSize);
	__KTRACE_OPT(KMMU,Kern::Printf("newHomeRegionBase=%08x",newHomeRegionBase));
	if (newHomeRegionBase==0)
		return KErrNoMemory;
	TInt deltaOffset=iHomeRegionOffset-newLowerLimit;
	TLinAddr newHomeBase=newHomeRegionBase-newLowerLimit;
	TLinAddr translatedHomeBase=newHomeRegionBase+deltaOffset;

	// lock the kernel while we change the chunk's home region
	// Note: The new home region always contains the original home region, so
	// if we reach here, it must be strictly larger.
	NKern::LockSystem();
	if (iNumPdes && iHomeRegionBase!=translatedHomeBase)
		{
		TLinAddr oldBase=TLinAddr(iBase);
		if (oldBase==iHomeBase)
			{
			// chunk is currently at home, so must move it
			// Note: this operation must cope with overlapping initial and final regions
			m.GenericFlush(Mmu::EFlushDMove);	// preemption could occur here...
			if (TLinAddr(iBase)==iHomeBase)	// ...so need to check chunk is still at home address
				{
				m.MoveChunk(iHomeRegionBase,translatedHomeBase,iNumPdes);
				iBase=(TUint8*)newHomeBase;
				MoveCurrentPdes(iHomeRegionBase,translatedHomeBase);
				MoveHomePdes(iHomeRegionBase,translatedHomeBase);
				}
			}
		else
			{
			MoveHomePdes(iHomeRegionBase,translatedHomeBase);
			}
		__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::ExpandHomeRegion moved home base from %08x to %08x",
				iHomeRegionBase,newHomeRegionBase));
		}
	if (!iBase)
		iBase=(TUint8*)newHomeBase;
	iHomeRegionBase=newHomeRegionBase;
	iHomeRegionOffset=newLowerLimit;
	iHomeBase=newHomeBase;
	__KTRACE_OPT(KMMU,Kern::Printf("Final iHomeRegionBase=%08x, iHomeRegionOffset=%08x",iHomeRegionBase,iHomeRegionOffset));
	__KTRACE_OPT(KMMU,Kern::Printf("Final iHomeRegionSize=%08x, iBase=%08x, iHomeBase=%08x",iHomeRegionSize,iBase,iHomeBase));
	__KTRACE_OPT(KMMU,Kern::Printf("nPdes=%d, Pdes=%08x, HomePdes=%08x",iNumPdes,iPdes,iHomePdes));
	NKern::UnlockSystem();
	return KErrNone;
	}

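// Return the kernel address of a range of a shared kernel chunk. Fails
// with KErrAccessDenied for other chunk types (only shared kernel chunks
// have iPermanentPageBitMap set up by DoCreate), and with KErrNotFound if
// any page in the requested range has not been committed.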
TInt DMemModelChunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
	{
	if(!iPermanentPageBitMap)
		return KErrAccessDenied;
	if(TUint(aOffset)>=TUint(iMaxSize))
		return KErrArgument;
	if(TUint(aOffset+aSize)>TUint(iMaxSize))
		return KErrArgument;
	if(aSize<=0)
		return KErrArgument;
	TInt pageShift = Mmu::Get().iPageShift;
	TInt start = aOffset>>pageShift;
	TInt size = ((aOffset+aSize-1)>>pageShift)-start+1;
	if(iPermanentPageBitMap->NotAllocated(start,size))
		return KErrNotFound;
	aKernelAddress = (TLinAddr)iBase+aOffset;
	return KErrNone;
	}

TInt DMemModelChunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
	{
	TInt r=Address(aOffset,aSize,aKernelAddress);
	if(r!=KErrNone)
		return r;

	return Mmu::Get().LinearToPhysical(aKernelAddress,aSize,aPhysicalAddress,aPhysicalPageList);
	}

void DMemModelChunk::Substitute(TInt aOffset, TPhysAddr aOldAddr, TPhysAddr aNewAddr)
	{
	// Substitute the page mapping at aOffset with aNewAddr.
	// Called with the system lock held.
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Substitute %x %08x %08x",aOffset,aOldAddr,aNewAddr));
	Mmu& m = Mmu::Get();

	TLinAddr addr=(TLinAddr)iBase+aOffset;
	TInt ptid=m.GetPageTableId(addr);
	if(ptid<0)
		MM::Panic(MM::EChunkRemapNoPageTable);

	m.RemapPage(ptid, addr, aOldAddr, aNewAddr, iPtePermissions, iOwningProcess);
	if(iChunkType==EKernelCode || iChunkType==EDll || iChunkType==EUserSelfModCode)
		m.SyncCodeMappings();
	}

/**
Get the movability type of the chunk's pages
@return How movable the chunk's pages are
*/
TZonePageType DMemModelChunk::GetPageType()
	{
	// Shared chunks have their physical addresses available
	if (iChunkType == ESharedKernelSingle ||
		iChunkType == ESharedKernelMultiple ||
		iChunkType == ESharedIo ||
		iChunkType == ESharedKernelMirror ||
		iChunkType == EKernelMessage ||
		iChunkType == EKernelData)	// Don't move kernel heap pages as DMA may be accessing them.
		{
		return EPageFixed;
		}
	// All other types of chunk are movable
	return EPageMovable;
	}

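// Commit types handled below:
//  - ECommitDiscontiguous: allocate any free RAM pages, one page table's
//    worth at a time.
//  - ECommitContiguous: allocate one physically contiguous block up front;
//    its base address is returned through aExtraArg.
//  - ECommitDiscontiguousPhysical / ECommitContiguousPhysical: map physical
//    pages supplied by the caller; the chunk does not own this memory.
//  - ECommitVirtual (ARM only): reserve the virtual range without
//    allocating RAM.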
TInt DMemModelChunk::DoCommit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
	{
	// Commit more RAM to a chunk at a specified offset
	// enter and leave with system unlocked
	// must hold RamAlloc mutex before calling this function
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCommit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
	TInt offset=aOffset;
	TInt endOffset=offset+aSize;
	TInt newPtId=-1;
	Mmu& m = Mmu::Get();
	DRamAllocator& a = *m.iRamPageAllocator;
	TInt r=KErrNone;
	TPhysAddr pageList[KMaxPages];
	TPhysAddr* pPageList=0;
	TPhysAddr nextPage=0;
	TUint32 ccp=K::CompressKHeapPtr(this);
	SPageInfo::TType type = SPageInfo::EChunk;

	if (iHomeRegionSize==0 || (iAttributes&EFixedAddress)==0)
		{
		r=ExpandHomeRegion(aOffset,aSize);
		if (r!=KErrNone)
			return r;
		}

	// Set flag to indicate if RAM should be cleared before being committed.
	// Note: EDll and EUserCode are handled by the code segment code instead,
	// so that the region the loader has already written is not cleared.
	TBool clearRam = iChunkType==EUserData
		|| iChunkType==EDllData
		|| iChunkType==EUserSelfModCode
		|| iChunkType==ESharedKernelSingle
		|| iChunkType==ESharedKernelMultiple
		|| iChunkType==ESharedIo
		|| iChunkType==ERamDrive;


	TBool ownsMemory = !(iAttributes&EMemoryNotOwned);
	TBool physicalCommit = aCommitType&DChunk::ECommitPhysicalMask;
	if(ownsMemory)
		{
		if(physicalCommit)
			return KErrNotSupported;
		}
	else
		{
		if(!physicalCommit)
			return KErrNotSupported;
		type = SPageInfo::EInvalid;	// to indicate page info not to be updated
		}

	switch(aCommitType)
		{
	case DChunk::ECommitDiscontiguous:
		// No setup to do
		break;

	case DChunk::ECommitContiguous:
		{
		// Allocate a block of contiguous RAM from the free pool
		TInt numPages=(endOffset-offset)>>m.iPageShift;
		r=m.AllocContiguousRam(numPages<<m.iPageShift, nextPage, GetPageType(), 0);
		if (r!=KErrNone)
			return r;
		if(clearRam)
			m.ClearPages(numPages, (TPhysAddr*)(nextPage|1), iClearByte);	// clear RAM if required
		*aExtraArg = nextPage;	// store physical address of RAM as return argument
		}
		break;

	case DChunk::ECommitDiscontiguousPhysical:
		{
		pPageList = aExtraArg;	// use pages given to us

		// Check that the page addresses are all multiples of the page size...
		TInt numPages=(endOffset-offset)>>m.iPageShift;
		TUint32* ptr = aExtraArg;
		TUint32* endPtr = aExtraArg+numPages;
		if(ptr>=endPtr)
			return KErrNone;	// Zero size commit is OK
		TPhysAddr pageBits = 0;
		do
			pageBits |= *ptr++;
		while(ptr<endPtr);
		if(pageBits&(m.iPageSize-1))
			return KErrArgument;	// all addresses must be multiples of the page size
		}
		break;

	case DChunk::ECommitContiguousPhysical:
		nextPage = (TPhysAddr)aExtraArg;	// we have been given the physical address to use
		if(nextPage&(m.iPageSize-1))
			return KErrArgument;	// address must be a multiple of the page size
		break;

#ifdef __MARM__
	case DChunk::ECommitVirtual:
		break;
#endif

	default:
		return KErrNotSupported;
		}

	// Commit memory a bit at a time (so the system lock only needs to be held for a limited time)
	while(offset<endOffset)
		{
		TInt np=(endOffset-offset)>>m.iPageShift;	// pages remaining to satisfy request
		TInt npEnd=(m.iChunkSize-(offset&m.iChunkMask))>>m.iPageShift;	// number of pages to end of page table
		if (np>npEnd)
			np=npEnd;	// limit to single page table
		if (np>MM::MaxPagesInOneGo)
			np=MM::MaxPagesInOneGo;	// limit
		NKern::LockSystem();	// lock the system while we look at the page directory
		TLinAddr addr=(TLinAddr)iBase+offset;	// current address
		TInt ptid=m.GetPageTableId(addr);	// get page table ID if a page table is already assigned here
		NKern::UnlockSystem();	// we can now unlock the system
		newPtId=-1;
		if (ptid<0)
			{
			// need to allocate a new page table
			newPtId=m.AllocPageTable();
			if (newPtId<0)
				{
				// out of memory, so break out and revert
				r=KErrNoMemory;
				break;
				}
			ptid=newPtId;
			}

		if(aCommitType==DChunk::ECommitDiscontiguous)
			{
			pPageList = pageList;
			r=m.AllocRamPages(pPageList,np, GetPageType());	// try to allocate pages
			if (r!=KErrNone)
				break;	// if we fail, break out and revert
			if(clearRam)
				m.ClearPages(np, pPageList, iClearByte);	// clear RAM if required
			}

		// lock the system while we change the MMU mappings
		NKern::LockSystem();
		TInt commitSize = np<<m.iPageShift;
		iSize += commitSize;	// update committed size
		if (aCommitType==DChunk::ECommitVirtual)
			m.MapVirtual(ptid, np);
		else if(pPageList)
			{
			m.MapRamPages(ptid, type, this, offset, pPageList, np, iPtePermissions);
			pPageList += np;
			}
		else
			{
			m.MapPhysicalPages(ptid, type, this, offset, nextPage, np, iPtePermissions);
			nextPage += commitSize;
			}
		NKern::UnlockSystem();

		NKern::LockSystem();
		if (newPtId>=0)
			{
			// We have allocated a new page table, now we must assign it and update PDE info
			SPageTableInfo& pti=m.PtInfo(ptid);
			pti.SetChunk(ccp, offset>>m.iChunkShift);
			TLinAddr addr=(TLinAddr)iBase+offset;	// current address
			m.DoAssignPageTable(ptid, addr, iPdePermissions[iChunkState]);
			AddPde(offset);	// update PDE info
			}
		__KTRACE_OPT(KMMU,Kern::Printf("nPdes=%d, Pdes=%08x, HomePdes=%08x",iNumPdes,iPdes,iHomePdes));
		NKern::UnlockSystem();
		__KTRACE_OPT(KMEMTRACE,Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this));
#ifdef BTRACE_CHUNKS
		BTraceContext12(BTrace::EChunks,ownsMemory?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,offset,commitSize);
#endif

		offset += commitSize;	// update offset
		}

	if (r==KErrNone)
		{
		if(iPermanentPageBitMap)
			iPermanentPageBitMap->Alloc(aOffset>>m.iPageShift,aSize>>m.iPageShift);
		}
	else
		{
		// we ran out of memory somewhere
		// first check if we have an unassigned page table
		if (newPtId>=0)
			m.FreePageTable(newPtId);	// free the unassigned page table

		// now free any memory we succeeded in allocating and return the chunk to its initial state
		DChunk::TDecommitType decommitType = aCommitType==DChunk::ECommitVirtual ?
			DChunk::EDecommitVirtual : DChunk::EDecommitNormal;
		DoDecommit(aOffset,offset-aOffset,decommitType);

		if(aCommitType==DChunk::ECommitContiguous)
			{
			// Free the pages we allocated but didn't get around to committing
			TPhysAddr last = nextPage + ((endOffset-offset)>>m.iPageShift<<m.iPageShift);
			while(nextPage<last)
				{
				a.FreeRamPage(nextPage, GetPageType());
				nextPage += m.iPageSize;
				}
			*aExtraArg = KPhysAddrInvalid;	// return invalid physical address
			}

		m.iAllocFailed=ETrue;
		}
	return r;
	}

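// On write-back cache systems DoDecommit() chooses between two cache
// strategies: for small regions each page is cleaned from the virtual
// cache individually ('selective flush'), while for regions above the
// CacheMaintenance::SyncAllPerformanceThresholdPages() threshold it is
// cheaper to flush the whole data cache once at the end.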
void DMemModelChunk::DoDecommit(TInt aOffset, TInt aSize, TDecommitType aDecommitType)
	{
	// Decommit RAM from a chunk at a specified offset
	// enter and leave with kernel unlocked
	// must hold RamAlloc mutex before calling this function
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoDecommit %x+%x",aOffset,aSize));
	if (iHomeRegionBase==0)
		return;

	TBool ownsMemory = !(iAttributes&EMemoryNotOwned);
	if (!ownsMemory)
		{
		// Physical memory not owned by the chunk also has to be evicted from cache(s).
		// We cannot just purge, as it can still be in use by the driver. Therefore, we'll flush it.
		// Purging physical memory from cache(s) that is owned by the chunk is done below.
		CacheMaintenance::MemoryToPreserveAndReuse((TLinAddr)(iBase+aOffset), aSize, iMapAttr);
		}

	TInt offset=aOffset;
	TInt endOffset=offset+aSize;
	Mmu& m = Mmu::Get();
	DRamAllocator& a = *m.iRamPageAllocator;
	TPhysAddr pageList[KMaxPages];
#ifdef __CPU_WRITE_BACK_CACHE
	TInt size_reduction = Min(aSize,iSize);
	TBool selectiveFlush=((TUint)size_reduction<=(CacheMaintenance::SyncAllPerformanceThresholdPages()<<KPageShift));
#endif
	while(offset<endOffset)
		{
		TInt np=(endOffset-offset)>>m.iPageShift;	// number of pages remaining to decommit
		TInt pdeEnd=(offset+m.iChunkSize)&~m.iChunkMask;
		TInt npEnd=(pdeEnd-offset)>>m.iPageShift;	// number of pages to end of page table
		if (np>npEnd)
			np=npEnd;	// limit to single page table
		if (np>MM::MaxPagesInOneGo)
			np=MM::MaxPagesInOneGo;	// limit
		NKern::LockSystem();	// lock the system while we look at the page directory
		TUint8* base=iBase;	// save base address
		TLinAddr addr=(TLinAddr)base+offset;	// current address
		TInt ptid=m.GetPageTableId(addr);	// get page table ID if a page table is already assigned here
		if (ptid>=0)
			{
			TInt nPtes=0;
			TInt nFree=0;

			// Unmap the pages, clear the PTEs and place the physical addresses of the now-free RAM pages in
			// pageList. Return nPtes=number of pages placed in list, remain=number of PTEs remaining in page table.
			// This also invalidates any TLB entries for the unmapped pages.
			// NB for WriteBack cache, we must also invalidate any cached entries for these pages - this might be done
			// by invalidating entry-by-entry or by a complete cache flush at the end.
			// NB For split TLB, ITLB may not be invalidated. In that case it will be invalidated by
			// Mmu::SyncCodeMappings() at the end of the function.
			TInt remain;
			if (aDecommitType == EDecommitVirtual)
				remain=m.UnmapVirtual(ptid,addr,np,pageList,ownsMemory,nPtes,nFree,iOwningProcess);
			else
				remain=m.UnmapPages(ptid,addr,np,pageList,ownsMemory,nPtes,nFree,iOwningProcess);
			TInt decommitSize=nPtes<<m.iPageShift;
			iSize-=decommitSize;	// reduce the committed size

			// if page table is now completely empty, unassign it and update chunk PDE info
			remain &= KUnmapPagesCountMask;
			if (remain==0)
				{
				m.DoUnassignPageTable(addr);
				RemovePde(offset);
				NKern::UnlockSystem();
				m.FreePageTable(ptid);
				NKern::LockSystem();
				}
			__KTRACE_OPT(KMMU,Kern::Printf("nPdes=%d, Pdes=%08x, HomePdes=%08x",iNumPdes,iPdes,iHomePdes));
#ifdef __CPU_WRITE_BACK_CACHE
			if (selectiveFlush)
				{
				TInt n=np;
				while(n && iBase==base)	// reschedule may move base, but then cache will have been flushed so we can stop purging L1
					{
					CacheMaintenance::PageToReuseVirtualCache(addr);
					addr+=m.iPageSize;
					--n;
					NKern::FlashSystem();
					}
				Mmu::Get().CacheMaintenanceOnDecommit(pageList, nFree);	// on ARMv5, this deals with L2 cache only
				}
#endif
			NKern::UnlockSystem();	// we can now unlock the system
			__KTRACE_OPT(KMEMTRACE,Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this));
#ifdef BTRACE_CHUNKS
			if(nFree)
				BTraceContext12(BTrace::EChunks,ownsMemory?BTrace::EChunkMemoryDeallocated:BTrace::EChunkMemoryRemoved,this,offset,nFree<<m.iPageShift);
#endif

			// We can now return the decommitted pages to the free page list
			if (nFree)
				a.FreeRamPages(pageList,nFree, GetPageType());

			offset+=(np<<m.iPageShift);
			}
		else
			{
			NKern::UnlockSystem();
			__KTRACE_OPT(KMMU,Kern::Printf("No page table at %08x",addr));
			if ((iAttributes&EDisconnected)==0)
				MM::Panic(MM::EChunkDecommitNoPageTable);
			offset=pdeEnd;	// disconnected chunk - step on to next PDE
			}
		}
	if (iSize==0 && (iAttributes&EFixedAddress)==0)
		{
		__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust remove region"));
		NKern::LockSystem();
		if (TLinAddr(iBase)==iHomeBase)
			iBase=NULL;
		DeallocateHomeAddress();
		NKern::UnlockSystem();
		}
#ifdef __CPU_WRITE_BACK_CACHE
	if (!selectiveFlush)
		{
		NKern::LockSystem();
		m.GenericFlush((TUint)Mmu::EFlushDDecommit);	// flush virtual DCache
		CacheMaintenance::SyncPhysicalCache_All();
		NKern::UnlockSystem();
		}
#endif
	if (iAttributes & ECode)
		m.SyncCodeMappings();	// flush ITLB if necessary
	}

TInt DMemModelChunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
//
// Adjust a double-ended chunk.
//
	{

	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
		return KErrGeneral;
	if (aTop<0 || aBottom<0 || aTop<aBottom || aTop>iMaxSize)
		return KErrArgument;
	Mmu& m = Mmu::Get();
	aBottom &= ~m.iPageMask;
	aTop=(aTop+m.iPageMask)&~m.iPageMask;
	TInt newSize=aTop-aBottom;
	if (newSize>iMaxSize)
		return KErrArgument;

	Mmu::Wait();
	TInt initBottom=iStartPos;
	TInt initTop=iStartPos+iSize;
	TInt nBottom=Max(aBottom,iStartPos);	// intersection bottom
	TInt nTop=Min(aTop,iStartPos+iSize);	// intersection top
	TInt r=KErrNone;
	if (nBottom<nTop)
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions intersect"));
		if (initBottom<nBottom)
			{
			iStartPos=aBottom;
			DoDecommit(initBottom,nBottom-initBottom);
			}
		if (initTop>nTop)
			DoDecommit(nTop,initTop-nTop);	// this changes iSize
		if (aBottom<nBottom)
			{
			r=DoCommit(aBottom,nBottom-aBottom);
			if (r==KErrNone)
				{
				if (aTop>nTop)
					r=DoCommit(nTop,aTop-nTop);
				if (r==KErrNone)
					iStartPos=aBottom;
				else
					DoDecommit(aBottom,nBottom-aBottom);
				}
			}
		else if (aTop>nTop)
			r=DoCommit(nTop,aTop-nTop);
		}
	else
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions disjoint"));
		if (iSize)
			DoDecommit(initBottom,iSize);
		iStartPos=aBottom;
		if (newSize)
			r=DoCommit(iStartPos,newSize);
		}
	Mmu::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x+%x base %08x home %08x",this,iStartPos,iSize,iBase,iHomeRegionBase));
	return r;
	}

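// Commit() rounds the requested range out to page boundaries before
// committing. Worked example (assuming a 4KB page size, so iPageMask==0xFFF):
// Commit(0x1800, 0x1000) becomes offset 0x1000, size 0x2000, i.e. both
// pages touched by the original range. The commit fails with
// KErrAlreadyExists if any page in the rounded range is already present.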
TInt DMemModelChunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
//
// Commit to a disconnected chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0)
		return KErrArgument;
	if (aSize==0)
		return KErrNone;
	Mmu& m = Mmu::Get();
	aSize+=(aOffset & m.iPageMask);
	aOffset &= ~m.iPageMask;
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	if ((aOffset+aSize)>iMaxSize)
		return KErrArgument;

	Mmu::Wait();
	TInt r=KErrNone;
	TInt i=aOffset>>m.iPageShift;
	TInt n=aSize>>m.iPageShift;
	if (iPageBitMap->NotFree(i,n))
		r=KErrAlreadyExists;
	else
		{
		r=DoCommit(aOffset,aSize,aCommitType,aExtraArg);
		if (r==KErrNone)
			iPageBitMap->Alloc(i,n);
		}
	Mmu::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

TInt DMemModelChunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
//
// Allocate offset and commit to a disconnected chunk.
//
	{
	TInt r = DoAllocate(aSize, aGuard, aAlign, ETrue);
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

TInt DMemModelChunk::FindFree(TInt aSize, TInt aGuard, TInt aAlign)
//
// Find free offset but don't commit any memory.
//
	{
	return DoAllocate(aSize, aGuard, aAlign, EFalse);
	}

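// DoAllocate() reserves aGuard bytes of uncommitted guard space below the
// committed region and returns the offset of the reserved range (which
// includes the guard). Worked example, assuming a 4KB page size: with
// aSize=0x1234 and aGuard=0x800, aSize rounds to 0x2000 and aGuard to
// 0x1000, so n=3 pages are reserved in the bitmap and, when aCommit is
// true, only the 2 pages starting at offset+0x1000 are committed.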
TInt DMemModelChunk::DoAllocate(TInt aSize, TInt aGuard, TInt aAlign, TBool aCommit)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoAllocate %x %x %d",aSize,aGuard,aAlign));

	// Only allow this to be called on disconnected chunks and not disconnected
	// cache chunks: when guard pages exist the bit map can't be used to determine
	// the size of disconnected cache chunks, as is required by Decommit().
	if ((iAttributes & (EDoubleEnded|EDisconnected|ECache))!=EDisconnected)
		return KErrGeneral;

	if (aSize<=0 || aGuard<0)
		return KErrArgument;
	Mmu& m = Mmu::Get();
	aAlign=Max(aAlign-m.iPageShift,0);
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	aGuard=(aGuard+m.iPageMask)&~m.iPageMask;
	if ((aSize+aGuard)>iMaxSize)
		return KErrArgument;

	Mmu::Wait();
	TInt r=KErrNone;
	TInt n=(aSize+aGuard)>>m.iPageShift;
	TInt i=iPageBitMap->AllocAligned(n,aAlign,0,EFalse);	// allocate the offset
	if (i<0)
		r=KErrNoMemory;	// run out of reserved space for this chunk
	else
		{
		TInt offset=i<<m.iPageShift;
		__KTRACE_OPT(KMMU,Kern::Printf("Offset %x allocated",offset));
		if (aCommit)
			{
			r=DoCommit(offset+aGuard,aSize,ECommitDiscontiguous);
			if (r==KErrNone)
				iPageBitMap->Alloc(i,n);
			}
		if (r==KErrNone)
			r=offset;	// if operation successful, return allocated offset
		}
	Mmu::Signal();
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoAllocate returns %x",r));
	return r;
	}

TInt DMemModelChunk::Decommit(TInt aOffset, TInt aSize)
//
// Decommit from a disconnected chunk.
//
	{
	return Decommit(aOffset, aSize, EDecommitNormal);
	}

TInt DMemModelChunk::Decommit(TInt aOffset, TInt aSize, TDecommitType aDecommitType)
//
// Decommit from a disconnected chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",aOffset,aSize));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0)
		return KErrArgument;
	if (aSize==0)
		return KErrNone;
	Mmu& m = Mmu::Get();
	aSize+=(aOffset & m.iPageMask);
	aOffset &= ~m.iPageMask;
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	if ((aOffset+aSize)>iMaxSize)
		return KErrArgument;

	Mmu::Wait();

	// limit the range to the home region range
	TInt end = aOffset+aSize;
	if (aOffset<iHomeRegionOffset)
		aOffset=iHomeRegionOffset;
	if (end>iHomeRegionOffset+iHomeRegionSize)
		end=iHomeRegionOffset+iHomeRegionSize;
	aSize = end-aOffset;
	if(aSize<0)
		aSize=0;
	__KTRACE_OPT(KMMU,Kern::Printf("Rounded and clipped range %x+%x",aOffset,aSize));

	if (aSize)
		{
		TInt i=aOffset>>m.iPageShift;
		TInt n=aSize>>m.iPageShift;
		__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));
		TUint oldAvail = iPageBitMap->iAvail;
		TUint oldSize = iSize;

		// Free those positions which are still committed and also any guard pages,
		// i.e. pages that are reserved in this chunk but which are not committed.
		iPageBitMap->SelectiveFree(i,n);
		DoDecommit(aOffset,aSize,aDecommitType);

		if (iAttributes & ECache)
			{// If this is the file server cache chunk then adjust the size based
			// on the bit map size because:-
			// - Unlocked and reclaimed pages will be unmapped without updating
			//   iSize or the bit map.
			// - DoDecommit() only decommits the mapped pages.
			// For all other chunks what is mapped is what is committed to the
			// chunk so iSize is accurate.
			TUint actualFreedPages = iPageBitMap->iAvail - oldAvail;
			iSize = oldSize - (actualFreedPages << KPageShift);
			}
		}

	Mmu::Signal();
	__DEBUG_EVENT(EEventUpdateChunk, this);
	return KErrNone;
	}

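// Unlock() and Lock() support the file server's RAM cache: Unlock() makes
// committed pages available for the system to reclaim under memory
// pressure, while Lock() pins them again. If Lock() fails (for instance
// because pages were reclaimed in the meantime) the whole range is
// decommitted and the chunk size recalculated from the page bitmap.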
TInt DMemModelChunk::Unlock(TInt aOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Unlock %x+%x",aOffset,aSize));
	if (!(iAttributes&ECache))
		return KErrGeneral;
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;

	// Mark this as the file server cache chunk. This is safe as it is only the
	// file server that can invoke this function.
	iAttributes |= ECache;

	if (aOffset<0 || aSize<0)
		return KErrArgument;
	if (aSize==0)
		return KErrNone;
	Mmu& m = Mmu::Get();
	aSize+=(aOffset & m.iPageMask);
	aOffset &= ~m.iPageMask;
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	if ((aOffset+aSize)>iMaxSize)
		return KErrArgument;

	Mmu::Wait();
	TInt r=KErrNone;
	TInt i=aOffset>>m.iPageShift;
	TInt n=aSize>>m.iPageShift;
	if (iPageBitMap->NotAllocated(i,n))
		r=KErrNotFound;
	else
		{
#ifdef BTRACE_CHUNKS
		TUint oldFree = m.FreeRamInBytes();
#endif
		r=Mmu::Get().UnlockRamCachePages(iBase,i,n);
#ifdef BTRACE_CHUNKS
		if(r==KErrNone)
			{
			TUint unlocked = m.FreeRamInBytes()-oldFree;	// size of memory unlocked
			if(unlocked)
				BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryDeallocated,this,aOffset,unlocked);
			}
#endif
		}
	Mmu::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

TInt DMemModelChunk::Lock(TInt aOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Lock %x+%x",aOffset,aSize));
	if (!(iAttributes&ECache))
		return KErrGeneral;
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0)
		return KErrArgument;
	if (aSize==0)
		return KErrNone;
	Mmu& m = Mmu::Get();
	aSize+=(aOffset & m.iPageMask);
	aOffset &= ~m.iPageMask;
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	if ((aOffset+aSize)>iMaxSize)
		return KErrArgument;

	Mmu::Wait();
	TInt r=KErrNone;
	TInt i=aOffset>>m.iPageShift;
	TInt n=aSize>>m.iPageShift;
	if (iPageBitMap->NotAllocated(i,n))
		r=KErrNotFound;
	else
		{
#ifdef BTRACE_CHUNKS
		TUint oldFree = m.FreeRamInBytes();
#endif
		r=Mmu::Get().LockRamCachePages(iBase,i,n);
#ifdef BTRACE_CHUNKS
		if(r==KErrNone)
			{
			TUint locked = oldFree-m.FreeRamInBytes();
			if(locked)
				BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryAllocated,this,aOffset,locked);
			}
#endif
		}
	if(r!=KErrNone)
		{
		// decommit memory on error...
		__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));
		TUint oldAvail = iPageBitMap->iAvail;
		iPageBitMap->SelectiveFree(i,n);	// free those positions which are actually allocated
		TUint oldSize = iSize;

		DoDecommit(aOffset,aSize);

		// Use the bit map to adjust the size of the chunk, as unlocked and reclaimed pages
		// will have been unmapped but not removed from the bit map (DoDecommit() only
		// decommits the mapped pages).
		TUint actualFreedPages = iPageBitMap->iAvail - oldAvail;
		iSize = oldSize - (actualFreedPages << KPageShift);
		}

	Mmu::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

#ifndef __SCHEDULER_MACHINE_CODED__
// System locked in this function for a time proportional to chunk size.
// This is unavoidable since the chunk state must always be well defined
// whenever the system is unlocked.
TUint32 DMemModelChunk::ApplyTopLevelPermissions(TChunkState aChunkState)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("ApplyTopLevelPermissions ChunkState=%d",aChunkState));
	if (!(iAttributes&EFixedAccess))
		{
		iChunkState=aChunkState;
		if (iSize)
			{
			Mmu& m = Mmu::Get();
			TLinAddr base=(TLinAddr)iBase;
			TInt size=iSize;
			TUint32 mask=m.iChunkMask;
			if (iAttributes & EDoubleEnded)
				{
				base+=(iStartPos & ~mask);
				size=((iStartPos&mask)+size+mask)&~mask;
				}
			m.ApplyTopLevelPermissions(base,size,iPdePermissions[aChunkState]);
			}
		return (iAttributes&ECode)?Mmu::EFlushDPermChg|Mmu::EFlushIPermChg:Mmu::EFlushDPermChg;
		}
	return 0;
	}

// System locked in this function for a time proportional to chunk size.
// This is unavoidable since the chunk state must always be well defined
// whenever the system is unlocked.
TUint32 DMemModelChunk::MoveToRunAddress(TLinAddr aLinearAddr, TChunkState aChunkState)
	{
	iChunkState=aChunkState;
	if (iSize)
		{
		Mmu& m = Mmu::Get();
		TLinAddr base=(TLinAddr)iBase;
		TLinAddr dest=aLinearAddr;
		TInt size=iSize;
		if (iAttributes & EDoubleEnded)
			{
			TUint32 mask=m.iChunkMask;
			base+=(iStartPos & ~mask);
			dest+=(iStartPos & ~mask);
			size=((iStartPos&mask)+size+mask)&~mask;
			}
		m.MoveChunk(base,size,dest,iPdePermissions[aChunkState]);
		}
	MoveCurrentPdes((TLinAddr)iBase,aLinearAddr);
	iBase=(TUint8 *)aLinearAddr;
	return Mmu::EFlushDMove;	// chunk can't contain code
	}

// System locked in this function for a time proportional to chunk size.
// This is unavoidable since the chunk state must always be well defined
// whenever the system is unlocked.
TUint32 DMemModelChunk::MoveToHomeSection()
	{
	iChunkState=ENotRunning;
	if (iSize)
		{
		Mmu& m = Mmu::Get();
		TLinAddr base=TLinAddr(iBase);
		TLinAddr home=iHomeRegionBase;
		TInt size=iSize;
		if (iAttributes & EDoubleEnded)
			{
			TUint32 mask=m.iChunkMask;
			base+=(iStartPos & ~mask);
			home+=(iStartPos & ~mask);
			size=((iStartPos&mask)+size+mask)&~mask;
			}
		m.MoveChunk(base,size,home,iPdePermissions[0]);
		}
	iBase=(TUint8 *)iHomeRegionBase;
	iHomePdes=iPdes;
	return Mmu::EFlushDMove;	// chunk can't contain code
	}
#endif

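// Home addresses are carved out of the kernel section in PDE (chunk-size)
// granularity using the section's bitmap allocator. Fixed-address chunks
// always reserve space for their maximum size, so their home region never
// needs to move.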
TLinAddr DMemModelChunk::AllocateHomeAddress(TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AllocateHomeAddress size %08x",aSize));
	Mmu& m = Mmu::Get();
	TLinearSection* s = m.iKernelSection;
	TUint required;
	if (iAttributes&EFixedAddress)
		required=Mmu::RoundToChunkSize(iMaxSize);
	else
		required=Mmu::RoundToChunkSize(aSize);
	required >>= m.iChunkShift;
	TInt r = s->iAllocator.AllocConsecutive(required, EFalse);
	if (r<0)
		return 0;
	s->iAllocator.Alloc(r, required);
	TLinAddr addr = s->iBase + (r<<m.iChunkShift);
	__KTRACE_OPT(KMMU,Kern::Printf("Address %08x allocated",addr));
	iHomeRegionSize = required << m.iChunkShift;
	return addr;
	}

void DMemModelChunk::DeallocateHomeAddress()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DeallocateHomeAddress %08x+%x", iHomeRegionBase, iHomeRegionSize));
	if (iHomeRegionSize)
		{
		Mmu& m = Mmu::Get();
		TLinearSection* s = m.iKernelSection;
		TInt first = (TInt)((iHomeRegionBase - s->iBase)>>m.iChunkShift);
		TInt count = (TInt)(iHomeRegionSize >> m.iChunkShift);
		s->iAllocator.Free(first, count);
		iHomeRegionBase=0;
		iHomeRegionSize=0;
		}
	}

TLinAddr DMemModelChunk::ReallocateHomeAddress(TInt aNewSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::ReallocateHomeAddress(%08x) for chunk %O",aNewSize,this));

	// can never be called for a fixed address chunk
	__ASSERT_ALWAYS((iAttributes&(EFixedAddress))==0,MM::Panic(MM::EFixedChunkMoving));

	Mmu& m = Mmu::Get();
	TLinearSection* s = m.iKernelSection;
	TUint required=Mmu::RoundToChunkSize(aNewSize);
	TInt next = (TInt)((iHomeRegionBase + iHomeRegionSize - s->iBase)>>m.iChunkShift);
	TInt count = (TInt)((required - iHomeRegionSize) >> m.iChunkShift);
	if (!s->iAllocator.NotFree(next, count))
		{
		// we can expand in place
		s->iAllocator.Alloc(next, count);
		iHomeRegionSize = required;
		return iHomeRegionBase;
		}
	TUint oldHomeSize = iHomeRegionSize;
	TLinAddr addr = AllocateHomeAddress(required);	// try to get a new home address
	if (addr && oldHomeSize)
		{
		// succeeded - free old region
		next = (TInt)((iHomeRegionBase - s->iBase)>>m.iChunkShift);
		count = (TInt)(oldHomeSize >> m.iChunkShift);
		s->iAllocator.Free(next, count);
		}
	// if it fails, keep our current home region
	return addr;
	}

TInt DMemModelChunk::CheckAccess()
	{
	DProcess* pP=TheCurrentThread->iOwningProcess;
	if (iAttributes&EPrivate)
		{
		if (iOwningProcess && iOwningProcess!=pP && pP!=K::TheKernelProcess)
			return KErrAccessDenied;
		}
	return KErrNone;
	}

TInt DMemModelChunkHw::Close(TAny*)
	{
	__KTRACE_OPT(KOBJECT,Kern::Printf("DMemModelChunkHw::Close %d %O",AccessCount(),this));
	TInt r=Dec();
	if (r==1)
		{
		if (iLinAddr)
			{
			// Physical memory has to be evicted from cache(s).
			// Must be preserved as well, as it can still be in use by the driver.
			CacheMaintenance::MemoryToPreserveAndReuse(iLinAddr, iSize, iAttribs);

			MmuBase& m=*MmuBase::TheMmu;
			MmuBase::Wait();
			m.Unmap(iLinAddr,iSize);
			MmuBase::Signal();
			DeallocateLinearAddress();
			}
		K::ObjDelete(this);
		}
	return r;
	}

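// Emit BTrace records describing the chunk's currently committed memory.
// The chunk's page tables are walked and runs of consecutively committed
// pages are coalesced into single EChunkMemoryAllocated/EChunkMemoryAdded
// records (depending on whether the chunk owns its memory).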
void DMemModelChunk::BTracePrime(TInt aCategory)
	{
	DChunk::BTracePrime(aCategory);

#ifdef BTRACE_CHUNKS
	if (aCategory == BTrace::EChunks || aCategory == -1)
		{
		MmuBase::Wait();

		TBool memoryOwned = !(iAttributes&EMemoryNotOwned);
		Mmu& m=Mmu::Get();
		TInt committedBase = -1;

		// look at each page table in this chunk...
		TUint chunkEndIndex = iMaxSize>>KChunkShift;
		NKern::LockSystem();
		for(TUint chunkIndex=0; chunkIndex<chunkEndIndex; ++chunkIndex)
			{
			TLinAddr addr=(TLinAddr)iBase+chunkIndex*KChunkSize;	// current address
			TInt ptid = m.GetPageTableId(addr);
			if(ptid<0)
				{
				// no page table...
				if(committedBase!=-1)
					{
					NKern::FlashSystem();
					TUint committedEnd = chunkIndex*KChunkSize;
					BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase);
					committedBase = -1;
					}
				continue;
				}
			TPte* pPte=(TPte*)m.PageTableLinAddr(ptid);

			// look at each page in page table...
			for(TUint pageIndex=0; pageIndex<KChunkSize/KPageSize; ++pageIndex)
				{
				TBool committed = false;
				TPhysAddr phys = m.PtePhysAddr(pPte[pageIndex], pageIndex);
				if(phys!=KPhysAddrInvalid)
					{
					// we have a page...
					if(!memoryOwned)
						committed = true;
					else
						{
						// make sure we own the page...
						SPageInfo* pi = SPageInfo::SafeFromPhysAddr(phys);
						if(pi && pi->Type()==SPageInfo::EChunk && pi->Owner()==this)
							committed = true;
						}
					}

				if(committed)
					{
					if(committedBase==-1)
						committedBase = chunkIndex*KChunkSize+pageIndex*KPageSize;	// start of new region
					}
				else
					{
					if(committedBase!=-1)
						{
						// generate trace for region...
						NKern::FlashSystem();
						TUint committedEnd = chunkIndex*KChunkSize+pageIndex*KPageSize;
						BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase);
						committedBase = -1;
						}
					}

				if((pageIndex&15)==0)
					NKern::FlashSystem();
				}
			}
		NKern::UnlockSystem();

		if(committedBase!=-1)
			{
			TUint committedEnd = chunkEndIndex*KChunkSize;
			BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase);
			}

		MmuBase::Signal();
		}
#endif
	}