// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <memmodel.h>
#include "mmu/mm.h"
#include "mmboot.h"

DMemModelChunk::DMemModelChunk()
    {
    }

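/**
Destructor. Destroys the kernel mapping and memory object owned by this chunk,
frees the page bitmaps, and queues the chunk-destroyed DFC, if one was set.
*/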
DMemModelChunk::~DMemModelChunk()
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("DMemModelChunk destruct %O",this));

    MM::MappingDestroy(iKernelMapping);
    MM::MemoryDestroy(iMemoryObject);

    delete iPageBitMap;
    delete iPermanentPageBitMap;

    TDfc* dfc = iDestroyedDfc;
    if(dfc)
        dfc->QueueOnIdle();

    __KTRACE_OPT(KMEMTRACE, Kern::Printf("MT:D %d %x %O",NTickCount(),this,this));
#ifdef BTRACE_CHUNKS
    BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);
#endif
    }

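/**
Close a reference on the chunk and, if @a aPtr is supplied, remove the chunk
from that process's chunk list. The chunk is deleted when the last reference
is closed.

@param aPtr The owning DMemModelProcess, or NULL.
@return EObjectDeleted if this was the last reference, 0 otherwise.
*/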
TInt DMemModelChunk::Close(TAny* aPtr)
    {
    if (aPtr)
        {
        DMemModelProcess* pP=(DMemModelProcess*)aPtr;
        __NK_ASSERT_DEBUG(!iOwningProcess || iOwningProcess==pP);
        pP->RemoveChunk(this);
        }
    TInt r=Dec();
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Close %d %O",r,this));
    __NK_ASSERT_DEBUG(r > 0); // Should never be negative.
    if (r==1)
        {
        K::ObjDelete(this);
        return EObjectDeleted;
        }
    return 0;
    }

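/**
Decide whether this chunk's memory should be data paged and set the EDataPaged
attribute accordingly. Only user data chunks on a system with a data paging
device are eligible; the kernel's data paging policy is applied first, then any
explicit TChunkCreate::EPaged/EUnpaged attribute, and finally the creating
process's own EDataPaged attribute.

@param aCreateAtt The TChunkCreate attributes passed when creating the chunk.
*/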
void DMemModelChunk::SetPaging(TUint aCreateAtt)
    {
    // Only user data chunks should be able to be data paged, i.e. only those
    // that can be created via the RChunk create methods.
    if ((iChunkType != EUserData && iChunkType != EUserSelfModCode) ||
        !(K::MemModelAttributes & EMemModelAttrDataPaging)) // Data paging device installed?
        {
        return;
        }
    // Pageable chunks must own their memory.
    __NK_ASSERT_DEBUG(!(iAttributes & EMemoryNotOwned));

    // Set the data paging attributes
    TUint dataPolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigDataPagingPolicyMask;
    if (dataPolicy == EKernelConfigDataPagingPolicyNoPaging)
        {
        return;
        }
    if (dataPolicy == EKernelConfigDataPagingPolicyAlwaysPage)
        {
        iAttributes |= EDataPaged;
        return;
        }
    TUint pagingAtt = aCreateAtt & TChunkCreate::EPagingMask;
    if (pagingAtt == TChunkCreate::EPaged)
        {
        iAttributes |= EDataPaged;
        return;
        }
    if (pagingAtt == TChunkCreate::EUnpaged)
        {
        return;
        }
    // No data paging attribute was specified for this chunk, so use the process's.
    __NK_ASSERT_DEBUG(pagingAtt == TChunkCreate::EPagingUnspec);
    DProcess* currentProcess = TheCurrentThread->iOwningProcess;
    if (currentProcess->iAttributes & DProcess::EDataPaged)
        {
        iAttributes |= EDataPaged;
        }
    }

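/**
Second phase construction: allocate the page bitmaps used to track committed
pages, then create the memory object providing this chunk's memory and, for
shared kernel chunk types, a supervisor mapping of it.

@param aInfo The creation information for the chunk.
@return KErrNone if successful, otherwise one of the other system wide error codes.
*/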
TInt DMemModelChunk::DoCreate(SChunkCreateInfo& aInfo)
    {
    __ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));
    __KTRACE_OPT(KMMU,Kern::Printf("Chunk %O DoCreate att=%08x",this,iAttributes));
    if (aInfo.iMaxSize<=0)
        return KErrArgument;

    iMaxSize = MM::RoundToPageSize(aInfo.iMaxSize);

    TInt maxpages=iMaxSize>>KPageShift;
    if (iAttributes & EDisconnected)
        {
        TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
        if (!pM)
            return KErrNoMemory;
        iPageBitMap=pM;
        __KTRACE_OPT(KMMU,Kern::Printf("PageBitMap at %08x, MaxPages %d",pM,maxpages));
        }
    if(iChunkType==ESharedKernelSingle || iChunkType==ESharedKernelMultiple)
        {
        TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
        if (!pM)
            return KErrNoMemory;
        iPermanentPageBitMap = pM;
        }

    TMemoryAttributes attr = EMemoryAttributeStandard;
    TBool mapInKernel = false;
    TBool nowipe = false;
    TBool executable = false;
    TBool movable = false;
    TInt r;

    switch(iChunkType)
        {
    case EUserSelfModCode:
        executable = true;
        movable = true;
        break;

    case EUserData:
    case ERamDrive:
        movable = true;
        break;

    case EKernelMessage:
    case ESharedKernelSingle:
    case ESharedKernelMultiple:
    case ESharedIo:
        mapInKernel = true;
        r = MM::MemoryAttributes(attr,*(TMappingAttributes2*)&aInfo.iMapAttr);
        if(r!=KErrNone)
            return r;
        break;

    case EKernelData:
        nowipe = true;
        break;

    case EDllData:
    case EKernelStack:
    case EDll:          // global code
    case EKernelCode:
    case EUserCode:     // local code
    case ESharedKernelMirror:
    default:
        __NK_ASSERT_DEBUG(0); // invalid chunk type
        return KErrArgument;
        }

    // calculate memory type...
    TMemoryObjectType memoryType = EMemoryObjectUnpaged;
    if (iAttributes & EMemoryNotOwned)
        {
        if (memoryType != EMemoryObjectUnpaged)
            return KErrArgument;
        memoryType = EMemoryObjectHardware;
        }
    if (iAttributes & EDataPaged)
        {
        if (memoryType != EMemoryObjectUnpaged)
            return KErrArgument;
        memoryType = EMemoryObjectPaged;
        }
    if (iAttributes & ECache)
        {
        if (memoryType != EMemoryObjectUnpaged)
            return KErrArgument;
        memoryType = EMemoryObjectDiscardable;
        }
    if (memoryType == EMemoryObjectUnpaged)
        {
        if (movable)
            memoryType = EMemoryObjectMovable;
        }

    // calculate memory flags...
    TMemoryCreateFlags flags = nowipe ? EMemoryCreateNoWipe : EMemoryCreateDefault;
    flags = (TMemoryCreateFlags)(flags|EMemoryCreateUseCustomWipeByte|(iClearByte<<EMemoryCreateWipeByteShift));
    if(executable)
        flags = (TMemoryCreateFlags)(flags|EMemoryCreateAllowExecution);

    r = MM::MemoryNew(iMemoryObject,memoryType,MM::BytesToPages(iMaxSize),flags,attr);
    if(r!=KErrNone)
        return r;

    if(mapInKernel)
        {
        r = MM::MappingNew(iKernelMapping, iMemoryObject, ESupervisorReadWrite, KKernelOsAsid);
        if(r!=KErrNone)
            return r; // Note, iMemoryObject will get cleaned up when the chunk is destroyed
        const TMappingAttributes2& lma = MM::LegacyMappingAttributes(attr,EUserReadWrite);
        *(TMappingAttributes2*)&iMapAttr = lma;
        }

#ifdef BTRACE_CHUNKS
    TKName nameBuf;
    Name(nameBuf);
    BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
    if(iOwningProcess)
        BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
    BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
#endif
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
    BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsChunk,iMemoryObject,this);
#endif
    return KErrNone;
    }

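/**
Set a fixed linear address for the chunk and claim any pages already mapped
there (e.g. those set up during boot) as the chunk's initial committed memory.

@param aAddr        The linear address of the start of the chunk.
@param aInitialSize The size, in bytes, of the initial committed region.
*/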
void DMemModelChunk::SetFixedAddress(TLinAddr aAddr, TInt aInitialSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O SetFixedAddress %08x size %08x",this,aAddr,aInitialSize));
    iFixedBase = aAddr;
    iSize = MM::RoundToPageSize(aInitialSize);
    if(iSize)
        MM::MemoryClaimInitialPages(iMemoryObject,iFixedBase,iSize,ESupervisorReadWrite);
    }

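/**
Set the chunk's EPrivate/EPublic and ECode attributes according to its type.

@param aInfo The creation information for the chunk.
@return KErrNone. Faults the kernel if the chunk type is invalid on this memory model.
*/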
TInt DMemModelChunk::SetAttributes(SChunkCreateInfo& aInfo)
    {
    switch(iChunkType)
        {
    case EKernelData:
    case EKernelMessage:
        iAttributes = EPrivate;
        break;
    case ERamDrive:
        iAttributes = EPrivate;
        break;
    case EUserData:
        if (aInfo.iGlobal)
            iAttributes = EPublic;
        else
            iAttributes = EPrivate;
        break;
    case EUserSelfModCode:
        if (aInfo.iGlobal)
            iAttributes = EPublic|ECode;
        else
            iAttributes = EPrivate|ECode;
        break;
    case ESharedKernelSingle:
    case ESharedKernelMultiple:
    case ESharedIo:
        iAttributes = EPublic;
        break;
    case EDllData:
    case EKernelStack:
    case EDll:          // global code
    case EKernelCode:
    case EUserCode:     // local code
    case ESharedKernelMirror:
    default:
        __NK_ASSERT_DEBUG(0); // invalid chunk type
        FAULT();
        }
    return KErrNone;
    }

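/**
Adjust a standard chunk (not double ended or disconnected) to @a aNewSize,
committing or decommitting memory at the end of the chunk as required.

@param aNewSize The requested size in bytes; this is rounded up to a whole number of pages.
@return KErrNone if successful, otherwise one of the other system wide error codes.
*/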
TInt DMemModelChunk::Adjust(TInt aNewSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust %08x",aNewSize));
    if (iAttributes & (EDoubleEnded|EDisconnected))
        return KErrGeneral;
    if (aNewSize<0 || aNewSize>iMaxSize)
        return KErrArgument;

    TInt r=KErrNone;
    TInt newSize=MM::RoundToPageSize(aNewSize);
    if (newSize!=iSize)
        {
        MM::MemoryLock(iMemoryObject);
        if (newSize>iSize)
            {
            __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust growing"));
            r=DoCommit(iSize,newSize-iSize);
            }
        else if (newSize<iSize)
            {
            __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust shrinking"));
            DoDecommit(newSize,iSize-newSize);
            }
        MM::MemoryUnlock(iMemoryObject);
        }
    __COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x",this,iSize));
    return r;
    }

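/**
Get the kernel linear address of a region of a shared kernel chunk (see also
Kern::ChunkAddress). The region must lie entirely within permanently committed
pages.

@param aOffset        Offset of the region from the start of the chunk, in bytes.
@param aSize          Size of the region, in bytes.
@param aKernelAddress On success, set to the kernel address of the region.
@return KErrNone if successful, KErrAccessDenied if the chunk has no kernel
        mapping, KErrNotFound if the region is not committed, or KErrArgument.
*/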
TInt DMemModelChunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
    {
    if(!iPermanentPageBitMap)
        return KErrAccessDenied;
    if(TUint(aOffset)>=TUint(iMaxSize))
        return KErrArgument;
    if(TUint(aOffset+aSize)>TUint(iMaxSize))
        return KErrArgument;
    if(aSize<=0)
        return KErrArgument;
    TInt start = aOffset>>KPageShift;
    TInt size = ((aOffset+aSize-1)>>KPageShift)-start+1;
    if(iPermanentPageBitMap->NotAllocated(start,size))
        return KErrNotFound;
    aKernelAddress = MM::MappingBase(iKernelMapping)+aOffset;
    return KErrNone;
    }

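/**
Get the kernel linear address and the physical address(es) of a region of a
shared kernel chunk (see also Kern::ChunkPhysicalAddress).

@param aOffset           Offset of the region from the start of the chunk, in bytes.
@param aSize             Size of the region, in bytes.
@param aKernelAddress    On success, set to the kernel address of the region.
@param aPhysicalAddress  On success, set to the physical address of the start of the region.
@param aPhysicalPageList If not NULL, filled in with the physical address of each page in the region.
@return KErrNone if successful and the region is physically contiguous,
        otherwise one of the system wide error codes.
*/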
TInt DMemModelChunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
    {
    if(aSize<=0)
        return KErrArgument;
    TInt r = Address(aOffset,aSize,aKernelAddress);
    if(r!=KErrNone)
        return r;
    TInt index = aOffset>>KPageShift;
    TInt count = ((aOffset+aSize-1)>>KPageShift)-index+1;
    r = MM::MemoryPhysAddr(iMemoryObject,index,count,aPhysicalAddress,aPhysicalPageList);
    if(r==KErrNone)
        aPhysicalAddress += aOffset&KPageMask;
    return r;
    }

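/**
Commit memory to a page aligned region of the chunk. Depending on
@a aCommitType, the memory is either allocated here (discontiguous or
contiguous RAM) or added from caller supplied physical pages.

@param aOffset     Page aligned offset of the region, in bytes.
@param aSize       Page aligned size of the region, in bytes.
@param aCommitType The type of commit to perform; see TCommitType.
@param aExtraArg   A physical page list or physical address, as required by @a aCommitType.
@return KErrNone if successful, otherwise one of the other system wide error codes.
*/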
TInt DMemModelChunk::DoCommit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCommit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));

    __NK_ASSERT_DEBUG(((aOffset|aSize)&KPageMask)==0);

    TInt r = KErrArgument;
    switch(aCommitType)
        {
    case DChunk::ECommitDiscontiguous:
        r = MM::MemoryAlloc(iMemoryObject, MM::BytesToPages(aOffset), MM::BytesToPages(aSize));
        break;

    case DChunk::ECommitDiscontiguousPhysical:
        r = MM::MemoryAddPages(iMemoryObject, MM::BytesToPages(aOffset), MM::BytesToPages(aSize), (TPhysAddr*)aExtraArg);
        break;

    case DChunk::ECommitContiguous:
        r = MM::MemoryAllocContiguous(iMemoryObject, MM::BytesToPages(aOffset), MM::BytesToPages(aSize), 0, *(TPhysAddr*)aExtraArg);
        break;

    case DChunk::ECommitContiguousPhysical:
        r = MM::MemoryAddContiguous(iMemoryObject, MM::BytesToPages(aOffset), MM::BytesToPages(aSize), (TPhysAddr)aExtraArg);
        break;

    case DChunk::ECommitVirtual:
    default:
        __NK_ASSERT_DEBUG(0); // Invalid commit type
        r = KErrNotSupported;
        break;
        }

    if(r==KErrNone)
        {
        iSize += aSize;
        if(iPermanentPageBitMap)
            iPermanentPageBitMap->Alloc(aOffset>>KPageShift,aSize>>KPageShift);
#ifdef BTRACE_CHUNKS
        TInt subcategory = (aCommitType & DChunk::ECommitPhysicalMask) ? BTrace::EChunkMemoryAdded : BTrace::EChunkMemoryAllocated;
        BTraceContext12(BTrace::EChunks,subcategory,this,aOffset,aSize);
#endif
        }

    return r;
    }

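/**
Decommit the memory in a page aligned region of the chunk. Memory the chunk
owns is freed; memory it does not own is just removed from the memory object.

@param aOffset Page aligned offset of the region, in bytes.
@param aSize   Page aligned size of the region, in bytes.
*/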
void DMemModelChunk::DoDecommit(TInt aOffset, TInt aSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoDecommit %x+%x",aOffset,aSize));

    __NK_ASSERT_DEBUG(((aOffset|aSize)&KPageMask)==0);

    TUint index = MM::BytesToPages(aOffset);
    TUint count = MM::BytesToPages(aSize);
    iSize -= count*KPageSize;
    if(iAttributes&EMemoryNotOwned)
        MM::MemoryRemovePages(iMemoryObject, index, count, 0);
    else
        MM::MemoryFree(iMemoryObject, index, count);

#ifdef BTRACE_CHUNKS
    if (count != 0)
        {
        TInt subcategory = (iAttributes & EMemoryNotOwned) ? BTrace::EChunkMemoryRemoved : BTrace::EChunkMemoryDeallocated;
        BTraceContext12(BTrace::EChunks,subcategory,this,aOffset,count*KPageSize);
        }
#endif
    }

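/**
Adjust a double ended chunk so that its committed region becomes
[aBottom, aTop), committing and decommitting pages as required. Pages in the
intersection of the old and new regions are preserved.

@param aBottom Offset, in bytes, of the bottom of the new committed region.
@param aTop    Offset, in bytes, of the top of the new committed region.
@return KErrNone if successful, otherwise one of the other system wide error codes.
*/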
TInt DMemModelChunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
    if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
        return KErrGeneral;
    if (aTop<0 || aBottom<0 || aTop<aBottom || aTop>iMaxSize)
        return KErrArgument;

    aBottom &= ~KPageMask;
    aTop = MM::RoundToPageSize(aTop);
    TInt newSize=aTop-aBottom;
    if (newSize>iMaxSize)
        return KErrArgument;

    MM::MemoryLock(iMemoryObject);
    TInt initBottom=iStartPos;
    TInt initTop=iStartPos+iSize;
    TInt nBottom=Max(aBottom,iStartPos);    // intersection bottom
    TInt nTop=Min(aTop,iStartPos+iSize);    // intersection top
    TInt r=KErrNone;
    if (nBottom<nTop)
        {
        __KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions intersect"));
        if (initBottom<nBottom)
            {
            iStartPos=aBottom;
            DoDecommit(initBottom,nBottom-initBottom);
            }
        if (initTop>nTop)
            DoDecommit(nTop,initTop-nTop);  // this changes iSize
        if (aBottom<nBottom)
            {
            r=DoCommit(aBottom,nBottom-aBottom);
            if (r==KErrNone)
                {
                if (aTop>nTop)
                    r=DoCommit(nTop,aTop-nTop);
                if (r==KErrNone)
                    iStartPos=aBottom;
                else
                    DoDecommit(aBottom,nBottom-aBottom);
                }
            }
        else if (aTop>nTop)
            r=DoCommit(nTop,aTop-nTop);
        }
    else
        {
        __KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions disjoint"));
        if (iSize)
            DoDecommit(initBottom,iSize);
        iStartPos=aBottom;
        if (newSize)
            r=DoCommit(iStartPos,newSize);
        }
    MM::MemoryUnlock(iMemoryObject);
    __COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x+%x",this,iStartPos,iSize));
    return r;
    }

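/**
Validate a region of a disconnected chunk, rounding @a aOffset down and
@a aSize up to page boundaries.

@return 1 if the adjusted region is valid and non-empty, KErrNone if @a aSize
        is zero, otherwise one of the system wide error codes.
*/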
TInt DMemModelChunk::CheckRegion(TInt& aOffset, TInt& aSize)
    {
    if((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
        return KErrGeneral;
    if(aOffset<0 || aSize<0)
        return KErrArgument;
    if(aSize==0)
        return KErrNone;

    TUint end = MM::RoundToPageSize(aOffset+aSize);
    if(end>TUint(iMaxSize))
        return KErrArgument;
    aOffset &= ~KPageMask;
    aSize = end-aOffset;
    if(end<=TUint(aOffset))
        return KErrArgument;

    return 1;
    }

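/**
Commit memory to a region of a disconnected chunk. Fails if any page in the
region is already committed.

@return KErrNone if successful, KErrAlreadyExists if part of the region is
        already committed, otherwise one of the other system wide error codes.
*/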
TInt DMemModelChunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));

    TInt r = CheckRegion(aOffset,aSize);
    if(r<=0)
        return r;

    MM::MemoryLock(iMemoryObject);
    TInt i=aOffset>>KPageShift;
    TInt n=aSize>>KPageShift;
    if (iPageBitMap->NotFree(i,n))
        r=KErrAlreadyExists;
    else
        {
        r=DoCommit(aOffset,aSize,aCommitType,aExtraArg);
        if (r==KErrNone)
            iPageBitMap->Alloc(i,n);
        }
    MM::MemoryUnlock(iMemoryObject);
    __COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
    return r;
    }

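/**
Find a free region of @a aSize bytes in a disconnected chunk and commit memory
to it. Guard pages and alignment are not supported on the flexible memory
model, so @a aGuard and @a aAlign must be zero.

@return The offset of the allocated region if successful, otherwise one of the
        system wide error codes.
*/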
TInt DMemModelChunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate %x %x %d",aSize,aGuard,aAlign));

    // the flexible memory model doesn't implement aGuard and aAlign...
    __NK_ASSERT_DEBUG(aGuard==0);
    (void)aGuard;
    __NK_ASSERT_DEBUG(aAlign==0);
    (void)aAlign;

    TInt dummyOffset = 0;
    TInt r = CheckRegion(dummyOffset,aSize);
    if(r<=0)
        return r;

    MM::MemoryLock(iMemoryObject);
    TInt n=aSize>>KPageShift;
    TInt i=iPageBitMap->AllocConsecutive(n, EFalse);    // allocate the offset
    if (i<0)
        r=KErrNoMemory;     // run out of reserved space for this chunk
    else
        {
        TInt offset=i<<KPageShift;
        __KTRACE_OPT(KMMU,Kern::Printf("Offset %x allocated",offset));
        r=DoCommit(offset,aSize);
        if (r==KErrNone)
            {
            iPageBitMap->Alloc(i,n);
            r=offset;       // if operation successful, return allocated offset
            }
        }
    MM::MemoryUnlock(iMemoryObject);
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate returns %x",r));
    __COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
    return r;
    }

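/**
Decommit the memory in a region of a disconnected chunk. Pages in the region
which are not currently committed are ignored.

@return KErrNone if successful, otherwise one of the other system wide error codes.
*/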
TInt DMemModelChunk::Decommit(TInt aOffset, TInt aSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",aOffset,aSize));
    TInt r = CheckRegion(aOffset,aSize);
    if(r<=0)
        return r;

    MM::MemoryLock(iMemoryObject);

    TInt i=aOffset>>KPageShift;
    TInt n=aSize>>KPageShift;
    __KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));

    TUint oldAvail = iPageBitMap->iAvail;
    iPageBitMap->SelectiveFree(i,n);    // free those positions which are actually allocated
    TUint oldSize = iSize;

    DoDecommit(aOffset,aSize);

    // Use the bit map to adjust the size of the chunk as unlocked and reclaimed pages
    // will have been unmapped but not removed from the bit map as DoDecommit() only
    // decommits the mapped pages.
    TUint actualFreedPages = iPageBitMap->iAvail - oldAvail;
    iSize = oldSize - (actualFreedPages << KPageShift);

    MM::MemoryUnlock(iMemoryObject);

    r=KErrNone;
    __COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
    return r;
    }

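/**
Mark a committed region of a cache chunk as discardable, allowing its pages to
be reclaimed by the system under memory pressure.

@return KErrNone if successful, otherwise one of the other system wide error codes.
*/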
TInt DMemModelChunk::Unlock(TInt aOffset, TInt aSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Unlock %x+%x",aOffset,aSize));
    if(!(iAttributes&ECache))
        return KErrGeneral;
    TInt r = CheckRegion(aOffset,aSize);
    if(r<=0)
        return r;

    MM::MemoryLock(iMemoryObject);

    TInt i=aOffset>>KPageShift;
    TInt n=aSize>>KPageShift;
    if(iPageBitMap->NotAllocated(i,n))
        r = KErrNotFound;
    else
        r = MM::MemoryAllowDiscard(iMemoryObject,i,n);

    MM::MemoryUnlock(iMemoryObject);

    return r;
    }

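/**
Mark a region of a cache chunk as locked, i.e. no longer discardable. If any
page in the region has already been reclaimed, the whole region is decommitted
and an error returned.

@return KErrNone if successful, otherwise one of the other system wide error codes.
*/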
TInt DMemModelChunk::Lock(TInt aOffset, TInt aSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Lock %x+%x",aOffset,aSize));
    if(!(iAttributes&ECache))
        return KErrGeneral;
    TInt r = CheckRegion(aOffset,aSize);
    if(r<=0)
        return r;

    r = MM::MemoryDisallowDiscard(iMemoryObject, MM::BytesToPages(aOffset), MM::BytesToPages(aSize));
    if(r!=KErrNone)
        Decommit(aOffset,aSize);

    return r;
    }

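/**
Check that the current thread is allowed to access this chunk, i.e. that the
chunk is not private to another process.

@return KErrNone if access is permitted, KErrAccessDenied if not.
*/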
TInt DMemModelChunk::CheckAccess()
    {
    if(iOwningProcess && iOwningProcess!=TheCurrentThread->iOwningProcess)
        return KErrAccessDenied;
    return KErrNone;
    }

void DMemModelChunk::BTracePrime(TInt aCategory)
    {
    DChunk::BTracePrime(aCategory);
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
    if (aCategory == BTrace::EFlexibleMemModel || aCategory == -1)
        {
        if (iMemoryObject)
            {
            MM::MemoryBTracePrime(iMemoryObject);
            BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsChunk,iMemoryObject,this);
            }
        }
#endif
    }

void DMemModelChunk::Substitute(TInt /*aOffset*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/)
    {
    MM::Panic(MM::EUnsupportedOperation);
    }