|
// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\direct\mchunk.cpp
//
//

#include <memmodel.h>
|
// Destructor. On this memory model a chunk owns a single linear region;
// return it to the RAM block allocator (under the RamAlloc mutex) and
// queue the optional destruction-notification DFC.
DMemModelChunk::~DMemModelChunk()
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("DMemModelChunk destruct %O",this));
    if (iRegionSize)
        {
        MM::WaitRamAlloc();
        MM::FreeRegion(iRegionBase,iRegionSize);
        __KTRACE_OPT(KMEMTRACE, Kern::Printf("MT:D %d %x %O",NTickCount(),this,this););
        MM::SignalRamAlloc();
#ifdef BTRACE_CHUNKS
        BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);
#endif
        }
    iRegionSize=0;

    TDfc* dfc = (TDfc*)__e32_atomic_swp_ord_ptr(&iDestroyedDfc, 0);
    if(dfc)
        dfc->Enque();
    }
|
// The base address is the same in every process on the direct memory
// model, so the aProcess parameter is ignored.
TUint8* DMemModelChunk::Base(DProcess* aProcess)
    {
    return iBase;
    }
|
// Second-phase construction. For chunk types that own memory, reserve the
// whole region from the RAM block allocator up front: on the direct memory
// model a chunk's memory is fully committed at creation and never moves.
TInt DMemModelChunk::DoCreate(SChunkCreateInfo& anInfo)
    {
    __ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));

    if(iAttributes&EMemoryNotOwned)
        return KErrNotSupported;
    if (anInfo.iMaxSize<=0)
        return KErrArgument;
    TInt r=KErrNone;
    iMaxSize=MM::RoundToBlockSize(anInfo.iMaxSize);
    switch (anInfo.iType)
        {
        case EDll:
        case EUserCode:
        case EUserSelfModCode:
        case EUserData:
        case EDllData:
        case ESharedKernelSingle:
        case ESharedKernelMultiple:
        case ESharedIo:
        case EKernelMessage:
            MM::WaitRamAlloc();
            r=MM::AllocRegion(iRegionBase, iMaxSize);
            if (r==KErrNone)
                iRegionSize=iMaxSize;
            else
                MM::AllocFailed=ETrue;
            MM::SignalRamAlloc();
            iBase=(TUint8*)iRegionBase;
            iSize=iMaxSize;
            if(r==KErrNone)
                {
                iMapAttr = EMapAttrCachedMax;
                __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCreate clear %x+%x",iRegionBase,iRegionSize));

                // Clear memory to value determined by chunk member
                memset((TAny*)iRegionBase, iClearByte, MM::RoundToBlockSize(iRegionSize));
                }
            break;
        default:
            break;
        }

    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCreate %O ret %d",this,r));
    __KTRACE_OPT(KMMU,Kern::Printf("RegionBase=%08x, RegionSize=%08x",iRegionBase,iRegionSize));
    __KTRACE_OPT(KMEMTRACE, {MM::WaitRamAlloc();Kern::Printf("MT:C %d %x %O",NTickCount(),this,this);MM::SignalRamAlloc();});
#ifdef BTRACE_CHUNKS
    TKName nameBuf;
    Name(nameBuf);
    BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
    if(iOwningProcess)
        BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
    BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
#endif
    return r;
    }
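
/*
Illustrative sketch (not part of this file): a device driver creating a
shared chunk, which arrives at DoCreate() above. All names and sizes are
examples only; see Kern::ChunkCreate() for the authoritative API.

    TChunkCreateInfo info;
    info.iType = TChunkCreateInfo::ESharedKernelMultiple;
    info.iMaxSize = 0x10000;            // rounded up by MM::RoundToBlockSize()
    info.iMapAttr = EMapAttrCachedMax;  // this model forces fully cached mappings
    info.iOwnsMemory = ETrue;           // EMemoryNotOwned chunks are rejected here

    DChunk* chunk;
    TLinAddr kernAddr;
    TUint32 mapAttr;
    TInt r = Kern::ChunkCreate(info, chunk, kernAddr, mapAttr);
*/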
|
// Point the chunk at an externally supplied fixed region instead of memory
// obtained from the RAM block allocator.
void DMemModelChunk::SetFixedAddress(TLinAddr anAddr, TInt aSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O SetFixedAddress %08X size %08X",this,anAddr,aSize));
    iSize=MM::RoundToBlockSize(aSize);
    if (iSize>iMaxSize)
        iMaxSize=iSize;
    iBase=(TUint8*)anAddr;
    }
|
TInt DMemModelChunk::Adjust(TInt aNewSize)
//
// Adjust a standard chunk. All of the chunk's memory is committed at
// creation on this model, so only the arguments are validated here.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust %08x",aNewSize));
    if (iAttributes & (EDoubleEnded|EDisconnected))
        return KErrGeneral;
    if (aNewSize<0 || aNewSize>iMaxSize)
        return KErrArgument;

    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x",this,iSize));
    __KTRACE_OPT(KMEMTRACE, {MM::WaitRamAlloc();Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);MM::SignalRamAlloc();});
    return KErrNone;
    }
|
TInt DMemModelChunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
//
// Adjust a double-ended chunk. Only the new bottom offset needs to be
// recorded; the memory itself is always present.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
    if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
        return KErrGeneral;
    if (aTop<0 || aBottom<0 || aTop<aBottom || aTop>iMaxSize)
        return KErrArgument;
    TInt newSize=aTop-aBottom;
    if (newSize>iMaxSize)
        return KErrArgument;
    iStartPos=aBottom;

    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x+%x",this,iStartPos,iSize));
    __KTRACE_OPT(KMEMTRACE, {MM::WaitRamAlloc();Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);MM::SignalRamAlloc();});
    return KErrNone;
    }
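
// Example: on a sufficiently large double-ended chunk,
// AdjustDoubleEnded(0x1000, 0x3000) validates the window and records
// iStartPos==0x1000; no memory moves, since everything stays committed.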
|
// Translate a chunk offset to a kernel-usable linear address, checking
// that the requested range lies within the chunk.
TInt DMemModelChunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
    {
    if(TUint(aOffset)>=TUint(iMaxSize))
        return KErrArgument;
    if(TUint(aOffset+aSize)>TUint(iMaxSize))
        return KErrArgument;
    if(aSize<=0)
        return KErrArgument;
    aKernelAddress = (TLinAddr)iBase+aOffset;
    return KErrNone;
    }
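
// Example: if iBase were 0x400000, Address(0x800, 0x100, addr) would set
// addr to 0x400800 and return KErrNone.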
|
// Resolve a chunk range to physical memory. Fills aPhysicalPageList (if
// supplied) with the physical address of each page in the range; returns
// KErrNone with aPhysicalAddress set if the range is physically contiguous,
// or 1 (a positive, non-error value) if it is not.
TInt DMemModelChunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
    {
    TInt r=Address(aOffset,aSize,aKernelAddress);
    if(r!=KErrNone)
        return r;

    TPhysAddr physStart = Epoc::LinearToPhysical(aKernelAddress);

    TInt pageShift = 12;
    TUint32 page = aKernelAddress>>pageShift<<pageShift;
    TUint32 lastPage = (aKernelAddress+aSize-1)>>pageShift<<pageShift;
    TUint32* pageList = aPhysicalPageList;
    TUint32 nextPhys = Epoc::LinearToPhysical(page);
    TUint32 pageSize = 1<<pageShift;
    while(page<=lastPage)
        {
        TPhysAddr phys = Epoc::LinearToPhysical(page);
        if(pageList)
            *pageList++ = phys;
        if(phys!=nextPhys)
            nextPhys = KPhysAddrInvalid;
        else
            nextPhys += pageSize;
        page += pageSize;
        }
    if(nextPhys==KPhysAddrInvalid)
        {
        // Memory is discontiguous...
        aPhysicalAddress = KPhysAddrInvalid;
        return 1;
        }
    else
        {
        // Memory is contiguous...
        aPhysicalAddress = physStart;
        return KErrNone;
        }
    }
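
/*
Illustrative sketch (not part of this file): fetching the page list of a
hypothetical DMemModelChunk* for a DMA transfer. Buffer size and page count
are examples only.

    TLinAddr kernAddr;
    TUint32 physAddr;
    TUint32 pages[4];                     // 4 * 4KB covers the 16KB below
    TInt r = chunk->PhysicalAddress(0, 0x4000, kernAddr, physAddr, pages);
    if (r == KErrNone)
        ; // one contiguous run starting at physAddr
    else if (r == 1)
        ; // discontiguous: use the per-page entries in pages[]
    else
        ; // negative value: argument error
*/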
|
TInt DMemModelChunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
//
// Commit to a disconnected chunk. Memory is always present on this model,
// so this only validates the request.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
    if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
        return KErrGeneral;
    if (aOffset<0 || aSize<0 || (aOffset+aSize)>iMaxSize)
        return KErrArgument;
    if(LOGICAL_XOR((TInt)aCommitType&DChunk::ECommitPhysicalMask, iAttributes&DChunk::EMemoryNotOwned))
        return KErrNotSupported; // Commit type doesn't match 'memory owned' type

    if((TInt)aCommitType&DChunk::ECommitPhysicalMask)
        return KErrNotSupported;
    if(aCommitType==DChunk::ECommitContiguous)
        {
        // We can't commit contiguous memory, we just have to take what's already there.
        // So check to see if memory is contiguous, and if not, return KErrNoMemory -
        // which is what other Memory Models do if they can't find enough contiguous RAM.
        TLinAddr kernAddr;
        if(PhysicalAddress(aOffset,aSize,kernAddr,*aExtraArg)!=KErrNone)
            return KErrNoMemory;
        }
    else if(aCommitType!=DChunk::ECommitDiscontiguous)
        return KErrArgument;

    return KErrNone;
    }
|
TInt DMemModelChunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
//
// Allocate offset and commit to a disconnected chunk. Not supported on the
// direct memory model, which cannot commit memory dynamically.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate %x %x %d",aSize,aGuard,aAlign));
    if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
        return KErrGeneral;
    if (aSize<=0 || aSize>iMaxSize)
        return KErrArgument;
    TInt r=KErrNotSupported;
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate returns %x",r));
    return r;
    }
|
TInt DMemModelChunk::Decommit(TInt anOffset, TInt aSize)
//
// Decommit from a disconnected chunk. A no-op beyond argument checking,
// since memory cannot actually be decommitted on this model.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",anOffset,aSize));
    if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
        return KErrGeneral;
    if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
        return KErrArgument;
    return KErrNone;
    }
|
// Substituting one physical page for another is impossible here, where
// linear addresses map one-to-one onto physical memory.
void DMemModelChunk::Substitute(TInt /*aOffset*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/)
    {
    MM::Panic(MM::EUnsupportedOperation);
    }
|
// Unlock marks pages of a cache chunk as discardable. Nothing is ever
// discarded on this model, so only the arguments are validated.
TInt DMemModelChunk::Unlock(TInt anOffset, TInt aSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Unlock %x+%x",anOffset,aSize));
    if (!(iAttributes&ECache))
        return KErrGeneral;
    if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
        return KErrGeneral;
    if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
        return KErrArgument;
    return KErrNone;
    }
|
// Lock reclaims previously unlocked pages; again, validation only.
TInt DMemModelChunk::Lock(TInt anOffset, TInt aSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Lock %x+%x",anOffset,aSize));
    if (!(iAttributes&ECache))
        return KErrGeneral;
    if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
        return KErrGeneral;
    if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
        return KErrArgument;
    return KErrNone;
    }
|
// Deny access to a private chunk from any process other than its owner;
// the kernel process is always allowed.
TInt DMemModelChunk::CheckAccess()
    {
    DProcess* pP=TheCurrentThread->iOwningProcess;
    if (iAttributes&EPrivate)
        {
        if (iOwningProcess && iOwningProcess!=pP && pP!=K::TheKernelProcess)
            return KErrAccessDenied;
        }
    return KErrNone;
    }
|
// Round a size up to the next multiple of the (power-of-two) RAM block size.
TUint32 MM::RoundToBlockSize(TUint32 aSize)
    {
    TUint32 m=MM::RamBlockSize-1;
    return (aSize+m)&~m;
    }
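
// Example, assuming RamBlockSize==0x1000 (4KB):
// RoundToBlockSize(0x1234) == 0x2000 and RoundToBlockSize(0x1000) == 0x1000.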
|
// Return a block-aligned region to the RAM block allocator. The caller must
// hold the RamAlloc mutex.
void MM::FreeRegion(TLinAddr aBase, TInt aSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("MM::FreeRegion base %08x size %08x",aBase,aSize));
    aSize=MM::RoundToBlockSize(aSize);
    __ASSERT_ALWAYS(aBase>=MM::UserDataSectionBase && aBase+aSize<=MM::UserDataSectionEnd, MM::Panic(MM::EFreeInvalidRegion));
    TInt block=(aBase-MM::UserDataSectionBase)>>MM::RamBlockShift;
    TInt nBlocks=aSize>>MM::RamBlockShift;
    MM::RamAllocator->Free(block, nBlocks);
    }
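
// Example, assuming 4KB blocks and UserDataSectionBase==0x400000:
// FreeRegion(0x405000, 0x2000) frees blocks 5 and 6.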
|
// Allocate a region of free blocks; on success aBase receives its base
// address. The caller must hold the RamAlloc mutex.
TInt MM::AllocRegion(TLinAddr& aBase, TInt aSize, TInt aAlign)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("MM::AllocRegion size 0x%x align %d",aSize,aAlign));
    TInt align=Max(aAlign-MM::RamBlockShift, 0);
    TInt nBlocks=MM::RoundToBlockSize(aSize)>>MM::RamBlockShift;
    TInt base=(TInt)(MM::UserDataSectionBase>>MM::RamBlockShift);
    TInt block=MM::RamAllocator->AllocAligned(nBlocks, align, base, ETrue); // returns first block number or -1
    if (block<0)
        return KErrNoMemory;
    MM::RamAllocator->Alloc(block,nBlocks);
    aBase=MM::UserDataSectionBase+(block<<MM::RamBlockShift);
    __KTRACE_OPT(KMMU,Kern::Printf("MM::AllocRegion address %08x",aBase));
    return KErrNone;
    }
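
// Example, assuming 4KB blocks and UserDataSectionBase==0x400000: a request
// for 0x2800 bytes rounds up to 3 blocks; if AllocAligned() picks block 5,
// aBase == 0x400000 + (5<<12) == 0x405000.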
|
// Claim a specific address range, widening it to whole blocks first. Fails
// with KErrInUse if any block in the range is already allocated.
TInt MM::ClaimRegion(TLinAddr aBase, TInt aSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("MM::ClaimRegion base %08x size %08x",aBase,aSize));
    TUint32 m=MM::RamBlockSize-1;
    aSize=MM::RoundToBlockSize(aSize+(aBase&m));
    aBase&=~m;
    if (aBase<MM::UserDataSectionBase || TUint32(aSize)>MM::UserDataSectionEnd-aBase)
        return KErrArgument;
    TInt block=(aBase-MM::UserDataSectionBase)>>MM::RamBlockShift;
    TInt nBlocks=aSize>>MM::RamBlockShift;
    if (MM::RamAllocator->NotFree(block, nBlocks))
        return KErrInUse;
    MM::RamAllocator->Alloc(block, nBlocks);
    return KErrNone;
    }
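
// Example, assuming 4KB blocks and UserDataSectionBase==0x400000:
// ClaimRegion(0x400801, 0x800) widens the request to the two enclosing
// blocks covering 0x400000..0x401FFF before checking that both are free.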
|
// Allocate a physically contiguous region. Each RAM bank is contiguous but
// separate banks need not be, so the search runs bank by bank, using the
// secondary allocator as scratch space, to avoid straddling a bank boundary.
TInt MM::AllocContiguousRegion(TLinAddr& aBase, TInt aSize, TInt aAlign)
    {
#ifndef __CPU_HAS_MMU
    return MM::AllocRegion(aBase, aSize, aAlign);
#else
    __KTRACE_OPT(KMMU,Kern::Printf("MM::AllocContiguousRegion size 0x%x align %d",aSize,aAlign));
    TBitMapAllocator* sa = MM::SecondaryAllocator;
    if (!sa)
        return MM::AllocRegion(aBase, aSize, aAlign); // only one physical bank

    TBitMapAllocator* ra = MM::RamAllocator;
    TInt align=Max(aAlign-MM::RamBlockShift, 0);
    TUint32 alignmask = (1u<<align)-1;
    TInt nBlocks=MM::RoundToBlockSize(aSize)>>MM::RamBlockShift;
    TInt base=(TInt)(MM::UserDataSectionBase>>MM::RamBlockShift);
    const SRamBank* banks = (const SRamBank*)TheSuperPage().iRamBootData;
    const SRamBank* pB = banks;
    TInt bnum = 0;
    TInt block = -1;
    for (; pB->iSize; ++pB)
        {
        TInt nb = pB->iSize >> MM::RamBlockShift;
        sa->CopyAlignedRange(ra, bnum, nb); // copy this bank's allocation state into the scratch allocator
        TInt basealign = (base + bnum) & alignmask;
        block = sa->AllocAligned(nBlocks, align, basealign, ETrue); // returns first block number or -1
        if (block>=0)
            break;
        bnum += nb;
        }
    if (pB->iSize == 0)
        return KErrNoMemory; // no single bank could satisfy the request
    MM::RamAllocator->Alloc(block + bnum, nBlocks);
    aBase = MM::UserDataSectionBase + ((block + bnum)<<MM::RamBlockShift);
    __KTRACE_OPT(KMMU,Kern::Printf("MM::AllocContiguousRegion address %08x",aBase));
    return KErrNone;
#endif
    }
|
// Map a physical address to its RAM block number by walking the bank table;
// returns KErrNotFound if the address lies in no bank.
TInt MM::BlockNumber(TPhysAddr aAddr)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("MM::BlockNumber %08x",aAddr));
    const SRamBank* banks = (const SRamBank*)TheSuperPage().iRamBootData;
    const SRamBank* pB = banks;
    TInt bnum = 0;
    for (; pB->iSize; ++pB)
        {
        if (aAddr >= pB->iBase)
            {
            TUint32 offset = aAddr - pB->iBase;
            if (offset < pB->iSize)
                {
                TInt bn = bnum + TInt(offset>>MM::RamBlockShift);
                __KTRACE_OPT(KMMU,Kern::Printf("MM::BlockNumber %08x->%x",aAddr,bn));
                return bn;
                }
            }
        TInt nb = pB->iSize >> MM::RamBlockShift;
        bnum += nb;
        }
    return KErrNotFound;
    }
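
// Example: with two hypothetical 8MB banks at 0x10000000 and 0x20000000 and
// 4KB blocks, BlockNumber(0x20001000) == 2048 + 1 == 2049.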
|
/********************************************
 * Hardware chunk abstraction
 ********************************************/

/**
Create a hardware chunk for a physical address range. On the direct memory
model no mapping is needed: the chunk's linear address is simply the
physical address, and aAttribs is ignored.

@pre Call in a thread context.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Calling thread must be in a critical section.
*/
EXPORT_C TInt DPlatChunkHw::New(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aAttribs)
    {
    CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DPlatChunkHw::New");
    __KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw::New phys=%08x, size=%x, attribs=%x",aAddr,aSize,aAttribs));
    aChunk=NULL;
    if (aSize<=0)
        return KErrArgument;
    DPlatChunkHw* pC=new DPlatChunkHw;
    if (!pC)
        return KErrNoMemory;
    __KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw created at %08x",pC));

    pC->iPhysAddr=aAddr;
    pC->iLinAddr=aAddr; // linear == physical on this memory model
    pC->iSize=aSize;
    aChunk=pC;
    return KErrNone;
    }
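
/*
Illustrative sketch (not part of this file): mapping a peripheral's register
bank. The physical address and attribute are examples only.

    DPlatChunkHw* hwChunk = NULL;
    TInt r = DPlatChunkHw::New(hwChunk, 0x80000000, 0x1000, EMapAttrSupRw);
    if (r == KErrNone)
        {
        volatile TUint32* regs = (volatile TUint32*)hwChunk->LinearAddress();
        // ... access device registers through regs ...
        hwChunk->Close(NULL);
        }
*/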
|
// Emit the chunk's current state when the EChunks BTrace category is
// re-primed.
void DMemModelChunk::BTracePrime(TInt aCategory)
    {
    DChunk::BTracePrime(aCategory);

#ifdef BTRACE_CHUNKS
    if (aCategory == BTrace::EChunks || aCategory == -1)
        {
        BTrace12(BTrace::EChunks, BTrace::EChunkMemoryAllocated,this,0,this->iSize);
        }
#endif
    }