// Copyright (c) 1995-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\mcodeseg.cpp
//
//

#include "memmodel.h"
#include <mmubase.inl>
#include "cache_maintenance.h"
#include <demand_paging.h>

DCodeSeg* M::NewCodeSeg(TCodeSegCreateInfo&)
//
// Create a new instance of this class.
//
	{
	__KTRACE_OPT(KDLL,Kern::Printf("M::NewCodeSeg"));
	return new DMemModelCodeSeg;
	}


//
// DMemModelCodeSegMemory
//

DEpocCodeSegMemory* DEpocCodeSegMemory::New(DEpocCodeSeg* aCodeSeg)
	{
	return new DMemModelCodeSegMemory(aCodeSeg);
	}


DMemModelCodeSegMemory::DMemModelCodeSegMemory(DEpocCodeSeg* aCodeSeg)
	: DMmuCodeSegMemory(aCodeSeg)
	{
	}

TInt DMemModelCodeSegMemory::Create(TCodeSegCreateInfo& aInfo)
	{
	TInt r = DMmuCodeSegMemory::Create(aInfo);
	if(r!=KErrNone)
		return r;

	Mmu& m=Mmu::Get();

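	// iOsAsids tracks the address spaces this code segment is mapped into.
	// The allocator starts with no bits available; a bit is freed when the
	// code is mapped into the corresponding OS ASID (hence the 'inverted
	// logic' assert in the destructor).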
	iOsAsids = TBitMapAllocator::New(m.iNumOsAsids, EFalse);
	if(!iOsAsids)
		return KErrNoMemory;

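	// iPages records the physical address of every page: code pages first
	// (entries 0..iPageCount-1), then any temporary data-section pages.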
	TInt totalPages = iPageCount+iDataPageCount;
	iPages = (TPhysAddr*)Kern::Alloc(totalPages*sizeof(TPhysAddr));
	if(!iPages)
		return KErrNoMemory;
	TInt i;
	for (i=0; i<totalPages; ++i)
		iPages[i] = KPhysAddrInvalid;

	MmuBase::Wait();

	// allocate RAM pages...
	__KTRACE_OPT(KDLL,Kern::Printf("Alloc DLL pages %x,%x", iPageCount,iDataPageCount));
	TInt startPage = iIsDemandPaged ? iPageCount : 0;	// if demand paged, skip pages for code
	TInt endPage = iPageCount+iDataPageCount;
	r=m.AllocRamPages(iPages+startPage, endPage-startPage, EPageMovable);

	// initialise SPageInfo objects for allocated pages...
	if (r==KErrNone)
		{
		NKern::LockSystem();
		for (i=startPage; i<endPage; ++i)
			{
			SPageInfo* info = SPageInfo::FromPhysAddr(iPages[i]);
			info->SetCodeSegMemory(this,i);
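			// flash the system lock every 16 pages to bound the time it is held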
			if((i&15)==15)
				NKern::FlashSystem();
			}
		NKern::UnlockSystem();
		}

	MmuBase::Signal();

	if (r!=KErrNone)
		return r;

#ifdef BTRACE_CODESEGS
	BTrace8(BTrace::ECodeSegs,BTrace::ECodeSegMemoryAllocated,iCodeSeg,iPageCount<<m.iPageShift);
#endif

	DCodeSeg::Wait();

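	// reserve user code address space; the allocator works in units of
	// 1<<m.iAliasShift bytes, so round the total size up to that granularity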
	TInt code_alloc=((totalPages<<m.iPageShift)+m.iAliasMask)>>m.iAliasShift;
	r=MM::UserCodeAllocator->AllocConsecutive(code_alloc, ETrue);
	if (r<0)
		r = KErrNoMemory;
	else
		{
		MM::UserCodeAllocator->Alloc(r, code_alloc);
		iCodeAllocBase=r;
		iRamInfo.iCodeRunAddr=m.iUserCodeBase+(r<<m.iAliasShift);
		iRamInfo.iCodeLoadAddr=iRamInfo.iCodeRunAddr;
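		// place the data section: demand-paged code keeps it in separate
		// temporary pages after the page-rounded code, otherwise it is
		// loaded immediately after the code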
		if (iRamInfo.iDataSize)
			{
			if(iDataPageCount)
				iRamInfo.iDataLoadAddr=iRamInfo.iCodeLoadAddr+Mmu::RoundToPageSize(iRamInfo.iCodeSize);
			else
				iRamInfo.iDataLoadAddr=iRamInfo.iCodeLoadAddr+iRamInfo.iCodeSize;
			}

		DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
		r=pP->MapUserRamCode(this, ETrue);
		if (r==KErrNone)
			iCreator=pP;
		}

	DCodeSeg::Signal();
	return r;
	}


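// Replace the physical page backing the code at aOffset (e.g. after the page
// has been moved by the RAM defragmenter), remapping the new page in every
// address space the code is currently mapped into.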
void DMemModelCodeSegMemory::Substitute(TInt aOffset, TPhysAddr aOld, TPhysAddr aNew)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelCodeSegMemory::Substitute %x %08x %08x",aOffset,aOld,aNew));
	Mmu& m=Mmu::Get();

	if (iPages[aOffset>>KPageShift] != aOld)
		MM::Panic(MM::ECodeSegRemapWrongPage);

	iPages[aOffset>>KPageShift] = aNew;
	m.RemapPageByAsid(iOsAsids, iRamInfo.iCodeRunAddr+aOffset, aOld, aNew, m.PtePermissions(EUserCode));
	}


TInt DMemModelCodeSegMemory::Loaded(TCodeSegCreateInfo& aInfo)
	{
	__NK_ASSERT_DEBUG(iPages);

	TInt r = DMmuCodeSegMemory::Loaded(aInfo);
	if(r!=KErrNone)
		return r;

	Mmu& m=Mmu::Get();

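	// Code that is not demand paged has already been written to its final
	// location, so all that remains is cache maintenance to make the new
	// instructions visible to the instruction cache.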
	if(!iIsDemandPaged)
		{
		UNLOCK_USER_MEMORY();
		CacheMaintenance::CodeChanged(iRamInfo.iCodeLoadAddr, iRamInfo.iCodeSize);
		LOCK_USER_MEMORY();
		}
	else
		{
		// apply code fixups to pages which have already been loaded...
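		// (pages still set to KPhysAddrInvalid have not been paged in yet and
		// will get their fixups applied when they are)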
		TInt pageShift = m.iPageShift;
		for (TInt i = 0 ; i < iPageCount ; ++i)
			{
			if (iPages[i] != KPhysAddrInvalid)
				{
				r = ApplyCodeFixupsOnLoad((TUint32*)(iRamInfo.iCodeLoadAddr+(i<<pageShift)),iRamInfo.iCodeRunAddr+(i<<pageShift));
				if(r!=KErrNone)
					return r;
				}
			}

		// copy export directory (this will now have fixups applied)...
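		// a kernel-side copy is kept because the original pages may later be
		// paged out; the copy includes the word immediately preceding the
		// directory, hence the extra TLinAddr below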
		TInt exportDirSize = iRamInfo.iExportDirCount * sizeof(TLinAddr);
		if (exportDirSize > 0 || (exportDirSize==0 && (iCodeSeg->iAttr&ECodeSegAttNmdExpData)) )
			{
			exportDirSize += sizeof(TLinAddr);
			TLinAddr* expDir = (TLinAddr*)Kern::Alloc(exportDirSize);
			if (!expDir)
				return KErrNoMemory;
			iCopyOfExportDir = expDir;
			UNLOCK_USER_MEMORY();
			memcpy(expDir,(TAny*)(iRamInfo.iExportDir-sizeof(TLinAddr)),exportDirSize);
			LOCK_USER_MEMORY();
			}
		}

	// unmap code from loading process...
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	__ASSERT_ALWAYS(iCreator==pP, MM::Panic(MM::ECodeSegLoadedNotCreator));
	pP->UnmapUserRamCode(this, ETrue);
	iCreator=NULL;

	// discard any temporary pages used to store loaded data section...
	if(iDataPageCount)
		{
		MmuBase::Wait();
		TPhysAddr* pages = iPages+iPageCount;
		m.FreePages(pages,iDataPageCount, EPageMovable);
		for (TInt i = 0 ; i < iDataPageCount ; ++i)
			pages[i] = KPhysAddrInvalid;
		MmuBase::Signal();

		// see if we can free any virtual address space now we don't need any for loading data
		TInt data_start = ((iPageCount << m.iPageShift) + m.iAliasMask) >> m.iAliasShift;
		TInt data_end = (((iPageCount + iDataPageCount) << m.iPageShift) + m.iAliasMask) >> m.iAliasShift;
		if (data_end != data_start)
			{
			DCodeSeg::Wait();
			MM::UserCodeAllocator->Free(iCodeAllocBase + data_start, data_end - data_start);
			DCodeSeg::Signal();
			}

		iDataPageCount = 0;
		// reduce the size of the DCodeSeg now the data section has been moved
		iCodeSeg->iSize = iPageCount << m.iPageShift;
		}

	return KErrNone;
	}


void DMemModelCodeSegMemory::Destroy()
	{
	if(iCreator)
		iCreator->UnmapUserRamCode(this, ETrue);	// remove from creating process if not fully loaded
	}


DMemModelCodeSegMemory::~DMemModelCodeSegMemory()
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSegMemory::~DMemModelCodeSegMemory %x", this));
	__NK_ASSERT_DEBUG(iAccessCount==0);
	__NK_ASSERT_DEBUG(iOsAsids==0 || iOsAsids->Avail()==0);	// check not mapped (inverted logic!)

	Mmu& m=Mmu::Get();

	if(iCodeAllocBase>=0)
		{
		// free allocated virtual memory space...
		TInt size = (iPageCount+iDataPageCount)<<KPageShift;
		TInt code_alloc=(size+m.iAliasMask)>>m.iAliasShift;
		DCodeSeg::Wait();
		MM::UserCodeAllocator->Free(iCodeAllocBase, code_alloc);
		DCodeSeg::Signal();
		}

	if(iPages)
		{
#ifdef __DEMAND_PAGING__
		if (iIsDemandPaged)
			{
			// Return any paged memory to the paging system
			MmuBase::Wait();
			NKern::LockSystem();
			DemandPaging& p = *DemandPaging::ThePager;
			for (TInt i = 0 ; i < iPageCount ; ++i)
				{
				if (iPages[i] != KPhysAddrInvalid)
					p.NotifyPageFree(iPages[i]);
				}
			NKern::UnlockSystem();
			MmuBase::Signal();

			Kern::Free(iCopyOfExportDir);
			iCopyOfExportDir = NULL;
			}
#endif
		MmuBase::Wait();
		m.FreePages(iPages,iPageCount+iDataPageCount, EPageMovable);
		MmuBase::Signal();
		Kern::Free(iPages);
		iPages = NULL;
#ifdef BTRACE_CODESEGS
		BTrace8(BTrace::ECodeSegs,BTrace::ECodeSegMemoryDeallocated,this,iPageCount<<m.iPageShift);
#endif
		}
	delete iOsAsids;
	}


DMemModelCodeSeg::DMemModelCodeSeg()
//
// Constructor
//
	:	iCodeAllocBase(-1),
		iDataAllocBase(-1)
	{
	}


DMemModelCodeSeg::~DMemModelCodeSeg()
//
// Destructor
//
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::Destruct %C", this));
	Mmu& m=Mmu::Get();
	DCodeSeg::Wait();
	if (iCodeAllocBase>=0)
		{
		TBool kernel=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
		TBool global=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
		TInt r=KErrNone;
		if (kernel)
			{
			DMemModelProcess& kproc=*(DMemModelProcess*)K::TheKernelProcess;
			r=kproc.iCodeChunk->Decommit(iCodeAllocBase, iSize);
			}
		else if (global)
			{
			r=m.iGlobalCode->Decommit(iCodeAllocBase, iSize);
			}
		__ASSERT_DEBUG(r==KErrNone, MM::Panic(MM::EDecommitFailed));
		r=r;	// stop compiler warning
		}
	if(Memory())
		Memory()->Destroy();
	if (iDataAllocBase>=0 && !iXIP)
		{
		SRamCodeInfo& ri=RamInfo();
		TInt data_alloc=(ri.iDataSize+ri.iBssSize+m.iPageMask)>>m.iPageShift;
		MM::DllDataAllocator->Free(iDataAllocBase, data_alloc);
		}
	DCodeSeg::Signal();
	Kern::Free(iKernelData);
	DEpocCodeSeg::Destruct();
	}


TInt DMemModelCodeSeg::DoCreateRam(TCodeSegCreateInfo& aInfo, DProcess*)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::DoCreateRam %C", this));
	TBool kernel=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
	TBool global=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
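	// Three cases follow: kernel-side code lives in the kernel process's code
	// chunk, global code lives in the global code chunk, and ordinary user
	// code is handled by the DMemModelCodeSegMemory object at the end.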
	Mmu& m=Mmu::Get();
	SRamCodeInfo& ri=RamInfo();
	iSize = Mmu::RoundToPageSize(ri.iCodeSize+ri.iDataSize);
	if (iSize==0)
		return KErrCorrupt;
	TInt total_data_size=ri.iDataSize+ri.iBssSize;
	TInt r=KErrNone;
	if (kernel)
		{
		DMemModelProcess& kproc=*(DMemModelProcess*)K::TheKernelProcess;
		if (!kproc.iCodeChunk)
			r=kproc.CreateCodeChunk();
		if (r!=KErrNone)
			return r;
		r=kproc.iCodeChunk->Allocate(iSize, 0, m.iAliasShift);
		if (r<0)
			return r;
		iCodeAllocBase=r;
		ri.iCodeRunAddr=(TUint32)kproc.iCodeChunk->Base();
		ri.iCodeRunAddr+=r;
		ri.iCodeLoadAddr=ri.iCodeRunAddr;
		if (ri.iDataSize)
			ri.iDataLoadAddr=ri.iCodeLoadAddr+ri.iCodeSize;
		if (total_data_size)
			{
			iKernelData=Kern::Alloc(total_data_size);
			if (!iKernelData)
				return KErrNoMemory;
			ri.iDataRunAddr=(TLinAddr)iKernelData;
			}
		return KErrNone;
		}
	if (global)
		{
		if (!m.iGlobalCode)
			r=m.CreateGlobalCodeChunk();
		if (r==KErrNone)
			r=m.iGlobalCode->Allocate(iSize, 0, m.iAliasShift);
		if (r<0)
			return r;
		iCodeAllocBase=r;
		ri.iCodeRunAddr=(TUint32)m.iGlobalCode->Base();
		ri.iCodeRunAddr+=r;
		ri.iCodeLoadAddr=ri.iCodeRunAddr;
		ri.iDataLoadAddr=0;	// we don't allow static data in global code
		ri.iDataRunAddr=0;
		TInt loadSize = ri.iCodeSize+ri.iDataSize;
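		// fill any slack between the end of the loaded image and the end of
		// the committed pages with a distinctive byte pattern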
		memset((TAny*)(ri.iCodeRunAddr+loadSize), 0x03, iSize-loadSize);
		return KErrNone;
		}

	DCodeSeg::Wait();
	if (total_data_size && !IsExe())
		{
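		// static data for a RAM-loaded DLL: addresses are handed out top-down
		// from the DLL data region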
		TInt data_alloc=(total_data_size+m.iPageMask)>>m.iPageShift;
		__KTRACE_OPT(KDLL,Kern::Printf("Alloc DLL data %x", data_alloc));
		r=MM::DllDataAllocator->AllocConsecutive(data_alloc, ETrue);
		if (r<0)
			r = KErrNoMemory;
		else
			{
			MM::DllDataAllocator->Alloc(r, data_alloc);
			iDataAllocBase=r;
			ri.iDataRunAddr=m.iDllDataBase+m.iMaxDllDataSize-((r+data_alloc)<<m.iPageShift);
			r = KErrNone;
			}
		}
	DCodeSeg::Signal();

	if(r==KErrNone)
		r = Memory()->Create(aInfo);

	return r;
	}


TInt DMemModelCodeSeg::DoCreateXIP(DProcess* aProcess)
	{
//	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::DoCreateXIP %C proc %O", this, aProcess));
	return KErrNone;
	}


TInt DMemModelCodeSeg::Loaded(TCodeSegCreateInfo& aInfo)
	{
	if(iXIP)
		return DEpocCodeSeg::Loaded(aInfo);

	TBool kernel=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
	TBool global=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
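	// User code is finished off by its memory object; kernel and global code
	// was written in place, so it just needs cache maintenance and its final
	// page permissions applied.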
	if (Pages())
		{
		TInt r = Memory()->Loaded(aInfo);
		if(r!=KErrNone)
			return r;
		}
	else if (kernel && iExeCodeSeg!=this)
		{
		Mmu& m=Mmu::Get();
		DMemModelProcess& kproc=*(DMemModelProcess*)K::TheKernelProcess;
		SRamCodeInfo& ri=RamInfo();

		// NOTE: Must do IMB before changing permissions since ARMv6 is very pedantic and
		// doesn't let you clean a cache line which is marked as read only.
		CacheMaintenance::CodeChanged(ri.iCodeRunAddr, ri.iCodeSize);

		TInt offset=ri.iCodeRunAddr-TLinAddr(kproc.iCodeChunk->iBase);
		kproc.iCodeChunk->ApplyPermissions(offset, iSize, m.iKernelCodePtePerm);
		}
	else if (global)
		{
		Mmu& m=Mmu::Get();
		SRamCodeInfo& ri=RamInfo();
		CacheMaintenance::CodeChanged(ri.iCodeRunAddr, ri.iCodeSize);
		TInt offset=ri.iCodeRunAddr-TLinAddr(m.iGlobalCode->iBase);
		m.iGlobalCode->ApplyPermissions(offset, iSize, m.iGlobalCodePtePerm);
		}
	return DEpocCodeSeg::Loaded(aInfo);
	}

void DMemModelCodeSeg::ReadExportDir(TUint32* aDest)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::ReadExportDir %C %08x",this, aDest));

	if (!iXIP)
		{
		SRamCodeInfo& ri=RamInfo();
		TInt size=(ri.iExportDirCount+1)*sizeof(TLinAddr);

		if (Memory()->iCopyOfExportDir)
			{
			kumemput(aDest, Memory()->iCopyOfExportDir, size);
			return;
			}

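		// No kernel-side copy exists, so walk the physical pages directly,
		// mapping each in turn at a temporary kernel address and copying
		// from there.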
		NKern::ThreadEnterCS();
		Mmu& m=Mmu::Get();
		TLinAddr src=ri.iExportDir-sizeof(TLinAddr);

		MmuBase::Wait();
		TInt offset=src-ri.iCodeRunAddr;
		TPhysAddr* physArray = Pages();
		TPhysAddr* ppa=physArray+(offset>>m.iPageShift);
		while(size)
			{
			TInt pageOffset = src&m.iPageMask;
			TInt l=Min(size, m.iPageSize-pageOffset);
			TLinAddr alias_src = m.MapTemp(*ppa++,src-pageOffset)+pageOffset;
			// Note, the following memory access isn't XTRAP'ed, because...
			// a) This function is only called by the loader thread, so even if
			//    exceptions were trapped the system is doomed anyway
			// b) Any exception will cause the crash debugger/logger to be called,
			//    which will provide more information than if we trapped exceptions
			//    and returned an error code.
			kumemput32(aDest, (const TAny*)alias_src, l);
			m.UnmapTemp();
			size-=l;
			src+=l;
			aDest+=l/sizeof(TUint32);
			}
		MmuBase::Signal();

		NKern::ThreadLeaveCS();
		}
	}


TBool DMemModelCodeSeg::OpenCheck(DProcess* aProcess)
	{
	return FindCheck(aProcess);
	}

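// A code segment is compatible with a process only if it is not attached to
// some other process and, where it is tied to an EXE, only if the process
// was created from that same EXE code segment.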
TBool DMemModelCodeSeg::FindCheck(DProcess* aProcess)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("CSEG:%08x Compat? proc=%O",this,aProcess));
	if (aProcess)
		{
		DMemModelProcess& p=*(DMemModelProcess*)aProcess;
		DCodeSeg* pPSeg=p.CodeSeg();
		if (iAttachProcess && iAttachProcess!=aProcess)
			return EFalse;
		if (iExeCodeSeg && iExeCodeSeg!=pPSeg)
			return EFalse;
		}
	return ETrue;
	}


void DMemModelCodeSeg::BTracePrime(TInt aCategory)
	{
#ifdef BTRACE_CODESEGS
	if (aCategory == BTrace::ECodeSegs || aCategory == -1)
		{
		DCodeSeg::BTracePrime(aCategory);
		DMemModelCodeSegMemory* codeSegMemory = Memory();
		if(codeSegMemory && codeSegMemory->iPages && codeSegMemory->iPageCount)
			{
			BTrace8(BTrace::ECodeSegs,BTrace::ECodeSegMemoryAllocated,this,codeSegMemory->iPageCount<<Mmu::Get().iPageShift);
			}
		}
#endif
	}