// Copyright (c) 1995-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\mcodeseg.cpp
//
//

#include "memmodel.h"
#include <mmubase.inl>
#include "cache_maintenance.h"
#include <demand_paging.h>

DCodeSeg* M::NewCodeSeg(TCodeSegCreateInfo&)
//
// Create a new instance of this class.
//
    {
    __KTRACE_OPT(KDLL,Kern::Printf("M::NewCodeSeg"));
    return new DMemModelCodeSeg;
    }

//
// DMemModelCodeSegMemory
//

DEpocCodeSegMemory* DEpocCodeSegMemory::New(DEpocCodeSeg* aCodeSeg)
    {
    return new DMemModelCodeSegMemory(aCodeSeg);
    }


DMemModelCodeSegMemory::DMemModelCodeSegMemory(DEpocCodeSeg* aCodeSeg)
    : DMmuCodeSegMemory(aCodeSeg)
    {
    }


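// Create() below allocates everything a RAM-loaded code segment needs:
// physical pages for the code and (temporarily) the data section, a run
// of alias-granularity blocks of user code address space, and a mapping
// into the creating (loader) process so the loader can fill the pages in.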
TInt DMemModelCodeSegMemory::Create(TCodeSegCreateInfo& aInfo)
    {
    TInt r = DMmuCodeSegMemory::Create(aInfo);
    if(r!=KErrNone)
        return r;

    Mmu& m=Mmu::Get();

    iOsAsids = TBitMapAllocator::New(m.iNumOsAsids, EFalse);
    if(!iOsAsids)
        return KErrNoMemory;

    TInt totalPages = iPageCount+iDataPageCount;
    iPages = (TPhysAddr*)Kern::Alloc(totalPages*sizeof(TPhysAddr));
    if(!iPages)
        return KErrNoMemory;
    TInt i;
    for (i=0; i<totalPages; ++i)
        iPages[i] = KPhysAddrInvalid;

    MmuBase::Wait();

    // allocate RAM pages...
    __KTRACE_OPT(KDLL,Kern::Printf("Alloc DLL pages %x,%x", iPageCount,iDataPageCount));
    TInt startPage = iIsDemandPaged ? iPageCount : 0;   // if demand paged, skip pages for code
    TInt endPage = iPageCount+iDataPageCount;
    r=m.AllocRamPages(iPages+startPage, endPage-startPage, EPageMovable);

    // initialise SPageInfo objects for allocated pages...
    if (r==KErrNone)
        {
        NKern::LockSystem();
        for (i=startPage; i<endPage; ++i)
            {
            SPageInfo* info = SPageInfo::FromPhysAddr(iPages[i]);
            info->SetCodeSegMemory(this,i);
            if((i&15)==15)
                NKern::FlashSystem();
            }
        NKern::UnlockSystem();
        }

    MmuBase::Signal();

    if (r!=KErrNone)
        return r;

#ifdef BTRACE_CODESEGS
    BTrace8(BTrace::ECodeSegs,BTrace::ECodeSegMemoryAllocated,iCodeSeg,iPageCount<<m.iPageShift);
#endif

    DCodeSeg::Wait();

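    // Reserve user code address space in alias-granularity blocks.
    // Illustrative arithmetic only (the real shifts are MMU-dependent):
    // with 4KB pages (iPageShift=12) and 1MB blocks (iAliasShift=20),
    // 7 pages give code_alloc = (7*4096 + 0xFFFFF) >> 20 = 1 block.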
    TInt code_alloc=((totalPages<<m.iPageShift)+m.iAliasMask)>>m.iAliasShift;
    r=MM::UserCodeAllocator->AllocConsecutive(code_alloc, ETrue);
    if (r<0)
        r = KErrNoMemory;
    else
        {
        MM::UserCodeAllocator->Alloc(r, code_alloc);
        iCodeAllocBase=r;
        iRamInfo.iCodeRunAddr=m.iUserCodeBase+(r<<m.iAliasShift);
        iRamInfo.iCodeLoadAddr=iRamInfo.iCodeRunAddr;
        if (iRamInfo.iDataSize)
            {
            if(iDataPageCount)
                iRamInfo.iDataLoadAddr=iRamInfo.iCodeLoadAddr+Mmu::RoundToPageSize(iRamInfo.iCodeSize);
            else
                iRamInfo.iDataLoadAddr=iRamInfo.iCodeLoadAddr+iRamInfo.iCodeSize;
            }

        DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
        r=pP->MapUserRamCode(this, ETrue);
        if (r==KErrNone)
            iCreator=pP;
        }

    DCodeSeg::Signal();
    return r;
    }


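// Substitute one physical page of this code segment for another,
// presumably when the page is physically moved (an inference from the
// "remap wrong page" panic below; the caller is not named here): the
// page array entry is updated and every address space mapping the code
// is re-pointed via RemapPageByAsid().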
void DMemModelCodeSegMemory::Substitute(TInt aOffset, TPhysAddr aOld, TPhysAddr aNew)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelCodeSegMemory::Substitute %x %08x %08x",aOffset,aOld,aNew));
    Mmu& m=Mmu::Get();

    if (iPages[aOffset>>KPageShift] != aOld)
        MM::Panic(MM::ECodeSegRemapWrongPage);

    iPages[aOffset>>KPageShift] = aNew;
    m.RemapPageByAsid(iOsAsids, iRamInfo.iCodeRunAddr+aOffset, aOld, aNew, m.PtePermissions(EUserCode));
    }


TInt DMemModelCodeSegMemory::Loaded(TCodeSegCreateInfo& aInfo)
    {
    __NK_ASSERT_DEBUG(iPages);

    TInt r = DMmuCodeSegMemory::Loaded(aInfo);
    if(r!=KErrNone)
        return r;

    Mmu& m=Mmu::Get();

    if(!iIsDemandPaged)
        {
        UNLOCK_USER_MEMORY();
        CacheMaintenance::CodeChanged(iRamInfo.iCodeLoadAddr, iRamInfo.iCodeSize);
        LOCK_USER_MEMORY();
        }
    else
        {
        // apply code fixups to pages which have already been loaded...
        TInt pageShift = m.iPageShift;
        for (TInt i = 0 ; i < iPageCount ; ++i)
            {
            if (iPages[i] != KPhysAddrInvalid)
                {
                r = ApplyCodeFixupsOnLoad((TUint32*)(iRamInfo.iCodeLoadAddr+(i<<pageShift)),iRamInfo.iCodeRunAddr+(i<<pageShift));
                if(r!=KErrNone)
                    return r;
                }
            }

        // copy export directory (this will now have fixups applied)...
        TInt exportDirSize = iRamInfo.iExportDirCount * sizeof(TLinAddr);
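        // The word immediately before iExportDir appears to hold the
        // export count (hence the extra sizeof(TLinAddr) added below and
        // the start address one word before iExportDir), so the copy
        // includes it.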
        if (exportDirSize > 0 || (exportDirSize == 0 && (iCodeSeg->iAttr & ECodeSegAttNmdExpData)) )
            {
            exportDirSize += sizeof(TLinAddr);
            TLinAddr expDirRunAddr = iRamInfo.iExportDir - sizeof(TLinAddr);
            if (expDirRunAddr < iRamInfo.iCodeRunAddr ||
                expDirRunAddr + exportDirSize > iRamInfo.iCodeRunAddr + iRamInfo.iCodeSize)
                {// Invalid export section.
                return KErrCorrupt;
                }
            TLinAddr* expDir = (TLinAddr*)Kern::Alloc(exportDirSize);
            if (!expDir)
                return KErrNoMemory;
            iCopyOfExportDir = expDir;
            UNLOCK_USER_MEMORY();
            memcpy(expDir, (TAny*)expDirRunAddr, exportDirSize);
            LOCK_USER_MEMORY();
            }
        }

    // unmap code from loading process...
    DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
    __ASSERT_ALWAYS(iCreator==pP, MM::Panic(MM::ECodeSegLoadedNotCreator));
    pP->UnmapUserRamCode(this, ETrue);
    iCreator=NULL;

    // discard any temporary pages used to store loaded data section...
    if(iDataPageCount)
        {
        MmuBase::Wait();
        TPhysAddr* pages = iPages+iPageCount;
        m.FreePages(pages,iDataPageCount, EPageMovable);
        for (TInt i = 0 ; i < iDataPageCount ; ++i)
            pages[i] = KPhysAddrInvalid;
        MmuBase::Signal();

        // see if we can free any virtual address space now that we no
        // longer need it for loading the data section...
TInt data_start = ((iPageCount << m.iPageShift) + m.iAliasMask) >> m.iAliasShift; |
|
213 |
TInt data_end = (((iPageCount + iDataPageCount) << m.iPageShift) + m.iAliasMask) >> m.iAliasShift; |
|
214 |
if (data_end != data_start) |
|
215 |
{ |
|
216 |
DCodeSeg::Wait(); |
|
217 |
MM::UserCodeAllocator->Free(iCodeAllocBase + data_start, data_end - data_start); |
|
218 |
DCodeSeg::Signal(); |
|
219 |
} |
|
220 |
||
221 |
iDataPageCount = 0; |
|
222 |
//Reduce the size of the DCodeSeg now the data section has been moved |
|
223 |
iCodeSeg->iSize = iPageCount << m.iPageShift; |
|
224 |
} |
|
225 |
||
226 |
return KErrNone; |
|
227 |
} |
|
228 |
||
229 |
||
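// If the segment is abandoned before Loaded() has run, iCreator is
// still set and the mapping into the loader process is removed here.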
void DMemModelCodeSegMemory::Destroy()
    {
    if(iCreator)
        iCreator->UnmapUserRamCode(this, ETrue); // remove from creating process if not fully loaded
    }


DMemModelCodeSegMemory::~DMemModelCodeSegMemory()
    {
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSegMemory::~DMemModelCodeSegMemory %x", this));
    __NK_ASSERT_DEBUG(iAccessCount==0);
    __NK_ASSERT_DEBUG(iOsAsids==0 || iOsAsids->Avail()==0); // check not mapped (inverted logic!)

    Mmu& m=Mmu::Get();

    if(iCodeAllocBase>=0)
        {
        // free allocated virtual memory space...
        TInt size = (iPageCount+iDataPageCount)<<KPageShift;
        TInt code_alloc=(size+m.iAliasMask)>>m.iAliasShift;
        DCodeSeg::Wait();
        MM::UserCodeAllocator->Free(iCodeAllocBase, code_alloc);
        DCodeSeg::Signal();
        }

    if(iPages)
        {
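        // For demand-paged code some iPages entries may still be
        // KPhysAddrInvalid (never paged in), so only resident pages are
        // handed back to the pager before the whole array is freed below.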
#ifdef __DEMAND_PAGING__
        if (iIsDemandPaged)
            {
            // Return any paged memory to the paging system
            MmuBase::Wait();
            NKern::LockSystem();
            DemandPaging& p = *DemandPaging::ThePager;
            for (TInt i = 0 ; i < iPageCount ; ++i)
                {
                if (iPages[i] != KPhysAddrInvalid)
                    p.NotifyPageFree(iPages[i]);
                }
            NKern::UnlockSystem();
            MmuBase::Signal();

            Kern::Free(iCopyOfExportDir);
            iCopyOfExportDir = NULL;
            }
#endif
        MmuBase::Wait();
        m.FreePages(iPages,iPageCount+iDataPageCount, EPageMovable);
        MmuBase::Signal();
        Kern::Free(iPages);
        iPages = NULL;
#ifdef BTRACE_CODESEGS
        BTrace8(BTrace::ECodeSegs,BTrace::ECodeSegMemoryDeallocated,this,iPageCount<<m.iPageShift);
#endif
        }
    delete iOsAsids;
    }


DMemModelCodeSeg::DMemModelCodeSeg()
//
// Constructor
//
    : iCodeAllocBase(-1),
      iDataAllocBase(-1)
    {
    }


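// Note on the attribute tests below: a segment counts as "kernel" only
// when ECodeSegAttKernel is set without ECodeSegAttGlobal, and as
// "global" only when ECodeSegAttGlobal is set without ECodeSegAttKernel;
// each kind is decommitted from its own code chunk.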
DMemModelCodeSeg::~DMemModelCodeSeg()
//
// Destructor
//
    {
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::Destruct %C", this));
    Mmu& m=Mmu::Get();
    DCodeSeg::Wait();
    if (iCodeAllocBase>=0)
        {
        TBool kernel=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
        TBool global=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
        TInt r=KErrNone;
        if (kernel)
            {
            DMemModelProcess& kproc=*(DMemModelProcess*)K::TheKernelProcess;
            r=kproc.iCodeChunk->Decommit(iCodeAllocBase, iSize);
            }
        else if (global)
            {
            r=m.iGlobalCode->Decommit(iCodeAllocBase, iSize);
            }
        __ASSERT_DEBUG(r==KErrNone, MM::Panic(MM::EDecommitFailed));
        r=r; // stop compiler warning
        }
    if(Memory())
        Memory()->Destroy();
    if (iDataAllocBase>=0 && !iXIP)
        {
        SRamCodeInfo& ri=RamInfo();
        TInt data_alloc=(ri.iDataSize+ri.iBssSize+m.iPageMask)>>m.iPageShift;
        MM::DllDataAllocator->Free(iDataAllocBase, data_alloc);
        }
    DCodeSeg::Signal();
    Kern::Free(iKernelData);
    DEpocCodeSeg::Destruct();
    }


TInt DMemModelCodeSeg::DoCreateRam(TCodeSegCreateInfo& aInfo, DProcess*)
    {
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::DoCreateRam %C", this));
    TBool kernel=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
    TBool global=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
    Mmu& m=Mmu::Get();
    SRamCodeInfo& ri=RamInfo();
    iSize = Mmu::RoundToPageSize(ri.iCodeSize+ri.iDataSize);
    if (iSize==0)
        return KErrCorrupt;
    TInt total_data_size=ri.iDataSize+ri.iBssSize;
    TInt r=KErrNone;
    if (kernel)
        {
        DMemModelProcess& kproc=*(DMemModelProcess*)K::TheKernelProcess;
        if (!kproc.iCodeChunk)
            r=kproc.CreateCodeChunk();
        if (r!=KErrNone)
            return r;
        r=kproc.iCodeChunk->Allocate(iSize, 0, m.iAliasShift);
        if (r<0)
            return r;
        iCodeAllocBase=r;
        ri.iCodeRunAddr=(TUint32)kproc.iCodeChunk->Base();
        ri.iCodeRunAddr+=r;
        ri.iCodeLoadAddr=ri.iCodeRunAddr;
        if (ri.iDataSize)
            ri.iDataLoadAddr=ri.iCodeLoadAddr+ri.iCodeSize;
        if (total_data_size)
            {
            iKernelData=Kern::Alloc(total_data_size);
            if (!iKernelData)
                return KErrNoMemory;
            ri.iDataRunAddr=(TLinAddr)iKernelData;
            }
        return KErrNone;
        }
    if (global)
        {
        if (!m.iGlobalCode)
            r=m.CreateGlobalCodeChunk();
        if (r==KErrNone)
            r=m.iGlobalCode->Allocate(iSize, 0, m.iAliasShift);
        if (r<0)
            return r;
        iCodeAllocBase=r;
        ri.iCodeRunAddr=(TUint32)m.iGlobalCode->Base();
        ri.iCodeRunAddr+=r;
        ri.iCodeLoadAddr=ri.iCodeRunAddr;
        ri.iDataLoadAddr=0; // we don't allow static data in global code
        ri.iDataRunAddr=0;
        TInt loadSize = ri.iCodeSize+ri.iDataSize;
        memset((TAny*)(ri.iCodeRunAddr+loadSize), 0x03, iSize-loadSize);
        return KErrNone;
        }

DCodeSeg::Wait(); |
|
395 |
if (total_data_size && !IsExe()) |
|
396 |
{ |
|
397 |
TInt data_alloc=(total_data_size+m.iPageMask)>>m.iPageShift; |
|
398 |
__KTRACE_OPT(KDLL,Kern::Printf("Alloc DLL data %x", data_alloc)); |
|
399 |
r=MM::DllDataAllocator->AllocConsecutive(data_alloc, ETrue); |
|
400 |
if (r<0) |
|
401 |
r = KErrNoMemory; |
|
402 |
else |
|
403 |
{ |
|
404 |
MM::DllDataAllocator->Alloc(r, data_alloc); |
|
405 |
iDataAllocBase=r; |
|
406 |
ri.iDataRunAddr=m.iDllDataBase+m.iMaxDllDataSize-((r+data_alloc)<<m.iPageShift); |
|
407 |
r = KErrNone; |
|
408 |
} |
|
409 |
} |
|
410 |
DCodeSeg::Signal(); |
|
411 |
||
412 |
if(r==KErrNone) |
|
413 |
r = Memory()->Create(aInfo); |
|
414 |
||
415 |
return r; |
|
416 |
} |
|
417 |
||
418 |
||
TInt DMemModelCodeSeg::DoCreateXIP(DProcess* aProcess)
    {
//  __KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::DoCreateXIP %C proc %O", this, aProcess));
    return KErrNone;
    }


TInt DMemModelCodeSeg::Loaded(TCodeSegCreateInfo& aInfo)
    {
    if(iXIP)
        return DEpocCodeSeg::Loaded(aInfo);

    TBool kernel=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
    TBool global=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
    if (Pages())
        {
        TInt r = Memory()->Loaded(aInfo);
        if(r!=KErrNone)
            return r;
        }
    else if (kernel && iExeCodeSeg!=this)
        {
        Mmu& m=Mmu::Get();
        DMemModelProcess& kproc=*(DMemModelProcess*)K::TheKernelProcess;
        SRamCodeInfo& ri=RamInfo();

        // NOTE: Must do IMB before changing permissions since ARMv6 is very pedantic and
        // doesn't let you clean a cache line which is marked as read only.
        CacheMaintenance::CodeChanged(ri.iCodeRunAddr, ri.iCodeSize);

        TInt offset=ri.iCodeRunAddr-TLinAddr(kproc.iCodeChunk->iBase);
        kproc.iCodeChunk->ApplyPermissions(offset, iSize, m.iKernelCodePtePerm);
        }
    else if (global)
        {
        Mmu& m=Mmu::Get();
        SRamCodeInfo& ri=RamInfo();
        CacheMaintenance::CodeChanged(ri.iCodeRunAddr, ri.iCodeSize);
        TInt offset=ri.iCodeRunAddr-TLinAddr(m.iGlobalCode->iBase);
        m.iGlobalCode->ApplyPermissions(offset, iSize, m.iGlobalCodePtePerm);
        }
    return DEpocCodeSeg::Loaded(aInfo);
    }

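// Copy this segment's export directory to aDest. If a loader-made copy
// exists (demand-paged code, whose directory pages may not be resident)
// it is used directly; otherwise each physical page is temporarily
// aliased into the kernel with MapTemp() and copied out piecewise.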
void DMemModelCodeSeg::ReadExportDir(TUint32* aDest)
    {
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::ReadExportDir %C %08x",this, aDest));

    if (!iXIP)
        {
        SRamCodeInfo& ri=RamInfo();
        TInt size=(ri.iExportDirCount+1)*sizeof(TLinAddr);

        if (Memory()->iCopyOfExportDir)
            {
            kumemput(aDest, Memory()->iCopyOfExportDir, size);
            return;
            }

        NKern::ThreadEnterCS();
        Mmu& m=Mmu::Get();
        TLinAddr src=ri.iExportDir-sizeof(TLinAddr);

        MmuBase::Wait();
        TInt offset=src-ri.iCodeRunAddr;
        TPhysAddr* physArray = Pages();
        TPhysAddr* ppa=physArray+(offset>>m.iPageShift);
        while(size)
            {
            TInt pageOffset = src&m.iPageMask;
            TInt l=Min(size, m.iPageSize-pageOffset);
            TLinAddr alias_src = m.MapTemp(*ppa++,src-pageOffset)+pageOffset;
            // Note, the following memory access isn't XTRAP'ed, because...
            // a) This function is only called by the loader thread, so even if
            //    exceptions were trapped the system is doomed anyway
            // b) Any exception will cause the crash debugger/logger to be called,
            //    which provides more information than trapping the exception and
            //    returning an error code would.
            kumemput32(aDest, (const TAny*)alias_src, l);
            m.UnmapTemp();
            size-=l;
            src+=l;
            aDest+=l/sizeof(TUint32);
            }
        MmuBase::Signal();

        NKern::ThreadLeaveCS();
        }
    }

TBool DMemModelCodeSeg::OpenCheck(DProcess* aProcess)
    {
    return FindCheck(aProcess);
    }

TBool DMemModelCodeSeg::FindCheck(DProcess* aProcess)
    {
    __KTRACE_OPT(KDLL,Kern::Printf("CSEG:%08x Compat? proc=%O",this,aProcess));
    if (aProcess)
        {
        DMemModelProcess& p=*(DMemModelProcess*)aProcess;
        DCodeSeg* pPSeg=p.CodeSeg();
        if (iAttachProcess && iAttachProcess!=aProcess)
            return EFalse;
        if (iExeCodeSeg && iExeCodeSeg!=pPSeg)
            return EFalse;
        }
    return ETrue;
    }


void DMemModelCodeSeg::BTracePrime(TInt aCategory)
    {
#ifdef BTRACE_CODESEGS
    if (aCategory == BTrace::ECodeSegs || aCategory == -1)
        {
        DCodeSeg::BTracePrime(aCategory);
        DMemModelCodeSegMemory* codeSegMemory = Memory();
        if(codeSegMemory && codeSegMemory->iPages && codeSegMemory->iPageCount)
            {
            BTrace8(BTrace::ECodeSegs,BTrace::ECodeSegMemoryAllocated,this,codeSegMemory->iPageCount<<Mmu::Get().iPageShift);
            }
        }
#endif
    }