// Mercurial changeset metadata (from repository viewer export):
//   author:    Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
//   date:      Tue, 06 Jul 2010 15:50:07 +0300
//   changeset: 201 43365a9b78a3 (parent 90 947f0dc9f7a8, child 257 3e88ff8f41d5)
//   permissions: -rw-r--r--
// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies). |
2 |
// All rights reserved. |
|
3 |
// This component and the accompanying materials are made available |
|
4 |
// under the terms of the License "Eclipse Public License v1.0" |
|
5 |
// which accompanies this distribution, and is available |
|
6 |
// at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 |
// |
|
8 |
// Initial Contributors: |
|
9 |
// Nokia Corporation - initial contribution. |
|
10 |
// |
|
11 |
// Contributors: |
|
12 |
// |
|
13 |
// Description: |
|
14 |
// e32\memmodel\epoc\moving\mprocess.cpp |
|
15 |
// |
|
16 |
// |
|
17 |
||
18 |
#include "memmodel.h"
#include "cache_maintenance.h"
#include "mmboot.h"

// Alias: the thread's memory-model state is stored in the spare word of its
// wait link.  Used by the DThread::RawRead/RawWrite code below (iMState==EDead).
#define iMState iWaitLink.iSpare1

// Chunk names: "$DAT" is the per-process data/bss/stack chunk created by
// CreateDataBssStackArea(); "DLL$DATA" is the chunk holding DLL writable
// static data, created lazily by CreateDllDataChunk().
_LIT(KDollarDat,"$DAT");
_LIT(KLitDllDollarData,"DLL$DATA");
|
26 |
||
27 |
/******************************************** |
|
28 |
* Process |
|
29 |
********************************************/ |
|
30 |
void DMemModelProcess::Destruct()
	{
	// If this process is cached in any of the global "current" pointers used
	// by the moving memory model, clear them under the system lock so no
	// stale reference to the dying process survives, then run the generic
	// process destruction.
	NKern::LockSystem();
	if (this==TheCurrentAddressSpace)
		TheCurrentAddressSpace=NULL;
	if (this==TheCurrentVMProcess)
		TheCurrentVMProcess=NULL;
	if (this==TheCurrentDataSectionProcess)
		TheCurrentDataSectionProcess=NULL;
	if (this==TheCompleteDataSectionProcess)
		TheCompleteDataSectionProcess=NULL;
	NKern::UnlockSystem();
	DProcess::Destruct();
	}
|
44 |
||
45 |
TInt DMemModelProcess::NewChunk(DChunk*& aChunk, TLinAddr& aRunAddr) /* see below */ // (signature unchanged)
// Create a new chunk on behalf of this process.
// aChunk   - receives the created chunk on success (NULL on entry/failure).
// aInfo    - creation parameters: type, size, initial committed region,
//            whether to adjust (commit memory) and/or add to this process.
// aRunAddr - receives the chunk's run address when it is fixed or added.
// Returns KErrNone or a system-wide error code.
TInt DMemModelProcess::NewChunk(DChunk*& aChunk, SChunkCreateInfo& aInfo, TLinAddr& aRunAddr)
	{
	aChunk=NULL;
	DMemModelChunk* pC=NULL;
	TInt r=GetNewChunk(pC,aInfo);
	if (r!=KErrNone)
		{
		if (pC)
			pC->Close(NULL);
		return r;
		}
	// Propagate this process's address-model attributes to the chunk:
	// fixed-address processes get fixed-address chunks, private processes
	// get private (non-global) chunks.
	if (aInfo.iForceFixed || iAttributes & DMemModelProcess::EFixedAddress)
		pC->iAttributes |= DMemModelChunk::EFixedAddress;
	if (!aInfo.iGlobal && (iAttributes & DMemModelProcess::EPrivate)!=0)
		pC->iAttributes |= DMemModelChunk::EPrivate;
	// All code-bearing chunk types are fixed-address code chunks.
	if (pC->iChunkType==EDll || pC->iChunkType==EUserCode || pC->iChunkType==EUserSelfModCode || pC->iChunkType==EKernelCode)
		pC->iAttributes |= (DMemModelChunk::EFixedAddress|DMemModelChunk::ECode);
	pC->iOwningProcess=(aInfo.iGlobal)?NULL:this;
	r=pC->Create(aInfo);
	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdjust))
		{
		if (aInfo.iRunAddress!=0)
			pC->SetFixedAddress(aInfo.iRunAddress,aInfo.iPreallocated);
		if (aInfo.iPreallocated==0)
			{
			// Commit the initial region using the adjust style that matches
			// the chunk's layout (disconnected / double-ended / normal).
			if (pC->iAttributes & DChunk::EDisconnected)
				{
				r=pC->Commit(aInfo.iInitialBottom,aInfo.iInitialTop-aInfo.iInitialBottom);
				}
			else if (pC->iAttributes & DChunk::EDoubleEnded)
				{
				r=pC->AdjustDoubleEnded(aInfo.iInitialBottom,aInfo.iInitialTop);
				}
			else
				{
				r=pC->Adjust(aInfo.iInitialTop);
				}
			}
		// A fixed-address chunk with no home region yet must reserve one;
		// its base then becomes the run address.
		if (r==KErrNone && pC->iHomeRegionBase==0 && (pC->iAttributes&DMemModelChunk::EFixedAddress)!=0)
			{
			r=pC->Reserve(0);
			aRunAddr=(TLinAddr)pC->Base();
			}
		}
	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdd))
		{
		if (pC->iAttributes & DMemModelChunk::ECode)
			Mmu::Get().SyncCodeMappings();
		if (pC->iChunkType!=EUserCode)
			{
			// Adding to this process requires the process $LOCK mutex.
			r=WaitProcessLock();
			if (r==KErrNone)
				{
				r=AddChunk(pC,aRunAddr,EFalse);
				SignalProcessLock();
				}
			}
		else
			aRunAddr=(TLinAddr)pC->Base();	// code chunks always fixed address
		}
	if (r==KErrNone)
		{
		pC->iDestroyedDfc = aInfo.iDestroyedDfc;
		aChunk=(DChunk*)pC;
		}
	else
		pC->Close(NULL);	// NULL since chunk can't have been added to process
	return r;
	}
|
114 |
||
115 |
// Second-phase construction: establish the process's address-model
// attributes.  Kernel processes run supervisor/fixed/private; user processes
// marked ECodeSegAttFixed run fixed/private; all others are moving.
// Always returns KErrNone.
TInt DMemModelProcess::DoCreate(TBool aKernelProcess, TProcessCreateInfo& aInfo)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::DoCreate %O",this));

	// Decide the attribute set up front, then store it once.
	TInt attr = 0;
	if (aKernelProcess)
		attr = ESupervisor|EFixedAddress|EPrivate;
	else if (aInfo.iAttr & ECodeSegAttFixed)
		attr = EFixedAddress|EPrivate;
	iAttributes = attr;

	// A fixed-address user-side (non-supervisor) process may qualify for
	// fixed access.
	const TBool userSide = (attr & ESupervisor)==0;
	if (userSide && (attr & EFixedAddress)!=0)
		CheckForFixedAccess();
	return KErrNone;
	}
|
131 |
||
132 |
// Create the disconnected, process-private "$DAT" chunk that holds this
// process's static data (.data/.bss) plus space for its thread stacks.
// The chunk is committed from 0 to the rounded data/bss size and added to
// the process.  Returns KErrNone or a system-wide error code.
TInt DMemModelProcess::CreateDataBssStackArea(TProcessCreateInfo& aInfo)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::CreateDataBssStackArea %O",this));
	const TInt dataBssSize=Mmu::RoundToPageSize(aInfo.iTotalDataSize);
	const TInt maxSize=dataBssSize+PP::MaxStackSpacePerProcess;
	const TBool fixed=(iAttributes & EFixedAddress);

	__KTRACE_OPT(KPROC,Kern::Printf("DataBssSize=%x, chunk max size %x",dataBssSize,maxSize));

	// For a fixed process with static data loaded from a ROM image, run the
	// data at the linear base recorded in the ROM image header; otherwise
	// let chunk creation choose the address.
	TLinAddr runAddr=0;
	if (fixed && dataBssSize!=0 && aInfo.iCodeLoadAddress)
		{
		const TRomImageHeader& rih=*(const TRomImageHeader*)aInfo.iCodeLoadAddress;
		runAddr=rih.iDataBssLinearBase;
		}

	SChunkCreateInfo info;
	info.iGlobal=EFalse;
	info.iAtt=TChunkCreate::EDisconnected;
	info.iForceFixed=EFalse;
	info.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
	info.iType=EUserData;
	info.iMaxSize=maxSize;
	info.iInitialBottom=0;
	info.iInitialTop=dataBssSize;
	info.iPreallocated=0;
	info.iName.Set(KDollarDat);
	info.iOwner=this;
	info.iRunAddress=runAddr;
	return NewChunk((DChunk*&)iDataBssStackChunk,info,iDataBssRunAddress);
	}
|
163 |
||
164 |
// Add a chunk to this process (public entry point).  If the chunk is
// already mapped into the process this just increments its access count;
// otherwise it delegates to the internal AddChunk overload, which performs
// the actual mapping.  Takes and releases the process $LOCK mutex.
TInt DMemModelProcess::AddChunk(DChunk* aChunk,TBool isReadOnly)
	{
	DMemModelChunk* pC=(DMemModelChunk*)aChunk;
	TInt r=WaitProcessLock();
	if (r==KErrNone)
		{
		TInt pos=0;
		r=ChunkIndex(pC,pos);
		TLinAddr dataSectionBase=0;
		if (r==0) // Found the chunk in this process, just up its count
			{
			iChunks[pos].iAccessCount++;
			__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (Access count incremented to %d)",aChunk,this,iChunks[pos].iAccessCount));
			SignalProcessLock();
			return KErrNone;
			}
		r=AddChunk(pC,dataSectionBase,isReadOnly);
		SignalProcessLock();
		}
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk returns %d",r));
	return r;
	}
|
186 |
||
187 |
// Perform the generic MMU flush required before aChunk is moved: data-move
// and data-permission-change flushes always, plus an instruction-side
// permission flush when the chunk carries code.
void FlushBeforeChunkMove(DMemModelChunk* aChunk)
	{
	TUint32 flags = Mmu::EFlushDMove|Mmu::EFlushDPermChg;
	const TBool hasCode = (aChunk->iAttributes & DMemModelChunk::ECode)!=0;
	if (hasCode)	// assumption here that code chunks don't move
		flags |= Mmu::EFlushIPermChg;
	Mmu::Get().GenericFlush(flags);
	}
|
195 |
||
196 |
// Map a chunk into this process for the first time.
// aDataSectionBase receives the address the chunk runs at in this process.
// The chunk is inserted (sorted by data-section base) into iChunks, the
// process attribute flags are updated, and - when this is the currently
// running user process - the chunk is moved/permissioned into place, which
// may require displacing another process from the data section.
//
// Statement order is critical: several paths deliberately drop and re-take
// (or flash) the system lock mid-operation, and cache flushes must precede
// chunk moves.
TInt DMemModelProcess::AddChunk(DMemModelChunk* aChunk, TLinAddr& aDataSectionBase, TBool isReadOnly)
	{
	//
	// Must hold the process $LOCK mutex before calling this
	//
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (for first time)",aChunk,this));
	TInt r=AllocateDataSectionBase(*((DMemModelChunk*)aChunk),(TUint&)aDataSectionBase);
	if(r!=KErrNone)
		return r;

	if (iNumChunks==KMaxChunksInProcess)
		return KErrOverflow;	// too many chunks in the process

	// Insert into iChunks keeping the array sorted by iDataSectionBase
	// (insertion sort shuffling entries up under the system lock).
	SChunkInfo *pC=iChunks;
	SChunkInfo *pE=pC+iNumChunks-1;
	NKern::LockSystem();
	while(pE>=pC && TUint(pE->iDataSectionBase)>TUint(aDataSectionBase))
		{
		pE[1]=pE[0];
		pE--;
		}
	pC=pE+1;
	pC->iDataSectionBase=aDataSectionBase;
	pC->isReadOnly=isReadOnly;
	pC->iAccessCount=1;
	pC->iChunk=aChunk;
	iNumChunks++;

	if(!(iAttributes&ESupervisor))
		{
		TInt attribs=aChunk->iAttributes;
		if (!(attribs&DMemModelChunk::EFixedAddress))
			{
			iNumMovingChunks++;
			iAttributes |= EMoving;
			}

		if (attribs&DMemModelChunk::EFixedAccess)
			{
			NKern::UnlockSystem();
			AddFixedAccessChunk(aChunk);
			goto done;	// FINISHED
			}

		iAttributes |= EVariableAccess;
		if (attribs & DMemModelChunk::ECode)
			{
			iNumNonFixedAccessCodeChunks++;
			iAttributes |= EVariableCode;
			}
		if (++iNumNonFixedAccessChunks==1)
			{
			NKern::UnlockSystem();
			DoAttributeChange();	// change process from fixed to variable access
			NKern::LockSystem();
			}

		if (this!=TheCurrentThread->iOwningProcess)
			{
			// Adding chunk to another process
			if (this==TheCurrentDataSectionProcess && !(attribs&DMemModelChunk::EFixedAddress))
				TheCompleteDataSectionProcess=NULL;	// just set partial state change flag and leave chunk alone
			if (this==TheCurrentAddressSpace)
				TheCurrentAddressSpace=NULL;
			NKern::UnlockSystem();
			goto done;	// FINISHED
			}

		// Adding chunk to currently active user process
			{
			TheCurrentAddressSpace=NULL;
			Mmu& m = Mmu::Get();
			TUint32 ff=0;	// flush flags
			DMemModelChunk::TChunkState state=isReadOnly?DMemModelChunk::ERunningRO:DMemModelChunk::ERunningRW;
			if (attribs&DMemModelChunk::EFixedAddress)
				{
				// Fixed address chunk, just change permissions
				ff|=aChunk->ApplyTopLevelPermissions(state);
				}
			else if (this==TheCurrentDataSectionProcess)
				{
				// Moving chunk.
				// This process is already in the data section, so just move the chunk down.
				// Must do flushing first
				TheCompleteDataSectionProcess=NULL;
				FlushBeforeChunkMove(aChunk);
				aChunk->MoveToRunAddress(aDataSectionBase,state);	// idempotent
				TheCompleteDataSectionProcess=this;
				}
			else if (iNumMovingChunks==1)
				{
				// The first moving chunk being added to a process with the data section occupied by another process.
				// This is the problematic case - we must displace the other process from the data section.
				// However we must allow preemption after each chunk is moved. Note that if a reschedule does
				// occur the necessary chunk moves will have been done by the scheduler, so we can finish
				// immediately.
				// Must do cache flushing first
				m.GenericFlush(Mmu::EFlushDMove);
				if (TheCurrentDataSectionProcess)
					{
					if (TheCurrentDataSectionProcess->iAttributes & EVariableCode)
						ff |= Mmu::EFlushIPermChg;
					SChunkInfo* pOtherProcChunks=TheCurrentDataSectionProcess->iChunks;
					SChunkInfo* pEndOtherProcChunks=pOtherProcChunks+TheCurrentDataSectionProcess->iNumChunks;
					NKern::FlashSystem();
					// if a reschedule occurs, TheCompleteDataSectionProcess will become equal to this
					while (TheCompleteDataSectionProcess!=this && pOtherProcChunks<pEndOtherProcChunks)
						{
						DMemModelChunk *pChunk=pOtherProcChunks->iChunk;
						pChunk->MoveToHomeSection();
						++pOtherProcChunks;
						TheCompleteDataSectionProcess=NULL;
						NKern::FlashSystem();
						}
					}
				if (TheCompleteDataSectionProcess!=this)
					{
					if (attribs & DMemModelChunk::ECode)
						ff |= Mmu::EFlushIPermChg;
					aChunk->MoveToRunAddress(aDataSectionBase,state);
					TheCurrentDataSectionProcess=this;
					TheCompleteDataSectionProcess=this;
					}
				}
			TheCurrentAddressSpace=this;
			TheCurrentVMProcess=this;
			if (ff)
				m.GenericFlush(ff);
			}
		}
	NKern::UnlockSystem();
done:
	__KTRACE_OPT(KPROC,Kern::Printf("Added array entry for %x",aDataSectionBase));
	__KTRACE_OPT(KPROC,Kern::Printf("Chunks maxsize %x",pC->iChunk->MaxSize()));
	__DEBUG_EVENT(EEventUpdateProcess, this);
	return KErrNone;
	}
|
333 |
||
334 |
// Choose the run (data-section) address for aChunk in this process.
// Fixed-address chunks run at their home region base; DLL data runs at the
// global DLL data base; user-data chunks are first-fit allocated into the
// gap-free region between the data section base and the DLL data base by
// walking the (sorted) iChunks array.
// Returns KErrNone (aBase set), KErrAccessDenied for a foreign private
// chunk or bad chunk type, or KErrNoMemory if no gap is large enough.
TInt DMemModelProcess::AllocateDataSectionBase(DMemModelChunk& aChunk, TUint& aBase)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AllocateDataSectionBase"));
	aBase=0;
	if ((aChunk.iAttributes & DMemModelChunk::EPrivate) && this!=aChunk.iOwningProcess)
		return KErrAccessDenied;
	if (aChunk.iAttributes & DMemModelChunk::EFixedAddress)
		{
		aBase=aChunk.iHomeRegionBase;
		return KErrNone;
		}
	Mmu& m = Mmu::Get();
	TLinAddr base=0;
	TLinAddr maxBase=0;
	switch (aChunk.iChunkType)
		{
		case EUserData:
			base=m.iDataSectionBase;
			maxBase=m.iDllDataBase;
			break;
		case EUserCode:
		case EUserSelfModCode:
			// User code is always fixed-address on this memory model and
			// should have been handled above.
			MM::Panic(MM::EUserCodeNotFixed);
			break;
		case EDllData:
			aBase=m.iDllDataBase;
			return KErrNone;
		default:
			__KTRACE_OPT(KPANIC,Kern::Printf("DMemModelProcess::AllocateDataSectionBase BadChunkType %d",aChunk.iChunkType));
			return KErrAccessDenied;
		}

	// First-fit scan: iChunks is sorted by iDataSectionBase, so the first
	// gap of at least MaxSize() bytes wins.
	TLinAddr lastBase=base;
	SChunkInfo *pS=iChunks;
	SChunkInfo *pE=pS+iNumChunks;
	while (pS<pE)
		{
		TLinAddr thisBase=pS->iDataSectionBase;
		__KTRACE_OPT(KPROC,Kern::Printf("Chunk already at %x",thisBase));
		if (thisBase>=maxBase)
			break;
		if (thisBase>=base) // Within the range we are allocating
			{
			TInt gap=thisBase-lastBase;
			if (gap>=aChunk.MaxSize())
				break;
			lastBase=thisBase+pS->iChunk->MaxSize();
			}
		pS++;
		}
	if (lastBase+aChunk.MaxSize()>maxBase)
		{
		__KTRACE_OPT(KPROC,Kern::Printf("ERROR - none allocated, out of memory"));
		return KErrNoMemory;
		}
	aBase=lastBase;
	__KTRACE_OPT(KPROC,Kern::Printf("User allocated %x",aBase));
	return KErrNone;
	}
|
393 |
||
394 |
// Return the data-section (run) base of aChunk within this process, or NULL
// if the chunk is not mapped into this process.  Serialised with the
// process $LOCK mutex.
TUint8* DMemModelProcess::DataSectionBase(DMemModelChunk* aChunk)
	{
	// this can't be called after $LOCK is deleted
	Kern::MutexWait(*iProcessLock);
	TUint8* base=NULL;
	TInt index=0;
	if (ChunkIndex(aChunk,index)==KErrNone)
		{
		base=((TUint8*)iChunks[index].iDataSectionBase);
		__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::DataSectionBase %x",base));
		}
	else
		{
		__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::DataSectionBase chunk %08x not present in %08x",aChunk,this));
		}
	Kern::MutexSignal(*iProcessLock);
	return base;
	}
|
411 |
||
412 |
// Detach the chunk at aIndex from this process: flush/park it as required,
// remove its entry from the iChunks array, and update the process attribute
// flags (moving/variable-access counts and the global "current" pointers).
// The flush-before-remove ordering and the points at which the system lock
// is released are critical - see inline comments.
void DMemModelProcess::DoRemoveChunk(TInt aIndex)
	{
	// Must be called with process $LOCK mutex held
	__DEBUG_EVENT(EEventUpdateProcess, this);
	DMemModelChunk* chunk = iChunks[aIndex].iChunk;
	Mmu& m = Mmu::Get();
	NKern::LockSystem();
	TInt attribs=chunk->iAttributes;
	__KTRACE_OPT(KPROC,Kern::Printf("Removing Chunk attribs=%08x, Process attribs=%08x",attribs,iAttributes));
	if (!(attribs&DMemModelChunk::EFixedAccess))
		{
		// Must leave chunk in process chunk list until we have flushed the cache if necessary
		if (this==TheCurrentVMProcess && (attribs&DMemModelChunk::EFixedAddress))
			{
			TUint32 ff=chunk->ApplyTopLevelPermissions(DMemModelChunk::ENotRunning);
			m.GenericFlush(ff);
			// the system must now remain locked until the chunk is removed from the process chunk list
			}
		if (this==TheCurrentDataSectionProcess && !(attribs&DMemModelChunk::EFixedAddress))
			{
			// must do cache flush first
			FlushBeforeChunkMove(chunk);	// preemptible, but on return cache is free of chunk data
			chunk->MoveToHomeSection();
			// the system must now remain locked until the chunk is removed from the process chunk list
			}
		}

	// Remove the chunk from the process chunk list
	SChunkInfo *pD=iChunks+aIndex;
	SChunkInfo *pS=iChunks+aIndex+1;
	SChunkInfo *pE=iChunks+iNumChunks;
	while(pS<pE)
		*pD++=*pS++;
	iNumChunks--;

	// Update the process attribute flags
	if (!(attribs&DMemModelChunk::EFixedAddress))
		{
		if (--iNumMovingChunks==0)
			iAttributes &= ~EMoving;
		}
	if (!(attribs&DMemModelChunk::EFixedAccess))
		{
		if ((attribs&DMemModelChunk::ECode) && --iNumNonFixedAccessCodeChunks==0)
			iAttributes &= ~EVariableCode;
		// No moving chunks left => this process no longer occupies the
		// data section.
		if (this==TheCurrentDataSectionProcess && !(iAttributes&EMoving))
			{
			TheCurrentDataSectionProcess=NULL;
			TheCompleteDataSectionProcess=NULL;
			}
		if (--iNumNonFixedAccessChunks==0)
			{
			iAttributes &= ~EVariableAccess;
			if (this==TheCurrentVMProcess)
				{
				TheCurrentVMProcess=NULL;
				TheCurrentAddressSpace=NULL;
				}
			NKern::UnlockSystem();
			DoAttributeChange();	// change process from variable to fixed access
			}
		else
			NKern::UnlockSystem();
		}
	else
		{
		NKern::UnlockSystem();
		RemoveFixedAccessChunk(chunk);
		}
	}
|
482 |
||
483 |
/**
Final chance for process to release resources during its death.

Called with process $LOCK mutex held (if it exists).
This mutex will not be released before it is deleted.
I.e. no other thread will ever hold the mutex again.
*/
void DMemModelProcess::FinalRelease()
	{
	// Clean up any left over chunks (such as SharedIo buffers).
	// DoRemoveChunk(0) shrinks iNumChunks each time, so this drains the list.
	while(iNumChunks)
		DoRemoveChunk(0);
	}
|
496 |
||
497 |
// Drop one reference to aChunk from this process; the chunk is actually
// detached (DoRemoveChunk) only when its access count reaches zero.
// Serialised with the process $LOCK mutex.
void DMemModelProcess::RemoveChunk(DMemModelChunk *aChunk)
	{
	// note that this can't be called after the process $LOCK mutex has been deleted
	// since it can only be called by a thread in this process doing a handle close or
	// dying, or by the process handles array being deleted due to the process dying,
	// all of which happen before $LOCK is deleted.
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::RemoveChunk %08x from %08x",aChunk,this));
	Kern::MutexWait(*iProcessLock);
	TInt pos=0;
	TInt r=ChunkIndex(aChunk,pos);
	__KTRACE_OPT(KPROC,if(r) Kern::Printf("Chunk lookup failed with %d",r));
	if (r==0) // Found the chunk
		{
		__KTRACE_OPT(KPROC,Kern::Printf("Chunk access count %d",iChunks[pos].iAccessCount));
		if (--iChunks[pos].iAccessCount==0)
			DoRemoveChunk(pos);
		}
	Kern::MutexSignal(*iProcessLock);
	}
|
516 |
||
517 |
// Locate aChunk in this process's chunk list.  On success writes its array
// position to aPos and returns KErrNone; returns KErrNotFound when the
// chunk is absent or aChunk is NULL (aPos is left untouched on failure).
TInt DMemModelProcess::ChunkIndex(DMemModelChunk* aChunk,TInt& aPos)
	{
	if (!aChunk)
		return KErrNotFound;
	const SChunkInfo* info=iChunks;
	for (TInt ix=0; ix<iNumChunks; ++ix)
		{
		if (info[ix].iChunk==aChunk)
			{
			aPos=ix;
			return KErrNone;
			}
		}
	return KErrNotFound;
	}
|
534 |
||
535 |
// Release this process's reference to the DLL$DATA chunk (if any).
// NOTE(review): Kern::SafeClose is presumed to tolerate a NULL
// iDllDataChunk and to clear the pointer - confirm against kern_priv.h.
void DMemModelProcess::RemoveDllData()
//
// Call with CodeSegLock held
//
	{
	Kern::SafeClose((DObject*&)iDllDataChunk, this);
	}
|
542 |
||
543 |
// Lazily create the disconnected, process-private "DLL$DATA" chunk used to
// host writable static data for DLLs loaded into this process.  The result
// is stored in iDllDataChunk.
TInt DMemModelProcess::CreateDllDataChunk()
//
// Call with CodeSegLock held
//
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CreateDllDataChunk",this));
	SChunkCreateInfo info;
	info.iGlobal=EFalse;
	info.iAtt=TChunkCreate::EDisconnected;
	info.iForceFixed=EFalse;
	info.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
	info.iRunAddress=0;
	info.iPreallocated=0;
	info.iType=EDllData;
	info.iMaxSize=(iAttributes&EFixedAddress) ? 1 : Mmu::Get().iMaxDllDataSize;	// minimal size for fixed processes
	info.iName.Set(KLitDllDollarData);
	info.iOwner=this;
	info.iInitialBottom=0;
	info.iInitialTop=0;
	TLinAddr runAddr;
	return NewChunk((DChunk*&)iDllDataChunk,info,runAddr);
	}
|
566 |
||
567 |
// Close our reference to the DLL$DATA chunk and forget it, so a later
// CommitDllData() will recreate it on demand.
void DMemModelProcess::FreeDllDataChunk()
	{
	iDllDataChunk->Close(this);
	iDllDataChunk=NULL;
	}
|
572 |
||
573 |
// Commit aSize bytes of DLL static data at linear address aBase, creating
// the DLL$DATA chunk on first use.  aBase is converted to an offset within
// the chunk; an out-of-range offset is a fatal programming error.  If the
// commit fails and left the chunk empty, the chunk is freed again.
TInt DMemModelProcess::CommitDllData(TLinAddr aBase, TInt aSize)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CommitDllData %08x+%x",this,aBase,aSize));
	TInt r=KErrNone;
	if (!iDllDataChunk)
		r=CreateDllDataChunk();
	if (r==KErrNone)
		{
		Mmu& m = Mmu::Get();
		// Fixed processes run DLL data at the chunk's own base; moving
		// processes use the global DLL data base.
		TLinAddr dll_data_base=(iAttributes & EFixedAddress) ? (TLinAddr)iDllDataChunk->Base()
		                                                     : TLinAddr(m.iDllDataBase);
		TInt offset=aBase-dll_data_base;
		__ASSERT_ALWAYS(TUint32(offset)<TUint32(iDllDataChunk->iMaxSize),MM::Panic(MM::ECommitInvalidDllDataAddress));
		r=iDllDataChunk->Commit(offset, aSize);
		if (r!=KErrNone && iDllDataChunk->iSize==0)
			FreeDllDataChunk();
		}
	__KTRACE_OPT(KDLL,Kern::Printf("CommitDllData returns %d",r));
	return r;
	}
|
593 |
||
594 |
// Decommit aSize bytes of DLL static data at linear address aBase (inverse
// of CommitDllData).  A failed decommit is a fatal programming error.  The
// DLL$DATA chunk is freed once it becomes empty.
void DMemModelProcess::DecommitDllData(TLinAddr aBase, TInt aSize)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O DecommitDllData %08x+%x",this,aBase,aSize));
	Mmu& m = Mmu::Get();
	TLinAddr dll_data_base=(iAttributes & EFixedAddress) ? (TLinAddr)iDllDataChunk->Base()
	                                                     : TLinAddr(m.iDllDataBase);
	TInt offset=aBase-dll_data_base;
	TInt r=iDllDataChunk->Decommit(offset, aSize);
	__ASSERT_ALWAYS(r==KErrNone,MM::Panic(MM::EDecommitInvalidDllDataAddress));
	if (iDllDataChunk->iSize==0)
		FreeDllDataChunk();
	}
|
606 |
||
607 |
// Map a code segment into this process.  On the moving memory model the
// only per-process work required is committing a DLL's writable static
// data; kernel code and code with a fixed data address need no extra
// mappings.  Kernel-only code may only be mapped into a supervisor process.
// Returns KErrNone, KErrNotSupported, or an error from CommitDllData().
TInt DMemModelProcess::MapCodeSeg(DCodeSeg* aSeg)
	{
	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
	__KTRACE_OPT(KDLL,Kern::Printf("Process %O MapCodeSeg %C", this, aSeg));
	TBool kernel_only=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
	if (kernel_only && !(iAttributes&ESupervisor))
		return KErrNotSupported;
	if (seg.iAttr&ECodeSegAttKernel || seg.iDataAllocBase==-1)
		return KErrNone;	// no extra mappings needed for kernel code or code with fixed data address
	TInt r=KErrNone;
	if (seg.IsDll())
		{
		TInt total_data_size;
		TLinAddr data_base;
		seg.GetDataSizeAndBase(total_data_size, data_base);
		// NOTE(review): the original also tested r==KErrNone here, but r is
		// unconditionally KErrNone at this point (GetDataSizeAndBase does not
		// set it), so the dead test has been removed.
		if (total_data_size)
			{
			TInt size=Mmu::RoundToPageSize(total_data_size);
			r=CommitDllData(data_base, size);
			}
		}
	return r;
	}
|
630 |
||
631 |
// Undo MapCodeSeg(): decommit any DLL static data this code segment
// committed into the process.  Kernel code and code with a fixed data
// address have nothing to undo.
void DMemModelProcess::UnmapCodeSeg(DCodeSeg* aSeg)
	{
	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
	__KTRACE_OPT(KDLL,Kern::Printf("Process %O UnmapCodeSeg %C", this, aSeg));
	if (seg.iAttr&ECodeSegAttKernel || seg.iDataAllocBase==-1)
		return;	// no extra mappings needed for kernel code or code with fixed data address
	if (!seg.IsDll())
		return;
	TInt dataSize;
	TLinAddr dataBase;
	seg.GetDataSizeAndBase(dataSize, dataBase);
	if (dataSize)
		DecommitDllData(dataBase, Mmu::RoundToPageSize(dataSize));
	}
|
646 |
||
647 |
// Shared pools are not implemented on this (moving) memory model.
TInt DMemModelProcess::NewShPool(DShPool*& /* aPool */, TShPoolCreateInfo& /* aInfo */)
	{
	return KErrNotSupported;
	}
|
651 |
||
652 |
TInt DThread::RawRead(const TAny* aSrc, TAny* aDest, TInt aLength, TInt aFlags, TIpcExcTrap* aExcTrap)
//
// Read from the thread's process.
// aSrc is run address of memory to read. The memory is in aThread's address space.
// aDest is the address of destination. The memory is in the current process's address space.
// aExcTrap, exception trap object to be updated if the actual memory access is performed on another memory area. It happens
// when reading is performed in chunks or if home adress is read instead of the provided run address.
// Enter and return with system locked.
//
// Returns KErrNone, KErrDied (target thread dead), KErrBadDescriptor
// (source not mapped), or a demand-paging error.
	{
	const TUint8* pS=(const TUint8*)aSrc;
	TUint8* pD=(TUint8*)aDest;
	const TUint8* pC=NULL;		// current translated source address
	TBool check=ETrue;			// re-translate addresses on first pass / after lock flash
	TBool suspect=EFalse;		// destination not in current process -> use umemput
	DThread* pT=TheCurrentThread;
	while (aLength)
		{
		if (check)
			{
			// Re-validate both addresses; the target may have died or its
			// chunks moved while the system lock was released.
			suspect=((aFlags & KCheckLocalAddress) && !MM::CurrentAddress(pT,pD,aLength,ETrue));
			if (iMState==EDead)
				return KErrDied;
			pC=(const TUint8*)MM::CurrentAddress(this,pS,aLength,EFalse);
			__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead %08x<-[%08x::%08x]%08x+%x",pD,this,pS,pC,aLength));
			if (!pC)
				return KErrBadDescriptor;
			}
		// Copy in bounded pieces so the system lock can be flashed between them.
		TInt len=Min(aLength,K::MaxMemCopyInOneGo);
		if (aExcTrap)
			{
			aExcTrap->iSize = (len + 2*(sizeof(TInt32)-1));//+6 is for the worst case. We do not have to be precise here.
			aExcTrap->iRemoteBase = (TLinAddr)pC & ~(sizeof(TInt32)-1);
			if (aExcTrap->iLocalBase)
				aExcTrap->iLocalBase = (TLinAddr)pD & ~(sizeof(TInt32)-1);
			__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead exc. update: %08x %08x %08x",aExcTrap->iLocalBase,aExcTrap->iRemoteBase,aExcTrap->iSize));
			}

#ifdef __DEMAND_PAGING__
		XTRAP_PAGING_START(check);
		CHECK_PAGING_SAFE;
#endif

		// umemput performs a user-checked write when the destination is suspect.
		suspect?(void)umemput(pD,pC,len):(void)memcpy(pD,pC,len);

#ifdef __DEMAND_PAGING__
		XTRAP_PAGING_END;
		if(check<0)
			return check;	// paging error caused by bad client (I.e. 'this' thread was bad)
		if(check)
			{
			// A paging trap occurred: loop round and re-translate addresses.
			__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead paging trap, suspect %d, dest %08x, source %08x, length %d\n", suspect, pD, pC, len));
			continue;
			}
#endif

		pD+=len;
		pS+=len;
		pC+=len;
		aLength-=len;
		if (aLength)
			check=NKern::FlashSystem();	// if preempted, addresses must be re-checked
		}
	return KErrNone;
	}
|
716 |
||
717 |
TInt DThread::RawWrite(const TAny* aDest, const TAny* aSrc, TInt aLength, TInt aFlags, DThread* aOriginatingThread, TIpcExcTrap* aExcTrap)
//
// Write to the thread's process.
// aDest is run address of memory to write. It resides in this thread's address space.
// aSrc is address of the source buffer. It resides in the current process's address space.
// aOriginatingThread is the thread on behalf of which this operation is performed (eg client of device driver).
// Enter and return with system locked
// aExcTrap, exception trap object to be updated if the actual memory access is performed on another memory area. It happens
// when reading is performed in chunks or if home adress is read instead of the provided run address.
//
// Returns KErrNone, KErrDied (target thread dead), KErrBadDescriptor
// (destination not mapped, except for the file-server special case), or a
// demand-paging error.
	{
	TUint8* pD=(TUint8*)aDest;
	const TUint8* pS=(const TUint8*)aSrc;
	TUint8* pC=NULL;			// current translated destination address
	TBool check=ETrue;			// re-translate addresses on first pass / after lock flash
	TBool suspect=EFalse;		// source not in current process -> use umemget
	DThread* pT=TheCurrentThread;
	DThread* pO=aOriginatingThread;
	if (!pO)
		pO=pT;
	// Special case: both this thread and the originator belong to the file
	// server process - an untranslatable destination is then used as-is.
	DProcess* pF=K::TheFileServerProcess;
	TBool special=(iOwningProcess==pF && pO->iOwningProcess==pF);
	while (aLength)
		{
		if (check)
			{
			// Re-validate both addresses; the target may have died or its
			// chunks moved while the system lock was released.
			suspect=((aFlags & KCheckLocalAddress) && !MM::CurrentAddress(pT,pS,aLength,EFalse));
			if (iMState==EDead)
				return KErrDied;
			pC=(TUint8*)MM::CurrentAddress(this,pD,aLength,ETrue);
			// (trace tag says "RawRead" in the original source - left as-is)
			__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead [%08x::%08x]%08x<-%08x+%x",this,pD,pC,pS,aLength));
			if (!pC)
				{
				if (special)
					pC=pD;
				else
					return KErrBadDescriptor;
				}
			}
		// Copy in bounded pieces so the system lock can be flashed between them.
		TInt len=Min(aLength,K::MaxMemCopyInOneGo);
		if (aExcTrap)
			{
			aExcTrap->iSize = (len + 2*(sizeof(TInt32)-1));//+6 is for the worst case. We do not have to be precise here.
			aExcTrap->iRemoteBase = (TLinAddr)pC & ~(sizeof(TInt32)-1);
			if (aExcTrap->iLocalBase)
				aExcTrap->iLocalBase = (TLinAddr)pS & ~(sizeof(TInt32)-1);
			__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite exc. update %08x %08x %08x",aExcTrap->iLocalBase,aExcTrap->iRemoteBase,aExcTrap->iSize));
			}

#ifdef __DEMAND_PAGING__
		XTRAP_PAGING_START(check);
		// Must check that it is safe to page, unless we are reading from unpaged ROM in which case
		// we allow it. umemget does this anyway, so we just need to check if suspect is not set.
		if (!suspect)
			{
			CHECK_PAGING_SAFE_RANGE((TLinAddr)aSrc, aLength);
			CHECK_DATA_PAGING_SAFE_RANGE((TLinAddr)aDest, aLength);
			}
#endif

		// umemget performs a user-checked read when the source is suspect.
		suspect?(void)umemget(pC,pS,len):(void)memcpy(pC,pS,len);

#ifdef __DEMAND_PAGING__
		XTRAP_PAGING_END
		if(check<0)
			return check;	// paging error caused by bad client (I.e. 'this' thread was bad)
		if(check)
			{
			// A paging trap occurred: loop round and re-translate addresses.
			__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite paging trap, suspect %d, dest %08x, src %08x, length %d\n", suspect, pC, pD, len));
			continue;
			}
#endif

		pD+=len;
		pS+=len;
		pC+=len;
		aLength-=len;
		if (aLength)
			check=NKern::FlashSystem();	// if preempted, addresses must be re-checked
		}
	return KErrNone;
	}
|
799 |
||
800 |
#ifdef __DEBUGGER_SUPPORT__ |
|
801 |
||
802 |
// Debugger support: write aSize bytes (aValue) into code memory at aAddress
// under an IPC exception trap, saving the previous contents to aOldValue.
// The actual store and cache maintenance is done by WriteCode() while the
// system lock is held; the trap converts an access fault into an error
// return instead of a kernel exception.
TInt CodeModifier::SafeWriteCode(DProcess* aProcess, TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
	{
	//Set exception handler. Make sure the boundaries cover the worst case (aSize = 4)
	TIpcExcTrap xt;
	xt.iLocalBase=0;
	xt.iRemoteBase=(TLinAddr)aAddress&~3; //word aligned.
	xt.iSize=sizeof(TInt);
	xt.iDir=1;
	NKern::LockSystem();
	TInt r=xt.Trap(NULL);
	if (r==0)
		{
		r = WriteCode(aAddress, aSize, aValue, aOldValue);
		xt.UnTrap();
		}
	NKern::UnlockSystem();
	return r;
	}
|
820 |
||
821 |
/**
Write a 1, 2 or 4 byte value into code memory and keep the caches coherent.

The target address must lie either within the XIP ROM image or within the
non-XIP user code chunk; any other address is rejected.

@param aAddress  Address to patch.
@param aSize     1 or 2; any other value is treated as 4.
@param aValue    New value to store (truncated to aSize).
@param aOldValue Receives the previous contents at aAddress.
@return KErrNone on success, KErrBadDescriptor if aAddress is outside both
        the ROM image and the user code chunk.
*/
TInt CodeModifier::WriteCode(TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
	{
	TUint userChunkBase = (TUint)MM::UserCodeChunk->Base();
	TRomHeader romHeader = Epoc::RomHeader();

	if (!((aAddress >= romHeader.iRomBase) && (aAddress < (romHeader.iRomBase + romHeader.iUncompressedSize)))) //if not in ROM
		// '>=' (was '>'): an address exactly at base+MaxSize is one past the
		// end of the chunk and must be rejected too.
		if ((aAddress < userChunkBase) || (aAddress >= (userChunkBase + MM::UserCodeChunk->MaxSize()))) //and not in non-XIP code
			return KErrBadDescriptor;

	// If the page was moved by defrag there may be a cache line with the
	// wrong, old physical address, so we must invalidate this first.
	InternalCache::Invalidate(KCacheSelectD, (TLinAddr)aAddress, 4);

	// Copy data and clean/invalidate caches with interrupts disabled.
	TInt irq = NKern::DisableAllInterrupts();
	switch (aSize)
		{
		case 1:
			*(TUint8*)aOldValue = *(TUint8*)aAddress;
			*(TUint8*)aAddress = (TUint8)aValue;
			break;
		case 2:
			*(TUint16*)aOldValue = *(TUint16*)aAddress;
			*(TUint16*)aAddress = (TUint16)aValue;
			break;
		default: //It is 4 otherwise
			*(TUint32*)aOldValue = *(TUint32*)aAddress;
			*(TUint32*)aAddress = (TUint32)aValue;
			break;
		}
	CacheMaintenance::CodeChanged(aAddress, aSize, CacheMaintenance::ECodeModifier);
	NKern::RestoreInterrupts(irq);

	return KErrNone;
	}
|
856 |
#endif //__DEBUGGER_SUPPORT__ |
|
857 |
||
858 |
TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
//
// Read the header of a remote descriptor.
// Enter and return with system locked
//
	{
	// Default result: anything that does not parse as a valid descriptor
	// header is reported as KErrBadDescriptor.
	TInt r=KErrBadDescriptor;
	DThread* thread = TheCurrentThread;
	// aDest doubles as raw word storage while the header words are read in.
	TRawDesHeader& header = (TRawDesHeader&)aDest;

#ifdef __DEMAND_PAGING__
retry:
	TInt pagingFault;
	XTRAP_PAGING_START(pagingFault);
	CHECK_PAGING_SAFE;
	// Record the client on whose behalf we are reading, so a paging fault
	// in this window is attributed to that thread.
	thread->iIpcClient = this;
#endif

	// Translate the remote address into one usable from the current address
	// space; NULL if the range is not valid in 'this' thread's process.
	const TUint32* pS=(const TUint32*)MM::CurrentAddress(this,aSrc,sizeof(TDesC8),EFalse);
	if (pS && KErrNone==Kern::SafeRead(pS,&header[0],sizeof(TUint32)))
		{
		// The top bits of the first word encode the descriptor type.
		TInt type=header[0]>>KShiftDesType8;
		// Header length in bytes for each descriptor type; 0 marks an
		// invalid type value.
		static const TUint8 LengthLookup[16]={4,8,12,8,12,0,0,0,0,0,0,0,0,0,0,0};
		TInt len=LengthLookup[type];
		if(len>(TInt)sizeof(TUint32))
			{
			// Multi-word header: read the remaining words as well.
			if(KErrNone==Kern::SafeRead(pS+1,&header[1],len-sizeof(TUint32)))
				r = type;
			// else, bad descriptor
			}
		else if(len)
			r = type;
		// else, bad descriptor
		}

#ifdef __DEMAND_PAGING__
	thread->iIpcClient = NULL;
	XTRAP_PAGING_END;
	if(pagingFault<0)
		return pagingFault; // paging error caused by bad client (I.e. 'this' thread was bad)
	if(pagingFault)
		goto retry;
#endif

	// On success r holds the descriptor type; convert the raw header words
	// into the parsed form for the caller.
	return (r < 0) ? r : K::ParseDesHeader(aSrc, header, aDest);
	}
904 |
||
905 |
/**
Find the chunk in a thread's owning process whose data section contains the
given address.

@param aThread  Thread whose owning process is searched.
@param aAddress Address to look up.
@return The containing chunk, or NULL if no chunk starts at or below
        aAddress.
*/
DMemModelChunk* ChunkFromAddress(DThread* aThread, const TAny* aAddress)
	{
	DMemModelProcess* process = (DMemModelProcess*)aThread->iOwningProcess;
	DMemModelProcess::SChunkInfo* first = process->iChunks;
	DMemModelProcess::SChunkInfo* info = first + process->iNumChunks;
	// Scan backwards for the last chunk whose data section starts at or
	// below aAddress (the chunk list is ordered by base address).
	for (;;)
		{
		if (--info < first)
			return 0;	// address lies below every chunk in the process
		if (TUint(info->iDataSectionBase) <= TUint(aAddress))
			return info->iChunk;
		}
	}
|
915 |
||
916 |
/** |
|
917 |
Open a shared chunk in which a remote address range is located. |
|
918 |
*/ |
|
919 |
DChunk* DThread::OpenSharedChunk(const TAny* aAddress, TBool aWrite, TInt& aOffset) |
|
920 |
{ |
|
921 |
NKern::LockSystem(); |
|
922 |
||
923 |
DMemModelProcess* pP = (DMemModelProcess*)iOwningProcess; |
|
924 |
DMemModelProcess::SChunkInfo* pS=pP->iChunks; |
|
925 |
DMemModelProcess::SChunkInfo* pC=pS+pP->iNumChunks; |
|
926 |
while(--pC>=pS && TUint(pC->iDataSectionBase)>TUint(aAddress)) {}; |
|
927 |
if(pC>=pS) |
|
928 |
{ |
|
929 |
DMemModelChunk* chunk = pC->iChunk; |
|
930 |
if(chunk->iChunkType==ESharedKernelSingle || chunk->iChunkType==ESharedKernelMultiple) |
|
931 |
{ |
|
932 |
TInt offset = (TInt)aAddress-(TInt)chunk->Base(); |
|
933 |
if(TUint(offset)<TUint(chunk->iMaxSize) && chunk->Open()==KErrNone) |
|
934 |
{ |
|
935 |
aOffset = offset; |
|
936 |
NKern::UnlockSystem(); |
|
937 |
return chunk; |
|
938 |
} |
|
939 |
} |
|
940 |
} |
|
941 |
NKern::UnlockSystem(); |
|
942 |
return 0; |
|
943 |
} |
|
944 |
||
945 |
/**
Prepare the physical pages backing a linear address range for DMA.

Only supported when the owning process is fixed-address.

@param aLinAddr          Start of the linear address range.
@param aSize             Size of the range in bytes.
@param aPhysicalPageList Receives the physical addresses of the pages.
@return KErrNotSupported for a non-fixed-address process, otherwise the
        result of Mmu::PreparePagesForDMA().
*/
TInt DThread::PrepareMemoryForDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
	{
	TBool fixedAddress = (iOwningProcess->iAttributes & DMemModelProcess::EFixedAddress) != 0;
	if (!fixedAddress)
		return KErrNotSupported;
	Mmu& mmu = (Mmu&)*MmuBase::TheMmu;
	return mmu.PreparePagesForDMA((TLinAddr)aLinAddr, aSize, aPhysicalPageList);
	}
|
952 |
||
953 |
/**
Release pages previously prepared for DMA by PrepareMemoryForDMA().

Only supported when the owning process is fixed-address.

@param aLinAddr          Start of the linear address range.
@param aSize             Size of the range in bytes.
@param aPhysicalPageList Physical page list filled by PrepareMemoryForDMA().
@return KErrNotSupported for a non-fixed-address process, otherwise the
        result of Mmu::ReleasePagesFromDMA().
*/
TInt DThread::ReleaseMemoryFromDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
	{
	TBool fixedAddress = (iOwningProcess->iAttributes & DMemModelProcess::EFixedAddress) != 0;
	if (!fixedAddress)
		return KErrNotSupported;
	// Number of whole pages spanned by [aLinAddr, aLinAddr + aSize).
	TInt pageCount = (((TInt)aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;
	Mmu& mmu = (Mmu&)*MmuBase::TheMmu;
	return mmu.ReleasePagesFromDMA(aPhysicalPageList, pageCount);
	}