// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\mprocess.cpp
//
//

#include "memmodel.h"
#include "mmboot.h"
#include "cache_maintenance.h"
#include <demand_paging.h>

#define iMState iWaitLink.iSpare1

// just for convenience...
#define KAmSelfMod (DMemModelChunk::ECode | DMemModelChunk::EAddressLocal)

_LIT(KDollarDat,"$DAT");
_LIT(KLitDollarCode,"$CODE");
_LIT(KLitDllDollarData,"DLL$DATA");

#ifdef __CPU_HAS_BTB
extern void __FlushBtb();
#endif

const TInt KChunkGranularity=4;

/********************************************
* Process
********************************************/
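// Process destructor. All chunks must already have been removed; this frees the
// chunk info array and the local linear section, returns the process's OS ASID
// to the MMU, and flushes the branch target buffer on CPUs that have one.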
|
void DMemModelProcess::Destruct()
    {
    __ASSERT_ALWAYS(!iChunkCount && !iCodeChunk && !iDllDataChunk, MM::Panic(MM::EProcessDestructChunksRemaining));
    Kern::Free(iChunks);
    Kern::Free(iLocalSection);
    if (iOsAsid)
        {
        Mmu& m=Mmu::Get();
        MmuBase::Wait();
        m.FreeOsAsid(iOsAsid);
        iOsAsid=0;
        MmuBase::Signal();
#ifndef __SMP__
        LastUserSelfMod=0;  // must force a BTB flush when next selfmod chunk switched in
#endif
        }
#ifdef __CPU_HAS_BTB
    __FlushBtb();
#endif
    DProcess::Destruct();
    }

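// Create a new chunk on behalf of this process. Depending on aInfo.iOperations
// the chunk is also adjusted (initial region committed) and added to the process
// address space; on failure the chunk is closed before returning.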
|
TInt DMemModelProcess::NewChunk(DChunk*& aChunk, SChunkCreateInfo& aInfo, TLinAddr& aRunAddr)
    {
    aChunk=NULL;
    DMemModelChunk* pC=NULL;
    TInt r=GetNewChunk(pC,aInfo);
    if (r!=KErrNone)
        {
        if (pC)
            pC->Close(NULL);
        return r;
        }
    TInt mapType=pC->iAttributes & DMemModelChunk::EMapTypeMask;
    pC->iOwningProcess=(mapType==DMemModelChunk::EMapTypeLocal)?this:NULL;
#ifdef __CPU_HAS_BTB
    if ((pC->iAttributes & KAmSelfMod) == KAmSelfMod)   // it's a potentially overlapping self-mod
        {
        iSelfModChunks++;
#ifndef __SMP__
        LastUserSelfMod = this; // we become the last self-modifying process
#endif
        __FlushBtb();   // needed because there may already be bad branches in the BTB
        }
#endif
    r=pC->Create(aInfo);
    if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdjust))
        {
        if (aInfo.iRunAddress!=0)
            pC->SetFixedAddress(aInfo.iRunAddress,aInfo.iPreallocated);
        if (aInfo.iPreallocated==0 && aInfo.iInitialTop!=0)
            {
            if (pC->iAttributes & DChunk::EDisconnected)
                {
                r=pC->Commit(aInfo.iInitialBottom,aInfo.iInitialTop-aInfo.iInitialBottom);
                }
            else if (pC->iAttributes & DChunk::EDoubleEnded)
                {
                r=pC->AdjustDoubleEnded(aInfo.iInitialBottom,aInfo.iInitialTop);
                }
            else
                {
                r=pC->Adjust(aInfo.iInitialTop);
                }
            }
        }
    if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdd))
        {
//      if (pC->iAttributes & DMemModelChunk::ECode)
//          MM::TheMmu->SyncCodeMappings();
        if (mapType!=DMemModelChunk::EMapTypeGlobal)
            {
            r=WaitProcessLock();
            if (r==KErrNone)
                {
                r=AddChunk(pC,aRunAddr,EFalse);
                SignalProcessLock();
                }
            }
        else
            aRunAddr=(TLinAddr)pC->Base();
        }
    if (r==KErrNone)
        {
        if (pC->iKernelMirror)
            aRunAddr = (TLinAddr)pC->iKernelMirror->Base();
        pC->iDestroyedDfc = aInfo.iDestroyedDfc;
        aChunk=(DChunk*)pC;
        }
    else
        pC->Close(NULL);    // NULL since chunk can't have been added to process
    return r;
    }

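// Second-phase construction of a process. The kernel process uses OS ASID 0 and
// unrestricted address check masks; a user process is allocated a fresh OS ASID,
// a page directory and a linear section covering the user-local address range.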
|
TInt DMemModelProcess::DoCreate(TBool aKernelProcess, TProcessCreateInfo& aInfo)
    {
    __KTRACE_OPT(KPROC,Kern::Printf(">DMemModelProcess::DoCreate %O",this));

    Mmu& m=Mmu::Get();
    TInt r=KErrNone;

    iSelfModChunks=0;   // we don't have any yet.

    if (aKernelProcess)
        {
        iAttributes |= ESupervisor;
        //iOsAsid=0;
        // Leave these till Mmu::Init2
        //  if (m.iLocalPdSize)
        //      iLocalPageDir=m.LinearToPhysical(TLinAddr(m.LocalPageDir(0)));
        //  iGlobalPageDir=m.LinearToPhysical(TLinAddr(m.GlobalPageDir(0)));
        m.iAsidInfo[0]=((TUint32)this)|1;
        iAddressCheckMaskR=0xffffffff;
        iAddressCheckMaskW=0xffffffff;
        }
    else
        {
        MmuBase::Wait();
        r=m.NewOsAsid(EFalse);
        if (r>=0)
            {
            iOsAsid=r;
            if (m.iLocalPdSize)
                iLocalPageDir=m.LinearToPhysical(TLinAddr(m.LocalPageDir(r)));
            else
                iGlobalPageDir=m.LinearToPhysical(TLinAddr(m.GlobalPageDir(r)));
            m.iAsidInfo[r] |= (TUint32)this;
            r=KErrNone;
            }
        MmuBase::Signal();
        if (r==KErrNone && 0==(iLocalSection=TLinearSection::New(m.iUserLocalBase, m.iUserLocalEnd)) )
            r=KErrNoMemory;
        }

    __KTRACE_OPT(KPROC,Kern::Printf("OS ASID=%d, LPD=%08x, GPD=%08x, ASID info=%08x",iOsAsid,iLocalPageDir,
                                        iGlobalPageDir,m.iAsidInfo[iOsAsid]));
    __KTRACE_OPT(KPROC,Kern::Printf("<DMemModelProcess::DoCreate %d",r));
    return r;
    }

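// Create the disconnected $DAT chunk holding the process's .data/.bss (committed
// here) with room above it for user thread stacks.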
|
TInt DMemModelProcess::CreateDataBssStackArea(TProcessCreateInfo& aInfo)
    {
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::CreateDataBssStackArea %O",this));
    Mmu& m=Mmu::Get();
    TInt dataBssSize=Mmu::RoundToPageSize(aInfo.iTotalDataSize);
    TInt maxSize=dataBssSize+PP::MaxStackSpacePerProcess;
    TLinAddr dataRunAddress=m.iUserLocalBase;
    iDataBssRunAddress=dataRunAddress;

    __KTRACE_OPT(KPROC,Kern::Printf("DataBssSize=%x, chunk max size %x",dataBssSize,maxSize));

    SChunkCreateInfo cinfo;
    cinfo.iGlobal=EFalse;
    cinfo.iAtt=TChunkCreate::EDisconnected;
    cinfo.iForceFixed=EFalse;
    cinfo.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
    cinfo.iType=EUserData;
    cinfo.iMaxSize=maxSize;
    cinfo.iInitialBottom=0;
    cinfo.iInitialTop=dataBssSize;
    cinfo.iPreallocated=0;
    cinfo.iName.Set(KDollarDat);
    cinfo.iOwner=this;
    cinfo.iRunAddress=0;
    TLinAddr cb;
    TInt r=NewChunk((DChunk*&)iDataBssStackChunk,cinfo,cb);
    return r;
    }

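// Add an existing chunk to this process. If the chunk is already mapped into the
// process, only its access count is incremented.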
|
TInt DMemModelProcess::AddChunk(DChunk* aChunk, TBool isReadOnly)
    {
    DMemModelChunk* pC=(DMemModelChunk*)aChunk;
    if ((pC->iAttributes & DMemModelChunk::EPrivate) && this!=pC->iOwningProcess)
        return KErrAccessDenied;
    TInt r=WaitProcessLock();
    if (r==KErrNone)
        {
        TInt pos=0;
        r=ChunkIndex(pC,pos);
        TLinAddr dataSectionBase=0;
        if (r==0)   // found the chunk in this process, so just increment its access count
            {
            iChunks[pos].iAccessCount++;
            __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (Access count incremented to %d)",aChunk,this,iChunks[pos].iAccessCount));
            SignalProcessLock();
            return KErrNone;
            }
        r=AddChunk(pC,dataSectionBase,isReadOnly);
        SignalProcessLock();
        }
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk returns %d",r));
    return r;
    }

void M::FsRegisterThread()
    {
    DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
    TInt mapType=pC->iAttributes & DMemModelChunk::EMapTypeMask;
    if (mapType!=DMemModelChunk::EMapTypeLocal)
        {
        DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
        TLinAddr dataSectionBase;
        TInt r=pP->WaitProcessLock();
        if (r==KErrNone)
            r=pP->AddChunk(pC,dataSectionBase,EFalse);
        __ASSERT_ALWAYS(r==KErrNone, MM::Panic(MM::EFsRegisterThread));
        pP->SignalProcessLock();
        }
    }

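// The iChunks array is kept sorted by chunk base address; the scan below finds
// the insertion point and rejects chunks whose address range would overlap an
// already-mapped chunk.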
|
TInt DMemModelProcess::AddChunk(DMemModelChunk* aChunk, TLinAddr& aDataSectionBase, TBool isReadOnly)
    {
    //
    // Must hold the process $LOCK mutex before calling this
    //
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %O to %O",aChunk,this));
    SChunkInfo *pC=iChunks;
    SChunkInfo *pE=pC+iChunkCount-1;
    TLinAddr base=TLinAddr(aChunk->iBase);
    TInt i=0;

#ifdef __CPU_HAS_BTB
    if ((aChunk->iAttributes & KAmSelfMod)==KAmSelfMod) // it's a potentially overlapping self-mod
        {
        iSelfModChunks++;
#ifndef __SMP__
        LastUserSelfMod = this; // we become the last self-modifying process
#endif
        __FlushBtb();   // needed because there may already be bad branches in the BTB
        }
#endif
    if (iChunkCount)
        {
        for (; pE>=pC && TLinAddr(pE->iChunk->iBase)>base; --pE);
        if (pE>=pC && TLinAddr(pE->iChunk->iBase)+pE->iChunk->iMaxSize>base)
            return KErrInUse;
        pC=pE+1;
        if (pC<iChunks+iChunkCount && base+aChunk->iMaxSize>TLinAddr(pC->iChunk->iBase))
            return KErrInUse;
        i=pC-iChunks;
        }
    if (iChunkCount==iChunkAlloc)
        {
        TInt newAlloc=iChunkAlloc+KChunkGranularity;
        TInt r=Kern::SafeReAlloc((TAny*&)iChunks,iChunkAlloc*sizeof(SChunkInfo),newAlloc*sizeof(SChunkInfo));
        if (r!=KErrNone)
            return r;
        pC=iChunks+i;
        iChunkAlloc=newAlloc;
        }
    memmove(pC+1,pC,(iChunkCount-i)*sizeof(SChunkInfo));
    ++iChunkCount;
    pC->isReadOnly=isReadOnly;
    pC->iAccessCount=1;
    pC->iChunk=aChunk;
    aDataSectionBase=base;
    Mmu& m=Mmu::Get();
    if (aChunk->iOsAsids)
        {
        // only need to do address space manipulation for shared chunks
        MmuBase::Wait();
        aChunk->iOsAsids->Alloc(iOsAsid,1);
        TLinAddr a;
        TInt i=0;
        for (a=TLinAddr(aChunk->iBase); a<TLinAddr(aChunk->iBase)+aChunk->iMaxSize; a+=m.iChunkSize, ++i)
            {
            TInt ptid=aChunk->iPageTables[i];
            if (ptid!=0xffff)
                m.DoAssignPageTable(ptid,a,aChunk->iPdePermissions,(const TAny*)iOsAsid);
            }
        MmuBase::Signal();
        }
    if (aChunk->iChunkType==ERamDrive)
        {
        NKern::LockSystem();
        iAddressCheckMaskR |= m.iRamDriveMask;
        iAddressCheckMaskW |= m.iRamDriveMask;
        NKern::UnlockSystem();
        }
    __DEBUG_EVENT(EEventUpdateProcess, this);
    return KErrNone;
    }

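// Remove the chunk at aIndex from the iChunks array, unassigning its page tables
// from this address space for shared chunks (with a TLB flush) and clearing the
// RAM drive address check masks where appropriate.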
|
void DMemModelProcess::DoRemoveChunk(TInt aIndex)
    {
    __DEBUG_EVENT(EEventUpdateProcess, this);
    DMemModelChunk* chunk = iChunks[aIndex].iChunk;
    memmove(iChunks+aIndex, iChunks+aIndex+1, (iChunkCount-aIndex-1)*sizeof(SChunkInfo));
    --iChunkCount;
    Mmu& m=Mmu::Get();
    if (chunk->iOsAsids)
        {
        // only need to do address space manipulation for shared chunks
        MmuBase::Wait();
        chunk->iOsAsids->Free(iOsAsid);
        TLinAddr a;
        for (a=TLinAddr(chunk->iBase); a<TLinAddr(chunk->iBase)+chunk->iMaxSize; a+=m.iChunkSize)
            m.DoUnassignPageTable(a,(const TAny*)iOsAsid);
        TUint32 mask=(chunk->iAttributes&DMemModelChunk::ECode)?Mmu::EFlushITLB:0;
        m.GenericFlush(mask|Mmu::EFlushDTLB);

        MmuBase::Signal();
        }
    if (chunk->iChunkType==ERamDrive)
        {
        NKern::LockSystem();
        iAddressCheckMaskR &= ~m.iRamDriveMask;
        iAddressCheckMaskW &= ~m.iRamDriveMask;
        NKern::UnlockSystem();
        }
    }

/**
Final chance for process to release resources during its death.

Called with process $LOCK mutex held (if it exists).
This mutex will not be released before it is deleted,
i.e. no other thread will ever hold the mutex again.
*/
void DMemModelProcess::FinalRelease()
    {
    // Clean up any left over chunks (such as SharedIo buffers)
    if (iProcessLock)
        while (iChunkCount)
            DoRemoveChunk(0);
    }

void DMemModelProcess::RemoveChunk(DMemModelChunk *aChunk)
    {
    // Note that this can't be called after the process $LOCK mutex has been deleted,
    // since it can only be called by a thread in this process doing a handle close or
    // dying, or by the process handles array being deleted due to the process dying,
    // all of which happen before $LOCK is deleted.
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O RemoveChunk %O",this,aChunk));
    Kern::MutexWait(*iProcessLock);
    TInt pos=0;
    TInt r=ChunkIndex(aChunk,pos);

    if (r==KErrNone)    // found the chunk
        {
        __KTRACE_OPT(KPROC,Kern::Printf("Chunk access count %d",iChunks[pos].iAccessCount));
        if (--iChunks[pos].iAccessCount==0)
            {
            DoRemoveChunk(pos);
#ifdef __CPU_HAS_BTB
            if ((aChunk->iAttributes & KAmSelfMod)==KAmSelfMod) // was a self-mod code chunk
                if (iSelfModChunks)
                    iSelfModChunks--;
#endif
            }
        }
    Kern::MutexSignal(*iProcessLock);
    }

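// Linear search of the chunk array; on success returns KErrNone with the index
// in aPos.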
|
TInt DMemModelProcess::ChunkIndex(DMemModelChunk* aChunk,TInt& aPos)
    {
    if (!aChunk)
        return KErrNotFound;
    SChunkInfo *pC=iChunks;
    SChunkInfo *pE=pC+iChunkCount;
    for (; pC<pE && pC->iChunk!=aChunk; ++pC);
    if (pC==pE)
        return KErrNotFound;
    aPos=pC-iChunks;
    return KErrNone;
    }

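// Map a code segment into this process: user RAM code is committed into the
// process's code chunk, and for DLLs any writable static data is committed in
// the DLL$DATA chunk. Kernel-side code needs no extra per-process mappings.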
|
TInt DMemModelProcess::MapCodeSeg(DCodeSeg* aSeg)
    {
    DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
    __KTRACE_OPT(KDLL,Kern::Printf("Process %O MapCodeSeg %C", this, aSeg));
    TBool kernel_only=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
    if (kernel_only && !(iAttributes&ESupervisor))
        return KErrNotSupported;
    if (seg.iAttr&ECodeSegAttKernel)
        return KErrNone;    // no extra mappings needed for kernel code
    TInt r=KErrNone;
    if (seg.Pages())
        r=MapUserRamCode(seg.Memory(),EFalse);
    if (seg.IsDll())
        {
        TInt total_data_size;
        TLinAddr data_base;
        seg.GetDataSizeAndBase(total_data_size, data_base);
        if (r==KErrNone && total_data_size)
            {
            TInt size=Mmu::RoundToPageSize(total_data_size);
            r=CommitDllData(data_base, size);
            if (r!=KErrNone && seg.Pages())
                UnmapUserRamCode(seg.Memory(), EFalse);
            }
        }
    return r;
    }

void DMemModelProcess::UnmapCodeSeg(DCodeSeg* aSeg)
    {
    DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
    __KTRACE_OPT(KDLL,Kern::Printf("Process %O UnmapCodeSeg %C", this, aSeg));
    if (seg.iAttr&ECodeSegAttKernel)
        return; // no extra mappings needed for kernel code
    if (seg.IsDll())
        {
        TInt total_data_size;
        TLinAddr data_base;
        seg.GetDataSizeAndBase(total_data_size, data_base);
        if (total_data_size)
            DecommitDllData(data_base, Mmu::RoundToPageSize(total_data_size));
        }
    if (seg.Pages())
        UnmapUserRamCode(seg.Memory(), EFalse);
    }

void DMemModelProcess::RemoveDllData()
//
// Call with CodeSegLock held
//
    {
    }

TInt DMemModelProcess::CreateCodeChunk()
    {
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CreateCodeChunk",this));
    TBool kernel=iAttributes&ESupervisor;
    Mmu& m=Mmu::Get();
    SChunkCreateInfo c;
    c.iGlobal=kernel;
    c.iAtt = TChunkCreate::EDisconnected | (kernel? 0 : TChunkCreate::EMemoryNotOwned);
    c.iForceFixed=EFalse;
    c.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
    c.iRunAddress=kernel ? 0 : m.iUserCodeBase;
    c.iPreallocated=0;
    c.iType=kernel ? EKernelCode : EUserCode;
    c.iMaxSize=m.iMaxUserCodeSize;
    c.iName.Set(KLitDollarCode);
    c.iOwner=this;
    c.iInitialTop=0;
    TLinAddr runAddr;
    TInt r = NewChunk((DChunk*&)iCodeChunk,c,runAddr);
    return r;
    }

void DMemModelProcess::FreeCodeChunk()
    {
    iCodeChunk->Close(this);
    iCodeChunk=NULL;
    }

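// Commit a user code segment's pages into this process's code chunk, creating
// the chunk on first use. Demand-paged code is committed virtually, otherwise
// the physical pages are committed directly; while loading, the unused tail of
// the committed area is filled with 0x03 bytes.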
|
TInt DMemModelProcess::MapUserRamCode(DMemModelCodeSegMemory* aMemory, TBool aLoading)
    {
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O MapUserRamCode %C %d %d %d",
                                    this, aMemory->iCodeSeg, aLoading, iOsAsid, aMemory->iIsDemandPaged));
    __ASSERT_MUTEX(DCodeSeg::CodeSegLock);

    TInt r;

    if (!iCodeChunk)
        {
        r=CreateCodeChunk();
        __KTRACE_OPT(KPROC,Kern::Printf("CreateCodeChunk returns %d", r));
        if (r!=KErrNone)
            return r;
        }

    MmuBase::Wait();

    Mmu& m=Mmu::Get();
    TInt offset=aMemory->iRamInfo.iCodeRunAddr-TLinAddr(iCodeChunk->iBase);
    TInt codeSize = aMemory->iPageCount<<m.iPageShift;
    TBool paged = aMemory->iIsDemandPaged;
    DChunk::TCommitType commitType = paged ? DChunk::ECommitVirtual : DChunk::ECommitDiscontiguousPhysical;
    r=iCodeChunk->Commit(offset, codeSize, commitType, aMemory->iPages);
    __KTRACE_OPT(KPROC,Kern::Printf("Commit Pages returns %d", r));
    if (r==KErrNone)
        {
        if (aLoading && !paged)
            {
            iCodeChunk->ApplyPermissions(offset, codeSize, m.iUserCodeLoadPtePerm);
            UNLOCK_USER_MEMORY();
            memset((TAny*)(aMemory->iRamInfo.iCodeLoadAddr+aMemory->iRamInfo.iCodeSize+aMemory->iRamInfo.iDataSize), 0x03, codeSize-(aMemory->iRamInfo.iCodeSize+aMemory->iRamInfo.iDataSize));
            LOCK_USER_MEMORY();
            }
        if (aLoading && aMemory->iDataPageCount)
            {
            TInt dataSize = aMemory->iDataPageCount<<m.iPageShift;
            r=iCodeChunk->Commit(offset+codeSize, dataSize, DChunk::ECommitDiscontiguousPhysical, aMemory->iPages+aMemory->iPageCount);
            if (r==KErrNone)
                {
                iCodeChunk->ApplyPermissions(offset+codeSize, dataSize, m.iUserCodeLoadPtePerm);
                UNLOCK_USER_MEMORY();
                memset((TAny*)(aMemory->iRamInfo.iDataLoadAddr+aMemory->iRamInfo.iDataSize), 0x03, dataSize-aMemory->iRamInfo.iDataSize);
                LOCK_USER_MEMORY();
                }
            }
        if (r!=KErrNone)
            {
            // error, so decommit the code pages we had already committed...
            DChunk::TDecommitType decommitType = paged ? DChunk::EDecommitVirtual : DChunk::EDecommitNormal;
            iCodeChunk->Decommit(offset, codeSize, decommitType);
            }
        else
            {
            // indicate codeseg is now successfully mapped into the process...
            NKern::LockSystem();
            aMemory->iOsAsids->Free(iOsAsid);
            NKern::UnlockSystem();
            }
        }

    MmuBase::Signal();

    if (r!=KErrNone && iCodeChunk->iSize==0)
        FreeCodeChunk();    // clean up any unused code chunk we would otherwise leave lying around

    return r;
    }

void DMemModelProcess::UnmapUserRamCode(DMemModelCodeSegMemory* aMemory, TBool aLoading)
    {
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O UnmapUserRamCode %C %d %d",
                                    this, aMemory->iCodeSeg, iOsAsid, aMemory->iIsDemandPaged != 0));

    __ASSERT_MUTEX(DCodeSeg::CodeSegLock);

    MmuBase::Wait();

    NKern::LockSystem();
    aMemory->iOsAsids->Alloc(iOsAsid, 1);
    NKern::UnlockSystem();

    Mmu& m=Mmu::Get();
    __NK_ASSERT_DEBUG(iCodeChunk);
    TInt offset=aMemory->iRamInfo.iCodeRunAddr-TLinAddr(iCodeChunk->iBase);
    TInt codeSize = aMemory->iPageCount<<m.iPageShift;
    TBool paged = aMemory->iIsDemandPaged;
    DChunk::TDecommitType decommitType = paged ? DChunk::EDecommitVirtual : DChunk::EDecommitNormal;
    TInt r=iCodeChunk->Decommit(offset, codeSize, decommitType);
    __ASSERT_DEBUG(r==KErrNone, MM::Panic(MM::EDecommitFailed));
    (void)r;    // suppress the warning in urel build

    if (aLoading && aMemory->iDataPageCount)
        {
        // decommit pages used to store data section...
        TInt dataSize = aMemory->iDataPageCount<<m.iPageShift;
        r=iCodeChunk->Decommit(offset+codeSize, dataSize);
        __ASSERT_DEBUG(r==KErrNone, MM::Panic(MM::EDecommitFailed));
        (void)r;    // suppress the warning in urel build
        }
    __NK_ASSERT_DEBUG(iCodeChunk->iSize >= 0);

    MmuBase::Signal();

    if (iCodeChunk->iSize==0)
        FreeCodeChunk();
    }

TInt DMemModelProcess::CreateDllDataChunk()
    {
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CreateDllDataChunk",this));
    Mmu& m=Mmu::Get();
    SChunkCreateInfo c;
    c.iGlobal=EFalse;
    c.iAtt=TChunkCreate::EDisconnected;
    c.iForceFixed=EFalse;
    c.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
    c.iRunAddress=m.iDllDataBase;
    c.iPreallocated=0;
    c.iType=EDllData;
    c.iMaxSize=m.iMaxDllDataSize;
    c.iName.Set(KLitDllDollarData);
    c.iOwner=this;
    c.iInitialTop=0;
    TLinAddr runAddr;
    return NewChunk((DChunk*&)iDllDataChunk,c,runAddr);
    }

void DMemModelProcess::FreeDllDataChunk()
    {
    iDllDataChunk->Close(this);
    iDllDataChunk=NULL;
    }

TInt DMemModelProcess::CommitDllData(TLinAddr aBase, TInt aSize)
    {
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CommitDllData %08x+%x",this,aBase,aSize));
    TInt r=KErrNone;
    if (!iDllDataChunk)
        r=CreateDllDataChunk();
    if (r==KErrNone)
        {
        TInt offset=aBase-(TLinAddr)iDllDataChunk->iBase;
        __ASSERT_ALWAYS(TUint32(offset)<TUint32(iDllDataChunk->iMaxSize),MM::Panic(MM::ECommitInvalidDllDataAddress));
        r=iDllDataChunk->Commit(offset, aSize);
        if (r!=KErrNone && iDllDataChunk->iSize==0)
            FreeDllDataChunk();
        }
    __KTRACE_OPT(KDLL,Kern::Printf("CommitDllData returns %d",r));
    return r;
    }

void DMemModelProcess::DecommitDllData(TLinAddr aBase, TInt aSize)
    {
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O DecommitDllData %08x+%x",this,aBase,aSize));
    TInt offset=aBase-(TLinAddr)iDllDataChunk->iBase;
    TInt r=iDllDataChunk->Decommit(offset, aSize);
    __ASSERT_ALWAYS(r==KErrNone,MM::Panic(MM::EDecommitInvalidDllDataAddress));
    if (iDllDataChunk->iSize==0)
        FreeDllDataChunk();
    }

TInt DMemModelProcess::NewShPool(DShPool*& /* aPool */, TShPoolCreateInfo& /* aInfo */)
    {
    return KErrNotSupported;
    }

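// RawRead/RawWrite transfer data between processes by aliasing windows of the
// remote address space into the current one (DMemModelThread::Alias), looping
// until the whole request has been copied.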
|
TInt DThread::RawRead(const TAny* aSrc, TAny* aDest, TInt aLength, TInt aFlags, TIpcExcTrap* /*aExcTrap*/)
//
// Read from the thread's process.
// Enter and return with system locked
// aSrc Run address of memory to read
// aDest Current address of destination
// aExcTrap Exception trap object to be updated if the actual memory access is performed on a memory area other than the one specified.
//          This happens when the read is performed on an unaligned memory area.
//
    {
    Mmu& m=Mmu::Get();
    DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
    DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
    TLinAddr src=(TLinAddr)aSrc;
    TLinAddr dest=(TLinAddr)aDest;
    TBool localIsSafe=ETrue;
    TInt result = KErrNone;

    while (aLength)
        {
        if (iMState==EDead)
            {
            result = KErrDied;
            break;
            }
        TLinAddr alias_src;
        TInt alias_size;
        TInt alias_result=t.Alias(src, pP, aLength, EMapAttrReadUser, alias_src, alias_size);
        if (alias_result<0)
            {
            result = KErrBadDescriptor;    // bad permissions
            break;
            }
        NKern::UnlockSystem();

        __KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead %08x<-%08x+%x",dest,alias_src,alias_size));
        if (aFlags&KCheckLocalAddress)
            localIsSafe = m.ValidateLocalIpcAddress(dest,alias_size,ETrue);

        CHECK_PAGING_SAFE;

        COND_UNLOCK_USER_MEMORY(localIsSafe);

        if (alias_result)
            {
            // remote address is safe for direct access...
            if (localIsSafe)
                memcpy( (TAny*)dest, (const TAny*)alias_src, alias_size);
            else
                umemput( (TAny*)dest, (const TAny*)alias_src, alias_size);
            }
        else
            {
            // remote address is NOT safe for direct access, so use user permission checks when reading...
            if (localIsSafe)
                umemget( (TAny*)dest, (const TAny*)alias_src, alias_size);
            else
                uumemcpy( (TAny*)dest, (const TAny*)alias_src, alias_size);
            }

        LOCK_USER_MEMORY();

        src+=alias_size;
        dest+=alias_size;
        aLength-=alias_size;
        NKern::LockSystem();
        }
    t.RemoveAlias();
    return result;
    }

TInt DThread::RawWrite(const TAny* aDest, const TAny* aSrc, TInt aLength, TInt aFlags, DThread* anOriginatingThread, TIpcExcTrap* /*aExcTrap*/)
//
// Write to the thread's process.
// Enter and return with system locked
// aDest Run address of memory to write
// aSrc Current address of source
// anOriginatingThread The thread on behalf of which this operation is performed (e.g. the client of a device driver).
// aExcTrap Exception trap object to be updated if the actual memory access is performed on a memory area other than the one specified.
//          This happens when the write is performed on an unaligned memory area.
//
    {
    Mmu& m=Mmu::Get();
    DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
    DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
    TLinAddr src=(TLinAddr)aSrc;
    TLinAddr dest=(TLinAddr)aDest;
    TBool localIsSafe=ETrue;
    DThread* pO=anOriginatingThread?anOriginatingThread:&t;
    DProcess* pF=K::TheFileServerProcess;
    TBool special=(iOwningProcess==pF && pO->iOwningProcess==pF);
    TUint32 perm=special ? EMapAttrWriteSup : EMapAttrWriteUser;
    TInt result = KErrNone;

    while (aLength)
        {
        if (iMState==EDead)
            {
            result = KErrDied;
            break;
            }
        TLinAddr alias_dest;
        TInt alias_size;
        TInt alias_result=t.Alias(dest, pP, aLength, perm, alias_dest, alias_size);
        if (alias_result<0)
            {
            result = KErrBadDescriptor;    // bad permissions
            break;
            }
        NKern::UnlockSystem();

        __KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite %08x+%x->%08x",src,alias_size,alias_dest));
        if (aFlags&KCheckLocalAddress)
            localIsSafe = m.ValidateLocalIpcAddress(src,alias_size,EFalse);

        // Must check that it is safe to page, unless we are reading from unpaged ROM in which case
        // we allow it. umemget and uumemcpy do this anyway, so we just need to check if
        // localIsSafe is set.
        if (localIsSafe)
            {
            CHECK_PAGING_SAFE_RANGE(src, aLength);
            CHECK_DATA_PAGING_SAFE_RANGE(dest, aLength);
            }

        COND_UNLOCK_USER_MEMORY(localIsSafe);

        if (alias_result)
            {
            // remote address is safe for direct access...
            if (localIsSafe)
                memcpy( (TAny*)alias_dest, (const TAny*)src, alias_size);
            else
                umemget( (TAny*)alias_dest, (const TAny*)src, alias_size);
            }
        else
            {
            // remote address is NOT safe for direct access, so use user permission checks when writing...
            if (localIsSafe)
                umemput( (TAny*)alias_dest, (const TAny*)src, alias_size);
            else
                uumemcpy( (TAny*)alias_dest, (const TAny*)src, alias_size);
            }

        LOCK_USER_MEMORY();

        src+=alias_size;
        dest+=alias_size;
        aLength-=alias_size;
        NKern::LockSystem();
        }
    t.RemoveAlias();
    return result;
    }

#ifdef __DEBUGGER_SUPPORT__

/**
@pre Calling thread must be in critical section
@pre CodeSeg mutex held
*/
TInt CodeModifier::SafeWriteCode(DProcess* aProcess, TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
    {
    Mmu& m=Mmu::Get();
    MmuBase::Wait();

    NKern::LockSystem();

    // Find the physical address of the page the breakpoint belongs to
    TPhysAddr physAddr = m.LinearToPhysical(aAddress,((DMemModelProcess*)aProcess)->iOsAsid);
    __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - PA:%x", physAddr));
    if (physAddr==KPhysAddrInvalid)
        {
        __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - invalid VA"));
        NKern::UnlockSystem();
        MmuBase::Signal();
        return KErrBadDescriptor;
        }

    // Temporarily map the physical page
    TLinAddr tempAddr = m.MapTemp(physAddr&~m.iPageMask, aAddress);
    tempAddr |= aAddress & m.iPageMask;
    __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - tempAddr:%x",tempAddr));

    // Set the exception handler. Make sure the boundaries cover the worst case (aSize = 4)
    TIpcExcTrap xt;
    xt.iLocalBase=0;
    xt.iRemoteBase=(TLinAddr)tempAddr&~3;   // word aligned
    xt.iSize=sizeof(TInt);
    xt.iDir=1;

    TInt r=xt.Trap(NULL);
    if (r==0)
        {
        r = WriteCode(tempAddr, aSize, aValue, aOldValue);
        xt.UnTrap();
        }

    m.UnmapTemp();
    NKern::UnlockSystem();
    MmuBase::Signal();
    return r;
    }

/**
@pre Calling thread must be in critical section
@pre CodeSeg mutex held
*/
TInt CodeModifier::WriteCode(TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
    {
    // We do not want to be interrupted by e.g. an ISR that would run the altered code
    // before the IMB range operation. Therefore, copy the data and clean/invalidate
    // the caches with interrupts disabled.
    TInt irq=NKern::DisableAllInterrupts();
    switch (aSize)
        {
        case 1:
            *(TUint8*)aOldValue = *(TUint8*)aAddress;
            *(TUint8*)aAddress = (TUint8)aValue;
            break;
        case 2:
            *(TUint16*)aOldValue = *(TUint16*)aAddress;
            *(TUint16*)aAddress = (TUint16)aValue;
            break;
        default:    // it is 4 otherwise
            *(TUint32*)aOldValue = *(TUint32*)aAddress;
            *(TUint32*)aAddress = (TUint32)aValue;
            break;
        }
    CacheMaintenance::CodeChanged(aAddress, aSize, CacheMaintenance::ECodeModifier);
    NKern::RestoreInterrupts(irq);
    return KErrNone;
    }
#endif //__DEBUGGER_SUPPORT__


#ifdef __MARM__

// the body of ReadAndParseDesHeader is machine coded on ARM...
extern TInt ThreadDoReadAndParseDesHeader(DThread* aThread, const TAny* aSrc, TUint32* aDest);

TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
//
// Read and parse the header of a remote descriptor.
// Enter and return with system locked
//
    {
    // todo: remove use of system lock from callers, when they have been un-exported from the kernel
    NKern::UnlockSystem();
    TInt r = ThreadDoReadAndParseDesHeader(this,aSrc,(TUint32*)&aDest);
    NKern::LockSystem();
    return r;
    }


#else // !__MARM__


TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
//
// Read and parse the header of a remote descriptor.
// Enter and return with system locked
//
    {
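    // Header length in bytes for each descriptor type in the top nibble
    // (EBufC=4, EPtrC=8, EPtr=12, EBuf=8, EBufCPtr=12); a zero entry
    // marks an invalid type.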
|
    static const TUint8 LengthLookup[16] = {4,8,12,8,12,0,0,0,0,0,0,0,0,0,0,0};

    DMemModelThread& t = *(DMemModelThread*)TheCurrentThread;
    TInt r = KErrBadDescriptor;

    CHECK_PAGING_SAFE;

    DMemModelProcess* pP = (DMemModelProcess*)iOwningProcess;
    TLinAddr src = (TLinAddr)aSrc;
    const TUint32* pAlias;
    TInt alias_size;
    TInt alias_result = t.Alias(src, pP, 12, EMapAttrReadUser, (TLinAddr&)pAlias, alias_size);
    if (alias_result<0)
        return KErrBadDescriptor;   // bad permissions
    NKern::UnlockSystem();
    t.iIpcClient = this;
    TUint32* dest = (TUint32*)&aDest;
    if (Kern::SafeRead(pAlias, dest, sizeof(TUint32)))
        goto fail;

    {
    TInt type=*dest>>KShiftDesType8;

    src += sizeof(TUint32);
    alias_size -= sizeof(TUint32);
    ++pAlias;
    ++dest;

    TInt l=LengthLookup[type];
    if (l==0)
        goto fail;

    l -= sizeof(TUint32);   // we've already read one word
    if (l>0 && alias_size)
        {
get_more:
        // more to go - get rest or as much as is currently aliased
        TInt ll = alias_size>=l ? l : alias_size;
        if (Kern::SafeRead(pAlias, dest, ll))
            goto fail;
        l -= ll;
        src += TLinAddr(ll);
        dest = (TUint32*)(TLinAddr(dest) + TLinAddr(ll));
        }
    if (l>0)
        {
        // more to go - need to step alias on
        NKern::LockSystem();
        alias_result = t.Alias(src, pP, l, EMapAttrReadUser, (TLinAddr&)pAlias, alias_size);
        if (alias_result<0)
            goto fail_locked;
        NKern::UnlockSystem();
        goto get_more;
        }

    r = K::ParseDesHeader(aSrc, *(TRawDesHeader*)&aDest, aDest);
    }

fail:
    NKern::LockSystem();
fail_locked:
    t.RemoveAlias();
    t.iIpcClient = NULL;
    return r;
    }


#endif

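// Find the shared kernel chunk containing aAddress in the owning process's chunk
// array; on success the chunk is opened and the offset of aAddress within it is
// returned in aOffset.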
|
DChunk* DThread::OpenSharedChunk(const TAny* aAddress, TBool aWrite, TInt& aOffset)
    {
    NKern::LockSystem();

    DMemModelProcess* pP = (DMemModelProcess*)iOwningProcess;
    DMemModelProcess::SChunkInfo* pS=pP->iChunks;
    DMemModelProcess::SChunkInfo* pC=pS+pP->iChunkCount;
    while (--pC>=pS && TUint(pC->iChunk->Base())>TUint(aAddress)) {};
    if (pC>=pS)
        {
        DMemModelChunk* chunk = pC->iChunk;
        if (chunk->iChunkType==ESharedKernelSingle || chunk->iChunkType==ESharedKernelMultiple)
            {
            TInt offset = (TInt)aAddress-(TInt)chunk->Base();
            if (TUint(offset)<TUint(chunk->iMaxSize) && chunk->Open()==KErrNone)
                {
                aOffset = offset;
                NKern::UnlockSystem();
                return chunk;
                }
            }
        }
    NKern::UnlockSystem();
    return 0;
    }

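// DMA helpers: translate a linear buffer in this thread's address space into a
// physical page list via the MMU, and release it again when the transfer is done.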
|
TInt DThread::PrepareMemoryForDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
    {
    TInt asid = ((DMemModelProcess*)iOwningProcess)->iOsAsid;
    Mmu& m=(Mmu&)*MmuBase::TheMmu;
    return m.PreparePagesForDMA((TLinAddr)aLinAddr, aSize, asid, aPhysicalPageList);
    }

TInt DThread::ReleaseMemoryFromDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
    {
    TInt pageCount = (((TInt)aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;
    Mmu& m=(Mmu&)*MmuBase::TheMmu;
    return m.ReleasePagesFromDMA(aPhysicalPageList, pageCount);
    }