author      William Roberts <williamr@symbian.org>
date        Wed, 23 Dec 2009 11:47:04 +0000
changeset   5   c9417927a896
parent      4   56f325a607ea
child       8   538db54a451d
permissions -rw-r--r--
// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <memmodel.h>
#include "mmu/mm.h"
#include "mmu/maddrcont.h"
#include "mmboot.h"
#include <kernel/cache.h>
#include "execs.h"

#define iMState iWaitLink.iSpare1

NFastMutex TheSharedChunkLock;

#ifndef _DEBUG
const TInt KChunkGranularity = 4;       // amount to grow the SChunkInfo list by
const TInt KMaxChunkInfosInOneGo = 100; // max number of SChunkInfo objects to copy with the System Lock held
#else // if debug...
const TInt KChunkGranularity = 1;
const TInt KMaxChunkInfosInOneGo = 1;
#endif


/********************************************
 * Process
 ********************************************/

DMemModelProcess::~DMemModelProcess()
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelProcess destruct"));
    Destruct();
    }


void DMemModelProcess::Destruct()
    {
    __ASSERT_ALWAYS(!iOsAsidRefCount, MM::Panic(MM::EProcessDestructOsAsidRemaining));
    __ASSERT_ALWAYS(!iChunkCount, MM::Panic(MM::EProcessDestructChunksRemaining));
    Kern::Free(iChunks);
    __ASSERT_ALWAYS(!iSharedChunks || iSharedChunks->Count()==0, MM::Panic(MM::EProcessDestructChunksRemaining));
    delete iSharedChunks;

    DProcess::Destruct();
    }

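// The process's address space id (os asid) is reference counted: the atomic
// test-and-set calls below only open a new reference while at least one
// reference is still held, and free the address space when the last reference
// is dropped. Once the count has reached zero the process is dying, so
// TryOpenOsAsid() fails with KErrDied. (Descriptive comment; behaviour as
// implemented by the __e32_atomic_tas_ord32() calls that follow.)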
TInt DMemModelProcess::TryOpenOsAsid()
    {
    if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, 1, 0))
        {
        return iOsAsid;
        }
    return KErrDied;
    }


void DMemModelProcess::CloseOsAsid()
    {
    if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, -1, 0) == 1)
        {// Last reference has been closed so free the asid.
        MM::AddressSpaceFree(iOsAsid);
        }
    }


void DMemModelProcess::AsyncCloseOsAsid()
    {
    if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, -1, 0) == 1)
        {// Last reference has been closed so free the asid asynchronously.
        MM::AsyncAddressSpaceFree(iOsAsid);
        }
    }

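// Create a new chunk for this process: construct the DMemModelChunk, apply
// the creation attributes, perform any initial commit/adjust, and, if
// SChunkCreateInfo::EAdd is requested, map the chunk into this process.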
TInt DMemModelProcess::NewChunk(DChunk*& aChunk, SChunkCreateInfo& aInfo, TLinAddr& aRunAddr)
    {
    aChunk=NULL;

    DMemModelChunk* pC=new DMemModelChunk;
    if (!pC)
        return KErrNoMemory;

    TChunkType type = aInfo.iType;
    pC->iChunkType=type;
    TInt r=pC->SetAttributes(aInfo);
    if (r!=KErrNone)
        {
        pC->Close(NULL);
        return r;
        }

    pC->iOwningProcess=(pC->iAttributes&DMemModelChunk::EPublic)?NULL:this;
    r=pC->Create(aInfo);
    if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdjust))
        {
        if (aInfo.iRunAddress!=0)
            pC->SetFixedAddress(aInfo.iRunAddress,aInfo.iPreallocated);
        if (aInfo.iPreallocated==0 && aInfo.iInitialTop!=0)
            {
            if (pC->iAttributes & DChunk::EDisconnected)
                {
                r=pC->Commit(aInfo.iInitialBottom,aInfo.iInitialTop-aInfo.iInitialBottom);
                }
            else if (pC->iAttributes & DChunk::EDoubleEnded)
                {
                r=pC->AdjustDoubleEnded(aInfo.iInitialBottom,aInfo.iInitialTop);
                }
            else
                {
                r=pC->Adjust(aInfo.iInitialTop);
                }
            }
        }
    if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdd))
        {
        r = AddChunk(pC, EFalse);
        }
    if (r==KErrNone)
        {
        if(pC->iKernelMapping)
            aRunAddr = (TLinAddr)MM::MappingBase(pC->iKernelMapping);
        pC->iDestroyedDfc = aInfo.iDestroyedDfc;
        aChunk=(DChunk*)pC;
        }
    else
        pC->Close(NULL); // NULL since chunk can't have been added to process
    return r;
    }


/**
Determine whether this process should be data paged.

@param aInfo A reference to the create info for this process.
@return KErrNone if successful, or KErrCorrupt if the image's paging flags are inconsistent.
*/
TInt DMemModelProcess::SetPaging(const TProcessCreateInfo& aInfo)
    {
    TUint pagedFlags = aInfo.iFlags & TProcessCreateInfo::EDataPagingMask;
    // If both the KImageDataPaged and KImageDataUnpaged flags are present then the image is corrupt.
    // Check this first to ensure that it is always verified.
    if (pagedFlags == TProcessCreateInfo::EDataPagingMask)
        {
        return KErrCorrupt;
        }

    if (aInfo.iAttr & ECodeSegAttKernel ||
        !(K::MemModelAttributes & EMemModelAttrDataPaging))
        {// Kernel process shouldn't be data paged or no data paging device installed.
        return KErrNone;
        }

    TUint dataPolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigDataPagingPolicyMask;
    if (dataPolicy == EKernelConfigDataPagingPolicyAlwaysPage)
        {
        iAttributes |= EDataPaged;
        return KErrNone;
        }
    if (dataPolicy == EKernelConfigDataPagingPolicyNoPaging)
        {// No paging allowed so just return.
        return KErrNone;
        }
    if (pagedFlags == TProcessCreateInfo::EDataPaged)
        {
        iAttributes |= EDataPaged;
        return KErrNone;
        }
    if (pagedFlags == TProcessCreateInfo::EDataUnpaged)
        {// No paging set so just return.
        return KErrNone;
        }
    // Neither paged nor unpaged is set, so use the default paging policy.
    // dataPolicy must be EKernelConfigDataPagingPolicyDefaultUnpaged or
    // EKernelConfigDataPagingPolicyDefaultPaged.
    __NK_ASSERT_DEBUG(pagedFlags == TProcessCreateInfo::EDataPagingUnspecified);
    __NK_ASSERT_DEBUG(	dataPolicy == EKernelConfigDataPagingPolicyDefaultPaged ||
                        dataPolicy == EKernelConfigDataPagingPolicyDefaultUnpaged);
    if (dataPolicy == EKernelConfigDataPagingPolicyDefaultPaged)
        {
        iAttributes |= EDataPaged;
        }
    return KErrNone;
    }


TInt DMemModelProcess::DoCreate(TBool aKernelProcess, TProcessCreateInfo& aInfo)
    {
    // Required so we can detect whether a process has been created and added
    // to its object container by checking for iContainerID!=EProcess.
    __ASSERT_COMPILE(EProcess != 0);
    __KTRACE_OPT(KPROC,Kern::Printf(">DMemModelProcess::DoCreate %O",this));
    TInt r=KErrNone;

    if (aKernelProcess)
        {
        iAttributes |= ESupervisor;
        iOsAsid = KKernelOsAsid;
        }
    else
        {
        r = MM::AddressSpaceAlloc(iPageDir);
        if (r>=0)
            {
            iOsAsid = r;
            r = KErrNone;
            }
        }
    if (r == KErrNone)
        {// Add this process's own reference to its os asid.
        __e32_atomic_store_ord32(&iOsAsidRefCount, 1);
        }

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
    BTrace8(BTrace::EFlexibleMemModel,BTrace::EAddressSpaceId,this,iOsAsid);
#endif

    __KTRACE_OPT(KPROC,Kern::Printf("OS ASID=%d, PD=%08x",iOsAsid,iPageDir));
    __KTRACE_OPT(KPROC,Kern::Printf("<DMemModelProcess::DoCreate %d",r));
    return r;
    }

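// Create the memory object and mapping for the process's static data (.data
// and .bss). The memory is demand paged only if the process itself is data
// paged, otherwise it is movable.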
TInt DMemModelProcess::CreateDataBssStackArea(TProcessCreateInfo& aInfo)
    {
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::CreateDataBssStackArea %O",this));
    TInt r = KErrNone;
    TInt dataBssSize = MM::RoundToPageSize(aInfo.iTotalDataSize);
    if(dataBssSize)
        {
        DMemoryObject* memory;
        TMemoryObjectType memoryType = iAttributes&EDataPaged ? EMemoryObjectPaged : EMemoryObjectMovable;
        r = MM::MemoryNew(memory,memoryType,MM::BytesToPages(dataBssSize));
        if(r==KErrNone)
            {
            r = MM::MemoryAlloc(memory,0,MM::BytesToPages(dataBssSize));
            if(r==KErrNone)
                {
                r = MM::MappingNew(iDataBssMapping,memory,EUserReadWrite,OsAsid());
                }
            if(r!=KErrNone)
                MM::MemoryDestroy(memory);
            else
                {
                iDataBssRunAddress = MM::MappingBase(iDataBssMapping);
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
                BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsProcessStaticData,memory,this);
#endif
                }
            }
        }
    __KTRACE_OPT(KPROC,Kern::Printf("DataBssSize=%x, ",dataBssSize));

    return r;
    }


TInt DMemModelProcess::AttachExistingCodeSeg(TProcessCreateInfo& aInfo)
    {
    TInt r = DEpocProcess::AttachExistingCodeSeg(aInfo);
    if(r==KErrNone)
        {
        // allocate virtual memory for the EXE's codeseg...
        DMemModelCodeSeg* seg = (DMemModelCodeSeg*)iTempCodeSeg;
        if(seg->iAttr&ECodeSegAttAddrNotUnique)
            {
            TUint codeSize = seg->iSize;
            TLinAddr codeAddr = seg->RamInfo().iCodeRunAddr;
            TBool isDemandPaged = seg->iAttr&ECodeSegAttCodePaged;
            // Allocate virtual memory for the code seg using the os asid.
            // No need to open a reference on the os asid as the process is not
            // fully created yet, so it can't die and free the os asid.
            r = MM::VirtualAlloc(OsAsid(),codeAddr,codeSize,isDemandPaged);
            if(r==KErrNone)
                {
                iCodeVirtualAllocSize = codeSize;
                iCodeVirtualAllocAddress = codeAddr;
                }
            }
        }

    return r;
    }


TInt DMemModelProcess::AddChunk(DChunk* aChunk, TBool aIsReadOnly)
    {
    DMemModelChunk* pC=(DMemModelChunk*)aChunk;
    if(pC->iOwningProcess && this!=pC->iOwningProcess)
        return KErrAccessDenied;

    TInt r = WaitProcessLock();
    if(r==KErrNone)
        {
        TInt i = ChunkIndex(pC);
        if(i>=0) // Found the chunk in this process, just up its count
            {
            iChunks[i].iAccessCount++;
            __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (Access count incremented to %d)",aChunk,this,iChunks[i].iAccessCount));
            SignalProcessLock();
            return KErrNone;
            }
        r = DoAddChunk(pC,aIsReadOnly);
        SignalProcessLock();
        }
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk returns %d",r));
    return r;
    }

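// A file server thread gets access to the internal RAM drive by having the
// RAM drive chunk added to its process on demand (see TInternalRamDrive::Unlock
// below); on this memory model the corresponding exec call is therefore a no-op.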
void M::FsRegisterThread()
    {
    TInternalRamDrive::Unlock();
    }


void ExecHandler::UnlockRamDrive()
    {
    }


EXPORT_C TLinAddr TInternalRamDrive::Base()
    {
    DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
    DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
    NKern::LockSystem();
    TLinAddr addr = (TLinAddr)pC->Base(pP);
    NKern::UnlockSystem();
    if(!addr)
        {
        Unlock();
        NKern::LockSystem();
        addr = (TLinAddr)pC->Base(pP);
        NKern::UnlockSystem();
        }
    return addr;
    }


EXPORT_C void TInternalRamDrive::Unlock()
    {
    DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
    DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;

    TInt r = pP->WaitProcessLock();
    if(r==KErrNone)
        if(pP->ChunkIndex(pC)==KErrNotFound)
            r = pP->DoAddChunk(pC,EFalse);
    __ASSERT_ALWAYS(r==KErrNone, MM::Panic(MM::EFsRegisterThread));
    pP->SignalProcessLock();
    }


EXPORT_C void TInternalRamDrive::Lock()
    {
    }

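// iChunks is kept sorted by DMemModelChunk pointer so that ChunkIndex() can
// binary search it. To bound the time the System Lock is held, insertions and
// removals shuffle at most KMaxChunkInfosInOneGo entries at a time, flashing
// the lock between batches.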
TInt DMemModelProcess::DoAddChunk(DMemModelChunk* aChunk, TBool aIsReadOnly)
    {
    //
    // Must hold the process $LOCK mutex before calling this.
    // As the process lock is held it is safe to access iOsAsid without a reference.
    //

    __NK_ASSERT_DEBUG(ChunkIndex(aChunk)==KErrNotFound); // shouldn't be adding a chunk which is already added

    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::DoAddChunk %O to %O",aChunk,this));

    // create mapping for chunk...
    DMemoryMapping* mapping;
    TMappingPermissions perm = MM::MappingPermissions
        (
        iOsAsid!=(TInt)KKernelOsAsid,               // user?
        aIsReadOnly==false,                         // write?
        aChunk->iAttributes&DMemModelChunk::ECode   // execute?
        );
    TInt r;
    if(aChunk->iFixedBase) // HACK, kernel chunk has a fixed iBase
        r = MM::MappingNew(mapping,aChunk->iMemoryObject,perm,iOsAsid,EMappingCreateExactVirtual,(TLinAddr)aChunk->iFixedBase);
    else
        r = MM::MappingNew(mapping,aChunk->iMemoryObject,perm,iOsAsid);
    if(r!=KErrNone)
        return r;
    if(iOsAsid==0)
        aChunk->iKernelMapping = mapping;
    TLinAddr base = MM::MappingBase(mapping);

    // expand chunk info memory if required...
    if(iChunkCount==iChunkAlloc)
        {
        TInt newAlloc = iChunkAlloc+KChunkGranularity;
        r = Kern::SafeReAlloc((TAny*&)iChunks,iChunkAlloc*sizeof(SChunkInfo),newAlloc*sizeof(SChunkInfo));
        if(r!=KErrNone)
            {
            MM::MappingDestroy(mapping);
            return r;
            }
        iChunkAlloc = newAlloc;
        }

    // insert new chunk info...
    TUint i = ChunkInsertIndex(aChunk);
    SChunkInfo* info = iChunks+i;
    SChunkInfo* infoEnd = iChunks+iChunkCount;
    NKern::LockSystem();
    ++iChunkCount;
    for(;;)
        {
        // make space for new chunk info by shuffling along
        // existing infos KMaxChunkInfosInOneGo at a time...
        SChunkInfo* infoPtr = infoEnd-KMaxChunkInfosInOneGo;
        if(infoPtr<info)
            infoPtr = info;
        memmove(infoPtr+1,infoPtr,(TLinAddr)infoEnd-(TLinAddr)infoPtr);
        infoEnd = infoPtr;
        if(infoEnd<=info)
            break;
        NKern::FlashSystem();
        }
    info->iChunk = aChunk;
    info->iMapping = mapping;
    info->iAccessCount = 1;
    info->iIsReadOnly = aIsReadOnly;
    NKern::UnlockSystem();

    // add chunk to list of Shared Chunks...
    if(aChunk->iChunkType==ESharedKernelSingle || aChunk->iChunkType==ESharedKernelMultiple)
        {
        if(!iSharedChunks)
            iSharedChunks = new RAddressedContainer(&TheSharedChunkLock,iProcessLock);
        if(!iSharedChunks)
            r = KErrNoMemory;
        else
            r = iSharedChunks->Add(base,aChunk);
        if(r!=KErrNone)
            {
            DoRemoveChunk(i);
            return r;
            }
        }

    // done OK...
    __DEBUG_EVENT(EEventUpdateProcess, this);
    return KErrNone;
    }


void DMemModelProcess::DoRemoveChunk(TInt aIndex)
    {
    __DEBUG_EVENT(EEventUpdateProcess, this);

    DMemModelChunk* chunk = iChunks[aIndex].iChunk;
    DMemoryMapping* mapping = iChunks[aIndex].iMapping;

    if(chunk->iChunkType==ESharedKernelSingle || chunk->iChunkType==ESharedKernelMultiple)
        {
        // remove chunk from list of Shared Chunks...
        if(iSharedChunks)
            {
            iSharedChunks->Remove(MM::MappingBase(mapping));
#ifdef _DEBUG
            // delete iSharedChunks if it's empty, so memory leak test code passes...
            if(iSharedChunks->Count()==0)
                {
                NKern::FMWait(&TheSharedChunkLock);
                RAddressedContainer* s = iSharedChunks;
                iSharedChunks = 0;
                NKern::FMSignal(&TheSharedChunkLock);
                delete s;
                }
#endif
            }
        }

    // remove chunk from array...
    SChunkInfo* infoStart = iChunks+aIndex+1;
    SChunkInfo* infoEnd = iChunks+iChunkCount;
    NKern::LockSystem();
    for(;;)
        {
        // shuffle existing infos down KMaxChunkInfosInOneGo at a time...
        SChunkInfo* infoPtr = infoStart+KMaxChunkInfosInOneGo;
        if(infoPtr>infoEnd)
            infoPtr = infoEnd;
        memmove(infoStart-1,infoStart,(TLinAddr)infoPtr-(TLinAddr)infoStart);
        infoStart = infoPtr;
        if(infoStart>=infoEnd)
            break;
        NKern::FlashSystem();
        }
    --iChunkCount;
    NKern::UnlockSystem();

    if(mapping==chunk->iKernelMapping)
        chunk->iKernelMapping = 0;

    MM::MappingDestroy(mapping);
    }


/**
Final chance for the process to release resources during its death.

Called with the process $LOCK mutex held (if it exists).
The mutex will not be released before it is deleted,
i.e. no other thread will ever hold it again.
*/
void DMemModelProcess::FinalRelease()
    {
    // Clean up any left over chunks (such as SharedIo buffers)
    if(iProcessLock)
        while(iChunkCount)
            DoRemoveChunk(0);
    // Destroy the remaining mappings and memory objects owned by this process
    MM::MappingAndMemoryDestroy(iDataBssMapping);
    if(iCodeVirtualAllocSize)
        MM::VirtualFree(iOsAsid,iCodeVirtualAllocAddress,iCodeVirtualAllocSize);

    // Close the original reference on the os asid.
    CloseOsAsid();
    }


void DMemModelProcess::RemoveChunk(DMemModelChunk *aChunk)
    {
    // Note that this can't be called after the process $LOCK mutex has been deleted,
    // since it can only be called by a thread in this process doing a handle close or
    // dying, or by the process handles array being deleted due to the process dying,
    // all of which happen before $LOCK is deleted.
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O RemoveChunk %O",this,aChunk));
    Kern::MutexWait(*iProcessLock);
    TInt i = ChunkIndex(aChunk);
    if(i>=0) // Found the chunk
        {
        __KTRACE_OPT(KPROC,Kern::Printf("Chunk access count %d",iChunks[i].iAccessCount));
        if(--iChunks[i].iAccessCount==0)
            {
            DoRemoveChunk(i);
            }
        }
    Kern::MutexSignal(*iProcessLock);
    }


TUint8* DMemModelChunk::Base(DProcess* aProcess)
    {
    DMemModelProcess* pP = (DMemModelProcess*)aProcess;
    DMemoryMapping* mapping = 0;

    if(iKernelMapping && pP==K::TheKernelProcess)
        {
        // shortcut for shared chunks...
        mapping = iKernelMapping;
        }
    else
        {
        // find chunk in process...
        TInt i = pP->ChunkIndex(this);
        if(i>=0)
            mapping = pP->iChunks[i].iMapping;
        }

    if(!mapping)
        return 0;

    return (TUint8*)MM::MappingBase(mapping);
    }


DChunk* DThread::OpenSharedChunk(const TAny* aAddress, TBool aWrite, TInt& aOffset)
    {
    DMemModelChunk* chunk = 0;

    NKern::FMWait(&TheSharedChunkLock);
    RAddressedContainer* list = ((DMemModelProcess*)iOwningProcess)->iSharedChunks;
    if(list)
        {
        // search list...
        TUint offset;
        chunk = (DMemModelChunk*)list->Find((TLinAddr)aAddress,offset);
        if(chunk && offset<TUint(chunk->iMaxSize) && chunk->Open()==KErrNone)
            aOffset = offset; // chunk found and opened successfully
        else
            chunk = 0; // failed
        }
    NKern::FMSignal(&TheSharedChunkLock);

    return chunk;
    }

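// Binary search for the position at which aChunk would be inserted to keep
// iChunks sorted by chunk pointer, i.e. the index of the first entry whose
// chunk pointer is greater than aChunk.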
TUint DMemModelProcess::ChunkInsertIndex(DMemModelChunk* aChunk)
    {
    // need to hold iProcessLock or the System Lock...
#ifdef _DEBUG
    if(K::Initialising==false && iProcessLock!=NULL && iProcessLock->iCleanup.iThread!=&Kern::CurrentThread())
        {
        // don't hold iProcessLock, so...
        __ASSERT_SYSTEM_LOCK;
        }
#endif

    // binary search...
    SChunkInfo* list = iChunks;
    TUint l = 0;
    TUint r = iChunkCount;
    TUint m;
    while(l<r)
        {
        m = (l+r)>>1;
        DChunk* x = list[m].iChunk;
        if(x<=aChunk)
            l = m+1;
        else
            r = m;
        }
    return r;
    }


TInt DMemModelProcess::ChunkIndex(DMemModelChunk* aChunk)
    {
    TUint i = ChunkInsertIndex(aChunk);
    if(i && iChunks[--i].iChunk==aChunk)
        return i;
    return KErrNotFound;
    }

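// Map a code segment into this process. Kernel code needs no extra mappings;
// user-local code is mapped via MapUserRamCode(), and for DLLs a per-process
// copy of the static data is committed via CommitDllData().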
TInt DMemModelProcess::MapCodeSeg(DCodeSeg* aSeg)
    {
    __ASSERT_CRITICAL; // Must be in critical section so can't leak os asid references.

    DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
    __KTRACE_OPT(KDLL,Kern::Printf("Process %O MapCodeSeg %C", this, aSeg));
    TBool kernel_only=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
    TBool user_local=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == 0 );
    if (kernel_only && !(iAttributes&ESupervisor))
        return KErrNotSupported;
    if (seg.iAttr&ECodeSegAttKernel)
        return KErrNone; // no extra mappings needed for kernel code

    // Attempt to open a reference on the os asid; it is required so that
    // MapUserRamCode() and CommitDllData() can use iOsAsid safely.
    TInt osAsid = TryOpenOsAsid();
    if (osAsid < 0)
        {// The process has died.
        return KErrDied;
        }

    TInt r=KErrNone;
    if (user_local)
        r=MapUserRamCode(seg.Memory());
    if (seg.IsDll())
        {
        TInt total_data_size;
        TLinAddr data_base;
        seg.GetDataSizeAndBase(total_data_size, data_base);
        if (r==KErrNone && total_data_size)
            {
            TInt size=MM::RoundToPageSize(total_data_size);
            r=CommitDllData(data_base, size, aSeg);
            if (r!=KErrNone && user_local)
                UnmapUserRamCode(seg.Memory());
            }
        }
    CloseOsAsid();

    return r;
    }


void DMemModelProcess::UnmapCodeSeg(DCodeSeg* aSeg)
    {
    __ASSERT_CRITICAL; // Must be in critical section so can't leak os asid references.

    DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
    __KTRACE_OPT(KDLL,Kern::Printf("Process %O UnmapCodeSeg %C", this, aSeg));
    if (seg.iAttr&ECodeSegAttKernel)
        return; // no extra mappings needed for kernel code

    // Attempt to open a reference on the os asid; it is required so that
    // UnmapUserRamCode() and DecommitDllData() can use iOsAsid safely.
    TInt osAsid = TryOpenOsAsid();
    if (osAsid < 0)
        {// The process has died and will already have cleaned up any code segs.
        return;
        }

    if (seg.IsDll())
        {
        TInt total_data_size;
        TLinAddr data_base;
        seg.GetDataSizeAndBase(total_data_size, data_base);
        if (total_data_size)
            DecommitDllData(data_base, MM::RoundToPageSize(total_data_size));
        }
    if (seg.Memory())
        UnmapUserRamCode(seg.Memory());

    CloseOsAsid();
    }

void DMemModelProcess::RemoveDllData()
//
// Call with CodeSegLock held
//
    {
    }


TInt DMemModelProcess::MapUserRamCode(DMemModelCodeSegMemory* aMemory)
    {
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O MapUserRamCode %C %d %d",
                                    this, aMemory->iCodeSeg, iOsAsid, aMemory->iPagedCodeInfo!=0));
    __ASSERT_MUTEX(DCodeSeg::CodeSegLock);

    TMappingCreateFlags createFlags = EMappingCreateExactVirtual;

    if(!(aMemory->iCodeSeg->iAttr&ECodeSegAttAddrNotUnique))
        {
        // codeseg memory address is globally unique, (common address across all processes)...
        FlagSet(createFlags,EMappingCreateCommonVirtual);
        }

    if(aMemory->iCodeSeg->IsExe())
        {
        // EXE codesegs have already had their virtual address allocated so we must adopt that...
        __NK_ASSERT_DEBUG(iCodeVirtualAllocSize);
        __NK_ASSERT_DEBUG(iCodeVirtualAllocAddress==aMemory->iRamInfo.iCodeRunAddr);
        iCodeVirtualAllocSize = 0;
        iCodeVirtualAllocAddress = 0;
        FlagSet(createFlags,EMappingCreateAdoptVirtual);
        }

    DMemoryMapping* mapping;
    return MM::MappingNew(mapping,aMemory->iCodeMemoryObject,EUserExecute,iOsAsid,createFlags,aMemory->iRamInfo.iCodeRunAddr);
    }


void DMemModelProcess::UnmapUserRamCode(DMemModelCodeSegMemory* aMemory)
    {
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O UnmapUserRamCode %C %d %d",
                                    this, aMemory->iCodeSeg, iOsAsid, aMemory->iPagedCodeInfo!=0));

    __ASSERT_MUTEX(DCodeSeg::CodeSegLock);
    MM::MappingDestroy(aMemory->iRamInfo.iCodeRunAddr,iOsAsid);
    }


TInt DMemModelProcess::CommitDllData(TLinAddr aBase, TInt aSize, DCodeSeg* aCodeSeg)
    {
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CommitDllData %08x+%x",this,aBase,aSize));

    DMemoryObject* memory;
    TMemoryObjectType memoryType = aCodeSeg->iAttr&ECodeSegAttDataPaged ? EMemoryObjectPaged : EMemoryObjectMovable;
    TInt r = MM::MemoryNew(memory,memoryType,MM::BytesToPages(aSize));
    if(r==KErrNone)
        {
        r = MM::MemoryAlloc(memory,0,MM::BytesToPages(aSize));
        if(r==KErrNone)
            {
            DMemoryMapping* mapping;
            r = MM::MappingNew(mapping,memory,EUserReadWrite,iOsAsid,EMappingCreateCommonVirtual,aBase);
            }
        if(r!=KErrNone)
            MM::MemoryDestroy(memory);
        else
            {
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
            BTrace12(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsDllStaticData,memory,aCodeSeg,this);
#endif
            }
        }
    __KTRACE_OPT(KDLL,Kern::Printf("CommitDllData returns %d",r));
    return r;
    }


void DMemModelProcess::DecommitDllData(TLinAddr aBase, TInt aSize)
    {
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O DecommitDllData %08x+%x",this,aBase,aSize));
    MM::MappingAndMemoryDestroy(aBase,iOsAsid);
    }

void DMemModelProcess::BTracePrime(TInt aCategory)
    {
    DProcess::BTracePrime(aCategory);

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
    if (aCategory == BTrace::EFlexibleMemModel || aCategory == -1)
        {
        BTrace8(BTrace::EFlexibleMemModel,BTrace::EAddressSpaceId,this,iOsAsid);

        if (iDataBssMapping)
            {
            DMemoryObject* memory = MM::MappingGetAndOpenMemory(iDataBssMapping);
            if (memory)
                {
                MM::MemoryBTracePrime(memory);
                BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsProcessStaticData,memory,this);
                MM::MemoryClose(memory);
                }
            }

        // Trace memory objects for DLL static data
        SDblQue cs_list;
        DCodeSeg::UnmarkAll(DCodeSeg::EMarkListDeps|DCodeSeg::EMarkUnListDeps);
        TraverseCodeSegs(&cs_list, NULL, DCodeSeg::EMarkListDeps, 0);
        SDblQueLink* anchor=&cs_list.iA;
        SDblQueLink* pL=cs_list.First();
        for(; pL!=anchor; pL=pL->iNext)
            {
            DMemModelCodeSeg* seg = _LOFF(pL,DMemModelCodeSeg,iTempLink);
            if (seg->IsDll())
                {
                TInt total_data_size;
                TLinAddr data_base;
                seg->GetDataSizeAndBase(total_data_size, data_base);
                if (total_data_size)
                    {
                    TUint offset;
                    // The instance count can be ignored as a dll data mapping is only ever
                    // used with a single memory object.
                    TUint mappingInstanceCount;
                    NKern::ThreadEnterCS();
                    DMemoryMapping* mapping = MM::FindMappingInAddressSpace(iOsAsid, data_base, 0, offset, mappingInstanceCount);
                    if (mapping)
                        {
                        DMemoryObject* memory = MM::MappingGetAndOpenMemory(mapping);
                        if (memory)
                            {
                            MM::MemoryBTracePrime(memory);
                            BTrace12(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsDllStaticData,memory,seg,this);
                            MM::MemoryClose(memory);
                            }
                        MM::MappingClose(mapping);
                        }
                    NKern::ThreadLeaveCS();
                    }
                }
            }
        DCodeSeg::EmptyQueue(cs_list, 0); // leave cs_list empty
        }
#endif
    }

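// Create a shared pool for this process; page-aligned and non-aligned buffer
// pools are implemented by different concrete classes.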
TInt DMemModelProcess::NewShPool(DShPool*& aPool, TShPoolCreateInfo& aInfo)
    {
    aPool = NULL;
    DMemModelShPool* pC = NULL;

    if (aInfo.iInfo.iFlags & TShPoolCreateInfo::EPageAlignedBuffer)
        {
        pC = new DMemModelAlignedShPool();
        }
    else
        {
        pC = new DMemModelNonAlignedShPool();
        }

    if (pC == NULL)
        {
        return KErrNoMemory;
        }

    TInt r = pC->Create(this, aInfo);

    if (r == KErrNone)
        {
        aPool = pC;
        }
    else
        {
        pC->Close(NULL);
        }

    return r;
    }

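// RawRead() and RawWrite() below copy between processes by temporarily
// aliasing a region of the other process's address space into the current
// thread's address space (DMemModelThread::Alias) and copying with memcpy.
// When __BROADCAST_CACHE_MAINTENANCE__ is defined, a paging fault can remove
// the alias mid-copy, so after a fault the copy is resumed one page at a time.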
TInt DThread::RawRead(const TAny* aSrc, TAny* aDest, TInt aLength, TInt aFlags, TIpcExcTrap* aExcTrap)
//
// Read from the thread's process.
// aSrc     Run address of memory to read.
// aDest    Current address of destination.
// aExcTrap Exception trap object to be updated if the actual memory access is performed
//          on a different memory area than specified. This happens when the read is
//          performed on an unaligned memory area.
//
    {
    (void)aExcTrap;
    DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
    DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
    TLinAddr src=(TLinAddr)aSrc;
    TLinAddr dest=(TLinAddr)aDest;
    TInt result = KErrNone;
    TBool have_taken_fault = EFalse;

    while (aLength)
        {
        if (iMState==EDead)
            {
            result = KErrDied;
            break;
            }
        TLinAddr alias_src;
        TUint alias_size;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
        TInt pagingTrap;
        XTRAP_PAGING_START(pagingTrap);
#endif

        TInt len = have_taken_fault ? Min(aLength, KPageSize - (src & KPageMask)) : aLength;
        TInt alias_result=t.Alias(src, pP, len, alias_src, alias_size);
        if (alias_result<0)
            {
            result = KErrBadDescriptor; // bad permissions
            break;
            }

#ifdef __BROADCAST_CACHE_MAINTENANCE__
        // need to let the trap handler know where we are accessing in case we take a page fault
        // and the alias gets removed
        aExcTrap->iRemoteBase = alias_src;
        aExcTrap->iSize = alias_size;
#endif

        __KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead %08x<-%08x+%x",dest,alias_src,alias_size));

        CHECK_PAGING_SAFE;

        if(aFlags&KCheckLocalAddress)
            MM::ValidateLocalIpcAddress(dest,alias_size,ETrue);
        UNLOCK_USER_MEMORY();
        memcpy( (TAny*)dest, (const TAny*)alias_src, alias_size);
        LOCK_USER_MEMORY();

        src+=alias_size;
        dest+=alias_size;
        aLength-=alias_size;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
        XTRAP_PAGING_END;
        if(pagingTrap)
            have_taken_fault = ETrue;
#endif
        }
    t.RemoveAlias();
#ifdef __BROADCAST_CACHE_MAINTENANCE__
    t.iPagingExcTrap = NULL; // in case we broke out of the loop and skipped XTRAP_PAGING_END
#endif

    return result;
    }


TInt DThread::RawWrite(const TAny* aDest, const TAny* aSrc, TInt aLength, TInt aFlags, DThread* /*anOriginatingThread*/, TIpcExcTrap* aExcTrap)
//
// Write to the thread's process.
// aDest    Run address of memory to write.
// aSrc     Current address of the source data.
// aExcTrap Exception trap object to be updated if the actual memory access is performed
//          on a different memory area than specified. This happens when the write is
//          performed on an unaligned memory area.
//
    {
    (void)aExcTrap;
    DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
    DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
    TLinAddr src=(TLinAddr)aSrc;
    TLinAddr dest=(TLinAddr)aDest;
    TInt result = KErrNone;
    TBool have_taken_fault = EFalse;

    while (aLength)
        {
        if (iMState==EDead)
            {
            result = KErrDied;
            break;
            }
        TLinAddr alias_dest;
        TUint alias_size;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
        TInt pagingTrap;
        XTRAP_PAGING_START(pagingTrap);
#endif

        TInt len = have_taken_fault ? Min(aLength, KPageSize - (dest & KPageMask)) : aLength;
        TInt alias_result=t.Alias(dest, pP, len, alias_dest, alias_size);
        if (alias_result<0)
            {
            result = KErrBadDescriptor; // bad permissions
            break;
            }

#ifdef __BROADCAST_CACHE_MAINTENANCE__
        // need to let the trap handler know where we are accessing in case we take a page fault
        // and the alias gets removed
        aExcTrap->iRemoteBase = alias_dest;
        aExcTrap->iSize = alias_size;
#endif

        __KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite %08x+%x->%08x",src,alias_size,alias_dest));

        // Must check that it is safe to page, unless we are reading from unpaged ROM in which case
        // we allow it.
        CHECK_PAGING_SAFE_RANGE(src, aLength);
        CHECK_DATA_PAGING_SAFE_RANGE(dest, aLength);

        if(aFlags&KCheckLocalAddress)
            MM::ValidateLocalIpcAddress(src,alias_size,EFalse);
        UNLOCK_USER_MEMORY();
        memcpy( (TAny*)alias_dest, (const TAny*)src, alias_size);
        LOCK_USER_MEMORY();

        src+=alias_size;
        dest+=alias_size;
        aLength-=alias_size;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
        XTRAP_PAGING_END;
        if(pagingTrap)
            have_taken_fault = ETrue;
#endif
        }
    t.RemoveAlias();
#ifdef __BROADCAST_CACHE_MAINTENANCE__
    t.iPagingExcTrap = NULL; // in case we broke out of the loop and skipped XTRAP_PAGING_END
#endif

    return result;
    }

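// Generic (non-ARM) implementation; ARM builds supply their own version of
// ReadAndParseDesHeader, hence the __MARM__ guard.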
#ifndef __MARM__

TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
//
// Read the header of a remote descriptor.
//
    {
    static const TUint8 LengthLookup[16]={4,8,12,8,12,0,0,0,0,0,0,0,0,0,0,0};

    CHECK_PAGING_SAFE;

    DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
    DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
    TLinAddr src=(TLinAddr)aSrc;

    __NK_ASSERT_DEBUG(t.iIpcClient==NULL);
    t.iIpcClient = this;

    TLinAddr pAlias;
    TUint8* pDest = (TUint8*)&aDest;
    TUint alias_size = 0;
    TInt length = 12;
    TInt type = KErrBadDescriptor;
    while (length > 0)
        {
#ifdef __BROADCAST_CACHE_MAINTENANCE__
        TInt pagingTrap;
        XTRAP_PAGING_START(pagingTrap);
#endif

        if (alias_size == 0)
            {
            // no alias present, so must create one here
            if (t.Alias(src, pP, length, pAlias, alias_size) != KErrNone)
                break;
            __NK_ASSERT_DEBUG(alias_size >= sizeof(TUint32));
            }

        // read either the first word, or as much as aliased of the remainder
        TInt l = length == 12 ? sizeof(TUint32) : Min(length, alias_size);
        if (Kern::SafeRead((TAny*)pAlias, (TAny*)pDest, l))
            break; // exception reading from user space

        if (length == 12)
            {
            // we have just read the first word, so decode the descriptor type
            type = *(TUint32*)pDest >> KShiftDesType8;
            length = LengthLookup[type];
            // an invalid descriptor type has length 0, which gets decreased by 'l' and
            // terminates the loop with length < 0
            }

        src += l;
        alias_size -= l;
        pAlias += l;
        pDest += l;
        length -= l;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
        XTRAP_PAGING_END;
        if (pagingTrap)
            alias_size = 0; // a page fault caused the alias to be removed
#endif
        }

    t.RemoveAlias();
    t.iIpcClient = NULL;
#ifdef __BROADCAST_CACHE_MAINTENANCE__
    t.iPagingExcTrap = NULL; // in case we broke out of the loop and skipped XTRAP_PAGING_END
#endif
    return length == 0 ? K::ParseDesHeader(aSrc, (TRawDesHeader&)aDest, aDest) : KErrBadDescriptor;
    }


#endif


TInt DThread::PrepareMemoryForDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
    {
    // not supported, new Physical Pinning APIs should be used for DMA
    return KErrNotSupported;
    }

TInt DThread::ReleaseMemoryFromDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
    {
    // not supported, new Physical Pinning APIs should be used for DMA
    return KErrNotSupported;
    }