author      Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
date        Thu, 19 Aug 2010 11:14:22 +0300
branch      RCL_3
changeset   42:a179b74831c9
parent      8:538db54a451d
permissions -rw-r--r--
// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//
#include <memmodel.h>
#include "mmu/mm.h"
#include "mmu/maddrcont.h"
#include "mmboot.h"
#include <kernel/cache.h>
#include "execs.h"

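// NB: iMState below aliases a spare word in the thread's wait link; the
// RawRead()/RawWrite() loops later in this file test it against EDead to
// detect a dying thread.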
#define iMState iWaitLink.iSpare1

NFastMutex TheSharedChunkLock;

#ifndef _DEBUG
const TInt KChunkGranularity = 4; // amount to grow SChunkInfo list by
const TInt KMaxChunkInfosInOneGo = 100; // max number of SChunkInfo objects to copy with System Lock held
#else // if debug...
const TInt KChunkGranularity = 1;
const TInt KMaxChunkInfosInOneGo = 1;
#endif

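// The debug values of 1 above force the iChunks array to be regrown on
// every insertion and limit each copy done with the System Lock held to a
// single SChunkInfo, so the realloc and shuffle paths in DoAddChunk() and
// DoRemoveChunk() are exercised as much as possible in debug builds.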

/********************************************
* Process
********************************************/

DMemModelProcess::~DMemModelProcess()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelProcess destruct"));
	Destruct();
	}


void DMemModelProcess::Destruct()
	{
	__ASSERT_ALWAYS(!iOsAsidRefCount, MM::Panic(MM::EProcessDestructOsAsidRemaining));
	__ASSERT_ALWAYS(!iChunkCount, MM::Panic(MM::EProcessDestructChunksRemaining));
	Kern::Free(iChunks);
	__ASSERT_ALWAYS(!iSharedChunks || iSharedChunks->Count()==0, MM::Panic(MM::EProcessDestructChunksRemaining));
	delete iSharedChunks;

	DProcess::Destruct();
	}

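/**
Open a reference on this process's OS ASID (address space ID).

The atomic used below, __e32_atomic_tas_ord32(&c,1,1,0), adds 1 to c if
c>=1 (and 0 otherwise) and returns the old value, so the reference count
is only incremented while it is still non-zero, i.e. while the address
space has not yet been freed.

@return The OS ASID on success, or KErrDied if the process has died and
		freed its address space.
*/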
TInt DMemModelProcess::TryOpenOsAsid()
	{
	if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, 1, 0))
		{
		return iOsAsid;
		}
	return KErrDied;
	}

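/**
Close a reference on the OS ASID; an old count of 1 from the atomic
decrement means this was the last reference, so the address space is
freed. AsyncCloseOsAsid() below is the same except that the free is
performed asynchronously.
*/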
void DMemModelProcess::CloseOsAsid()
	{
	if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, -1, 0) == 1)
		{// Last reference has been closed so free the asid.
		MM::AddressSpaceFree(iOsAsid);
		}
	}


void DMemModelProcess::AsyncCloseOsAsid()
	{
	if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, -1, 0) == 1)
		{// Last reference has been closed so free the asid asynchronously.
		MM::AsyncAddressSpaceFree(iOsAsid);
		}
	}

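/**
Create a new chunk for this process as described by aInfo.

Outline: construct a DMemModelChunk and apply its create attributes, then
optionally perform the initial commit/adjust (SChunkCreateInfo::EAdjust)
and map the chunk into this process (SChunkCreateInfo::EAdd). On success
aRunAddr is set from the kernel mapping where one exists and the new
chunk is returned in aChunk.
*/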
TInt DMemModelProcess::NewChunk(DChunk*& aChunk, SChunkCreateInfo& aInfo, TLinAddr& aRunAddr)
	{
	aChunk=NULL;

	DMemModelChunk* pC=new DMemModelChunk;
	if (!pC)
		return KErrNoMemory;

	TChunkType type = aInfo.iType;
	pC->iChunkType=type;
	TInt r=pC->SetAttributes(aInfo);
	if (r!=KErrNone)
		{
		pC->Close(NULL);
		return r;
		}

	pC->iOwningProcess=(pC->iAttributes&DMemModelChunk::EPublic)?NULL:this;
	r=pC->Create(aInfo);
	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdjust))
		{
		if (aInfo.iRunAddress!=0)
			pC->SetFixedAddress(aInfo.iRunAddress,aInfo.iPreallocated);
		if (aInfo.iPreallocated==0 && aInfo.iInitialTop!=0)
			{
			if (pC->iAttributes & DChunk::EDisconnected)
				{
				r=pC->Commit(aInfo.iInitialBottom,aInfo.iInitialTop-aInfo.iInitialBottom);
				}
			else if (pC->iAttributes & DChunk::EDoubleEnded)
				{
				r=pC->AdjustDoubleEnded(aInfo.iInitialBottom,aInfo.iInitialTop);
				}
			else
				{
				r=pC->Adjust(aInfo.iInitialTop);
				}
			}
		}
	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdd))
		{
		r = AddChunk(pC, EFalse);
		}
	if (r==KErrNone)
		{
		if(pC->iKernelMapping)
			aRunAddr = (TLinAddr)MM::MappingBase(pC->iKernelMapping);
		pC->iDestroyedDfc = aInfo.iDestroyedDfc;
		aChunk=(DChunk*)pC;
		}
	else
		pC->Close(NULL);	// NULL since chunk can't have been added to process
	return r;
	}


/**
Determine whether this process should be data paged.

@param aInfo A reference to the create info for this process.
*/
TInt DMemModelProcess::SetPaging(const TProcessCreateInfo& aInfo)
	{
	TUint pagedFlags = aInfo.iFlags & TProcessCreateInfo::EDataPagingMask;
	// If both the KImageDataPaged and KImageDataUnpaged flags are present then
	// the image is corrupt. Check this first to ensure that it is always verified.
	if (pagedFlags == TProcessCreateInfo::EDataPagingMask)
		{
		return KErrCorrupt;
		}

	if (aInfo.iAttr & ECodeSegAttKernel ||
		!(K::MemModelAttributes & EMemModelAttrDataPaging))
		{// Kernel process shouldn't be data paged or no data paging device installed.
		return KErrNone;
		}

	TUint dataPolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigDataPagingPolicyMask;
	if (dataPolicy == EKernelConfigDataPagingPolicyAlwaysPage)
		{
		iAttributes |= EDataPaged;
		return KErrNone;
		}
	if (dataPolicy == EKernelConfigDataPagingPolicyNoPaging)
		{// No paging allowed so just return.
		return KErrNone;
		}
	if (pagedFlags == TProcessCreateInfo::EDataPaged)
		{
		iAttributes |= EDataPaged;
		return KErrNone;
		}
	if (pagedFlags == TProcessCreateInfo::EDataUnpaged)
		{// No paging set so just return.
		return KErrNone;
		}
	// Neither paged nor unpaged set so use default paging policy.
	// dataPolicy must be EKernelConfigDataPagingPolicyDefaultUnpaged or
	// EKernelConfigDataPagingPolicyDefaultPaged.
	__NK_ASSERT_DEBUG(pagedFlags == TProcessCreateInfo::EDataPagingUnspecified);
	__NK_ASSERT_DEBUG(	dataPolicy == EKernelConfigDataPagingPolicyDefaultPaged ||
						dataPolicy == EKernelConfigDataPagingPolicyDefaultUnpaged);
	if (dataPolicy == EKernelConfigDataPagingPolicyDefaultPaged)
		{
		iAttributes |= EDataPaged;
		}
	return KErrNone;
	}

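// Decision summary for SetPaging() above (kernel policy vs. image flags):
//   policy AlwaysPage                    -> paged
//   policy NoPaging                      -> unpaged
//   image flags EDataPaged               -> paged
//   image flags EDataUnpaged             -> unpaged
//   unspecified + policy DefaultPaged    -> paged
//   unspecified + policy DefaultUnpaged  -> unpaged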

TInt DMemModelProcess::DoCreate(TBool aKernelProcess, TProcessCreateInfo& aInfo)
	{
	// Required so we can detect whether a process has been created and added
	// to its object container by checking for iContainerID!=EProcess.
	__ASSERT_COMPILE(EProcess != 0);
	__KTRACE_OPT(KPROC,Kern::Printf(">DMemModelProcess::DoCreate %O",this));
	TInt r=KErrNone;

	if (aKernelProcess)
		{
		iAttributes |= ESupervisor;
		iOsAsid = KKernelOsAsid;
		}
	else
		{
		r = MM::AddressSpaceAlloc(iPageDir);
		if (r>=0)
			{
			iOsAsid = r;
			r = KErrNone;
			}
		}
	if (r == KErrNone)
		{// Add this process's own reference to its os asid.
		__e32_atomic_store_ord32(&iOsAsidRefCount, 1);
		}

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	BTrace8(BTrace::EFlexibleMemModel,BTrace::EAddressSpaceId,this,iOsAsid);
#endif

	__KTRACE_OPT(KPROC,Kern::Printf("OS ASID=%d, PD=%08x",iOsAsid,iPageDir));
	__KTRACE_OPT(KPROC,Kern::Printf("<DMemModelProcess::DoCreate %d",r));
	return r;
	}

TInt DMemModelProcess::CreateDataBssStackArea(TProcessCreateInfo& aInfo)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::CreateDataBssStackArea %O",this));
	TInt r = KErrNone;
	TInt dataBssSize = MM::RoundToPageSize(aInfo.iTotalDataSize);
	if(dataBssSize)
		{
		DMemoryObject* memory;
		TMemoryObjectType memoryType = iAttributes&EDataPaged ? EMemoryObjectPaged : EMemoryObjectMovable;
		r = MM::MemoryNew(memory,memoryType,MM::BytesToPages(dataBssSize));
		if(r==KErrNone)
			{
			r = MM::MemoryAlloc(memory,0,MM::BytesToPages(dataBssSize));
			if(r==KErrNone)
				{
				r = MM::MappingNew(iDataBssMapping,memory,EUserReadWrite,OsAsid());
				}
			if(r!=KErrNone)
				MM::MemoryDestroy(memory);
			else
				{
				iDataBssRunAddress = MM::MappingBase(iDataBssMapping);
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
				BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsProcessStaticData,memory,this);
#endif
				}
			}
		}
	__KTRACE_OPT(KPROC,Kern::Printf("DataBssSize=%x, ",dataBssSize));

	return r;
	}


TInt DMemModelProcess::AttachExistingCodeSeg(TProcessCreateInfo& aInfo)
	{
	TInt r = DEpocProcess::AttachExistingCodeSeg(aInfo);
	if(r==KErrNone)
		{
		// allocate virtual memory for the EXEs codeseg...
		DMemModelCodeSeg* seg = (DMemModelCodeSeg*)iTempCodeSeg;
		if(seg->iAttr&ECodeSegAttAddrNotUnique)
			{
			TUint codeSize = seg->iSize;
			TLinAddr codeAddr = seg->RamInfo().iCodeRunAddr;
			TBool isDemandPaged = seg->iAttr&ECodeSegAttCodePaged;
			// Allocate virtual memory for the code seg using the os asid.
			// No need to open a reference on the os asid as the process is not
			// fully created yet, so it can't die and free the os asid.
			r = MM::VirtualAlloc(OsAsid(),codeAddr,codeSize,isDemandPaged);
			if(r==KErrNone)
				{
				iCodeVirtualAllocSize = codeSize;
				iCodeVirtualAllocAddress = codeAddr;
				}
			}
		}

	return r;
	}


TInt DMemModelProcess::AddChunk(DChunk* aChunk, TBool aIsReadOnly)
	{
	DMemModelChunk* pC=(DMemModelChunk*)aChunk;
	if(pC->iOwningProcess && this!=pC->iOwningProcess)
		return KErrAccessDenied;

	TInt r = WaitProcessLock();
	if(r==KErrNone)
		{
		TInt i = ChunkIndex(pC);
		if(i>=0)	// Found the chunk in this process, just up its count
			{
			iChunks[i].iAccessCount++;
			__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (Access count incremented to %d)",aChunk,this,iChunks[i].iAccessCount));
			SignalProcessLock();
			return KErrNone;
			}
		r = DoAddChunk(pC,aIsReadOnly);
		SignalProcessLock();
		}
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk returns %d",r));
	return r;
	}

void M::FsRegisterThread()
	{
	TInternalRamDrive::Unlock();
	}


void ExecHandler::UnlockRamDrive()
	{
	}

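// If the RAM drive chunk is not yet mapped into the calling process the
// first Base() lookup below returns 0; Unlock() then adds the mapping via
// DoAddChunk() and the lookup is retried.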
EXPORT_C TLinAddr TInternalRamDrive::Base()
	{
	DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	NKern::LockSystem();
	TLinAddr addr = (TLinAddr)pC->Base(pP);
	NKern::UnlockSystem();
	if(!addr)
		{
		Unlock();
		NKern::LockSystem();
		addr = (TLinAddr)pC->Base(pP);
		NKern::UnlockSystem();
		}
	return addr;
	}


EXPORT_C void TInternalRamDrive::Unlock()
	{
	DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;

	TInt r = pP->WaitProcessLock();
	if(r==KErrNone)
		if(pP->ChunkIndex(pC)==KErrNotFound)
			r = pP->DoAddChunk(pC,EFalse);
	__ASSERT_ALWAYS(r==KErrNone, MM::Panic(MM::EFsRegisterThread));
	pP->SignalProcessLock();
	}


EXPORT_C void TInternalRamDrive::Lock()
	{
	}


TInt DMemModelProcess::DoAddChunk(DMemModelChunk* aChunk, TBool aIsReadOnly)
	{
	//
	// Must hold the process $LOCK mutex before calling this.
	// As the process lock is held it is safe to access iOsAsid without a reference.
	//

	__NK_ASSERT_DEBUG(ChunkIndex(aChunk)==KErrNotFound); // shouldn't be adding a chunk which is already added

	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::DoAddChunk %O to %O",aChunk,this));

	// create mapping for chunk...
	DMemoryMapping* mapping;
	TMappingPermissions perm = MM::MappingPermissions
		(
		iOsAsid!=(TInt)KKernelOsAsid,	// user?
		aIsReadOnly==false, // write?
		aChunk->iAttributes&DMemModelChunk::ECode // execute?
		);
	TInt r;
	if(aChunk->iFixedBase) // HACK, kernel chunk has a fixed iBase
		r = MM::MappingNew(mapping,aChunk->iMemoryObject,perm,iOsAsid,EMappingCreateExactVirtual,(TLinAddr)aChunk->iFixedBase);
	else
		r = MM::MappingNew(mapping,aChunk->iMemoryObject,perm,iOsAsid);
	if(r!=KErrNone)
		return r;
	if(iOsAsid==0)
		aChunk->iKernelMapping = mapping;
	TLinAddr base = MM::MappingBase(mapping);

	// expand chunk info memory if required...
	if(iChunkCount==iChunkAlloc)
		{
		TInt newAlloc = iChunkAlloc+KChunkGranularity;
		r = Kern::SafeReAlloc((TAny*&)iChunks,iChunkAlloc*sizeof(SChunkInfo),newAlloc*sizeof(SChunkInfo));
		if(r!=KErrNone)
			{
			MM::MappingDestroy(mapping);
			return r;
			}
		iChunkAlloc = newAlloc;
		}

	// insert new chunk info...
	TUint i = ChunkInsertIndex(aChunk);
	SChunkInfo* info = iChunks+i;
	SChunkInfo* infoEnd = iChunks+iChunkCount;
	NKern::LockSystem();
	++iChunkCount;
	for(;;)
		{
		// make space for new chunk info by shuffling along
		// existing infos KMaxChunkInfosInOneGo at a time...
		SChunkInfo* infoPtr = infoEnd-KMaxChunkInfosInOneGo;
		if(infoPtr<info)
			infoPtr = info;
		memmove(infoPtr+1,infoPtr,(TLinAddr)infoEnd-(TLinAddr)infoPtr);
		infoEnd = infoPtr;
		if(infoEnd<=info)
			break;
		NKern::FlashSystem();
		}
	info->iChunk = aChunk;
	info->iMapping = mapping;
	info->iAccessCount = 1;
	info->iIsReadOnly = aIsReadOnly;
	NKern::UnlockSystem();

	// add chunk to list of Shared Chunks...
	if(aChunk->iChunkType==ESharedKernelSingle || aChunk->iChunkType==ESharedKernelMultiple)
		{
		if(!iSharedChunks)
			iSharedChunks = new RAddressedContainer(&TheSharedChunkLock,iProcessLock);
		if(!iSharedChunks)
			r = KErrNoMemory;
		else
			r = iSharedChunks->Add(base,aChunk);
		if(r!=KErrNone)
			{
			DoRemoveChunk(i);
			return r;
			}
		}

	// done OK...
	__DEBUG_EVENT(EEventUpdateProcess, this);
	return KErrNone;
	}


void DMemModelProcess::DoRemoveChunk(TInt aIndex)
	{
	__DEBUG_EVENT(EEventUpdateProcess, this);

	DMemModelChunk* chunk = iChunks[aIndex].iChunk;
	DMemoryMapping* mapping = iChunks[aIndex].iMapping;

	if(chunk->iChunkType==ESharedKernelSingle || chunk->iChunkType==ESharedKernelMultiple)
		{
		// remove chunk from list of Shared Chunks...
		if(iSharedChunks)
			{
			iSharedChunks->Remove(MM::MappingBase(mapping));
#ifdef _DEBUG
			// delete iSharedChunks if it's empty, so memory leak test code passes...
			if(iSharedChunks->Count()==0)
				{
				NKern::FMWait(&TheSharedChunkLock);
				RAddressedContainer* s = iSharedChunks;
				iSharedChunks = 0;
				NKern::FMSignal(&TheSharedChunkLock);
				delete s;
				}
#endif
			}
		}

	// remove chunk from array...
	SChunkInfo* infoStart = iChunks+aIndex+1;
	SChunkInfo* infoEnd = iChunks+iChunkCount;
	NKern::LockSystem();
	for(;;)
		{
		// shuffle existing infos down KMaxChunkInfosInOneGo at a time...
		SChunkInfo* infoPtr = infoStart+KMaxChunkInfosInOneGo;
		if(infoPtr>infoEnd)
			infoPtr = infoEnd;
		memmove(infoStart-1,infoStart,(TLinAddr)infoPtr-(TLinAddr)infoStart);
		infoStart = infoPtr;
		if(infoStart>=infoEnd)
			break;
		NKern::FlashSystem();
		}
	--iChunkCount;
	NKern::UnlockSystem();

	if(mapping==chunk->iKernelMapping)
		chunk->iKernelMapping = 0;

	MM::MappingDestroy(mapping);
	}


/**
Final chance for process to release resources during its death.

Called with process $LOCK mutex held (if it exists).
This mutex will not be released before it is deleted.
I.e. no other thread will ever hold the mutex again.
*/
void DMemModelProcess::FinalRelease()
	{
	// Clean up any left over chunks (such as SharedIo buffers)
	if(iProcessLock)
		while(iChunkCount)
			DoRemoveChunk(0);
	// Destroy the remaining mappings and memory objects owned by this process
	MM::MappingAndMemoryDestroy(iDataBssMapping);
	if(iCodeVirtualAllocSize)
		MM::VirtualFree(iOsAsid,iCodeVirtualAllocAddress,iCodeVirtualAllocSize);

	// Close the original reference on the os asid.
	CloseOsAsid();
	}


void DMemModelProcess::RemoveChunk(DMemModelChunk *aChunk)
	{
	// note that this can't be called after the process $LOCK mutex has been deleted
	// since it can only be called by a thread in this process doing a handle close or
	// dying, or by the process handles array being deleted due to the process dying,
	// all of which happen before $LOCK is deleted.
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O RemoveChunk %O",this,aChunk));
	Kern::MutexWait(*iProcessLock);
	TInt i = ChunkIndex(aChunk);
	if(i>=0) // Found the chunk
		{
		__KTRACE_OPT(KPROC,Kern::Printf("Chunk access count %d",iChunks[i].iAccessCount));
		if(--iChunks[i].iAccessCount==0)
			{
			DoRemoveChunk(i);
			}
		}
	Kern::MutexSignal(*iProcessLock);
	}


TUint8* DMemModelChunk::Base(DProcess* aProcess)
	{
	DMemModelProcess* pP = (DMemModelProcess*)aProcess;
	DMemoryMapping* mapping = 0;

	if(iKernelMapping && pP==K::TheKernelProcess)
		{
		// shortcut for shared chunks...
		mapping = iKernelMapping;
		}
	else
		{
		// find chunk in process...
		TInt i = pP->ChunkIndex(this);
		if(i>=0)
			mapping = pP->iChunks[i].iMapping;
		}

	if(!mapping)
		return 0;

	return (TUint8*)MM::MappingBase(mapping);
	}

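/**
Find the shared chunk (if any) mapped in this thread's owning process that
contains the address aAddress, open a reference on it and return the offset
of aAddress within it. Returns NULL if no shared chunk covers the address
or the chunk could not be opened.
*/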
DChunk* DThread::OpenSharedChunk(const TAny* aAddress, TBool aWrite, TInt& aOffset)
	{
	DMemModelChunk* chunk = 0;

	NKern::FMWait(&TheSharedChunkLock);
	RAddressedContainer* list = ((DMemModelProcess*)iOwningProcess)->iSharedChunks;
	if(list)
		{
		// search list...
		TUint offset;
		chunk = (DMemModelChunk*)list->Find((TLinAddr)aAddress,offset);
		if(chunk && offset<TUint(chunk->iMaxSize) && chunk->Open()==KErrNone)
			aOffset = offset; // chunk found and opened successfully
		else
			chunk = 0; // failed
		}
	NKern::FMSignal(&TheSharedChunkLock);

	return chunk;
	}

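// The iChunks array is kept sorted by DMemModelChunk* value, so both
// lookups and insertions can use a binary search: ChunkInsertIndex()
// returns the index of the first entry whose chunk pointer is greater
// than aChunk.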
TUint DMemModelProcess::ChunkInsertIndex(DMemModelChunk* aChunk)
	{
	// need to hold iProcessLock or System Lock...
#ifdef _DEBUG
	if(K::Initialising==false && iProcessLock!=NULL && iProcessLock->iCleanup.iThread!=&Kern::CurrentThread())
		{
		// don't hold iProcessLock, so...
		__ASSERT_SYSTEM_LOCK;
		}
#endif

	// binary search...
	SChunkInfo* list = iChunks;
	TUint l = 0;
	TUint r = iChunkCount;
	TUint m;
	while(l<r)
		{
		m = (l+r)>>1;
		DChunk* x = list[m].iChunk;
		if(x<=aChunk)
			l = m+1;
		else
			r = m;
		}
	return r;
	}


TInt DMemModelProcess::ChunkIndex(DMemModelChunk* aChunk)
	{
	TUint i = ChunkInsertIndex(aChunk);
	if(i && iChunks[--i].iChunk==aChunk)
		return i;
	return KErrNotFound;
	}


TInt DMemModelProcess::MapCodeSeg(DCodeSeg* aSeg)
	{
	__ASSERT_CRITICAL;	// Must be in critical section so can't leak os asid references.

	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
	__KTRACE_OPT(KDLL,Kern::Printf("Process %O MapCodeSeg %C", this, aSeg));
	TBool kernel_only=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
	TBool user_local=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == 0 );
	if (kernel_only && !(iAttributes&ESupervisor))
		return KErrNotSupported;
	if (seg.iAttr&ECodeSegAttKernel)
		return KErrNone;	// no extra mappings needed for kernel code

	// Attempt to open a reference on the os asid; it is required so that
	// MapUserRamCode() and CommitDllData() can use iOsAsid safely.
	TInt osAsid = TryOpenOsAsid();
	if (osAsid < 0)
		{// The process has died.
		return KErrDied;
		}

	TInt r=KErrNone;
	if (user_local)
		r=MapUserRamCode(seg.Memory());
	if (seg.IsDll())
		{
		TInt total_data_size;
		TLinAddr data_base;
		seg.GetDataSizeAndBase(total_data_size, data_base);
		if (r==KErrNone && total_data_size)
			{
			TInt size=MM::RoundToPageSize(total_data_size);
			r=CommitDllData(data_base, size, aSeg);
			if (r!=KErrNone && user_local)
				UnmapUserRamCode(seg.Memory());
			}
		}
	CloseOsAsid();

	return r;
	}


void DMemModelProcess::UnmapCodeSeg(DCodeSeg* aSeg)
	{
	__ASSERT_CRITICAL;	// Must be in critical section so can't leak os asid references.

	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
	__KTRACE_OPT(KDLL,Kern::Printf("Process %O UnmapCodeSeg %C", this, aSeg));
	if (seg.iAttr&ECodeSegAttKernel)
		return;	// no extra mappings needed for kernel code

	// Attempt to open a reference on the os asid; it is required so that
	// UnmapUserRamCode() and DecommitDllData() can use iOsAsid safely.
	TInt osAsid = TryOpenOsAsid();
	if (osAsid < 0)
		{// The process has died and will have cleaned up any code segs.
		return;
		}

	if (seg.IsDll())
		{
		TInt total_data_size;
		TLinAddr data_base;
		seg.GetDataSizeAndBase(total_data_size, data_base);
		if (total_data_size)
			DecommitDllData(data_base, MM::RoundToPageSize(total_data_size));
		}
	if (seg.Memory())
		UnmapUserRamCode(seg.Memory());

	CloseOsAsid();
	}

void DMemModelProcess::RemoveDllData()
//
// Call with CodeSegLock held
//
	{
	}


TInt DMemModelProcess::MapUserRamCode(DMemModelCodeSegMemory* aMemory)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O MapUserRamCode %C %d %d",
									this, aMemory->iCodeSeg, iOsAsid, aMemory->iPagedCodeInfo!=0));
	__ASSERT_MUTEX(DCodeSeg::CodeSegLock);

	TMappingCreateFlags createFlags = EMappingCreateExactVirtual;

	if(!(aMemory->iCodeSeg->iAttr&ECodeSegAttAddrNotUnique))
		{
		// codeseg memory address is globally unique, (common address across all processes)...
		FlagSet(createFlags,EMappingCreateCommonVirtual);
		}

	if(aMemory->iCodeSeg->IsExe())
		{
		// EXE codesegs have already had their virtual address allocated so we must adopt that...
		__NK_ASSERT_DEBUG(iCodeVirtualAllocSize);
		__NK_ASSERT_DEBUG(iCodeVirtualAllocAddress==aMemory->iRamInfo.iCodeRunAddr);
		iCodeVirtualAllocSize = 0;
		iCodeVirtualAllocAddress = 0;
		FlagSet(createFlags,EMappingCreateAdoptVirtual);
		}

	DMemoryMapping* mapping;
	return MM::MappingNew(mapping,aMemory->iCodeMemoryObject,EUserExecute,iOsAsid,createFlags,aMemory->iRamInfo.iCodeRunAddr);
	}


void DMemModelProcess::UnmapUserRamCode(DMemModelCodeSegMemory* aMemory)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O UnmapUserRamCode %C %d %d",
									this, aMemory->iCodeSeg, iOsAsid, aMemory->iPagedCodeInfo!=0));

	__ASSERT_MUTEX(DCodeSeg::CodeSegLock);
	MM::MappingDestroy(aMemory->iRamInfo.iCodeRunAddr,iOsAsid);
	}

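/**
Commit memory for a DLL's static data in this process at the fixed address
aBase. The backing memory object is demand paged or movable according to
the code seg's ECodeSegAttDataPaged attribute and is mapped at the same
common virtual address in every process that loads the DLL.
*/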
TInt DMemModelProcess::CommitDllData(TLinAddr aBase, TInt aSize, DCodeSeg* aCodeSeg)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CommitDllData %08x+%x",this,aBase,aSize));

	DMemoryObject* memory;
	TMemoryObjectType memoryType = aCodeSeg->iAttr&ECodeSegAttDataPaged ? EMemoryObjectPaged : EMemoryObjectMovable;
	TInt r = MM::MemoryNew(memory,memoryType,MM::BytesToPages(aSize));
	if(r==KErrNone)
		{
		r = MM::MemoryAlloc(memory,0,MM::BytesToPages(aSize));
		if(r==KErrNone)
			{
			DMemoryMapping* mapping;
			r = MM::MappingNew(mapping,memory,EUserReadWrite,iOsAsid,EMappingCreateCommonVirtual,aBase);
			}
		if(r!=KErrNone)
			MM::MemoryDestroy(memory);
		else
			{
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
			BTrace12(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsDllStaticData,memory,aCodeSeg,this);
#endif
			}
		}
	__KTRACE_OPT(KDLL,Kern::Printf("CommitDllData returns %d",r));
	return r;
	}


void DMemModelProcess::DecommitDllData(TLinAddr aBase, TInt aSize)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O DecommitDllData %08x+%x",this,aBase,aSize));
	MM::MappingAndMemoryDestroy(aBase,iOsAsid);
	}

void DMemModelProcess::BTracePrime(TInt aCategory)
	{
	DProcess::BTracePrime(aCategory);

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	if (aCategory == BTrace::EFlexibleMemModel || aCategory == -1)
		{
		BTrace8(BTrace::EFlexibleMemModel,BTrace::EAddressSpaceId,this,iOsAsid);

		if (iDataBssMapping)
			{
			DMemoryObject* memory = MM::MappingGetAndOpenMemory(iDataBssMapping);
			if (memory)
				{
				MM::MemoryBTracePrime(memory);
				BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsProcessStaticData,memory,this);
				MM::MemoryClose(memory);
				}
			}

		// Trace memory objects for DLL static data
		SDblQue cs_list;
		DCodeSeg::UnmarkAll(DCodeSeg::EMarkListDeps|DCodeSeg::EMarkUnListDeps);
		TraverseCodeSegs(&cs_list, NULL, DCodeSeg::EMarkListDeps, 0);
		SDblQueLink* anchor=&cs_list.iA;
		SDblQueLink* pL=cs_list.First();
		for(; pL!=anchor; pL=pL->iNext)
			{
			DMemModelCodeSeg* seg = _LOFF(pL,DMemModelCodeSeg,iTempLink);
			if (seg->IsDll())
				{
				TInt total_data_size;
				TLinAddr data_base;
				seg->GetDataSizeAndBase(total_data_size, data_base);
				if (total_data_size)
					{
					TUint offset;
					// The instance count can be ignored as a dll data mapping is only ever
					// used with a single memory object.
					TUint mappingInstanceCount;
					NKern::ThreadEnterCS();
					DMemoryMapping* mapping = MM::FindMappingInAddressSpace(iOsAsid, data_base, 0, offset, mappingInstanceCount);
					if (mapping)
						{
						DMemoryObject* memory = MM::MappingGetAndOpenMemory(mapping);
						if (memory)
							{
							MM::MemoryBTracePrime(memory);
							BTrace12(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsDllStaticData,memory,seg,this);
							MM::MemoryClose(memory);
							}
						MM::MappingClose(mapping);
						}
					NKern::ThreadLeaveCS();
					}
				}
			}
		DCodeSeg::EmptyQueue(cs_list, 0);	// leave cs_list empty
		}
#endif
	}


TInt DMemModelProcess::NewShPool(DShPool*& aPool, TShPoolCreateInfo& aInfo)
	{
	aPool = NULL;
	DMemModelShPool* pC = NULL;

	if (aInfo.iInfo.iFlags & TShPoolCreateInfo::EPageAlignedBuffer)
		{
		pC = new DMemModelAlignedShPool();
		}
	else
		{
		pC = new DMemModelNonAlignedShPool();
		}

	if (pC == NULL)
		{
		return KErrNoMemory;
		}

	TInt r = pC->Create(this, aInfo);

	if (r == KErrNone)
		{
		aPool = pC;
		}
	else
		{
		pC->Close(NULL);
		}

	return r;
	}

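// A note on the aliasing scheme used by RawRead() and RawWrite() below:
// the remote process's memory is made temporarily accessible via
// DMemModelThread::Alias(), which yields a local address and the number of
// bytes aliased. A paging fault can remove the alias, so after a fault the
// loop retries, re-aliasing at most one page at a time.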
TInt DThread::RawRead(const TAny* aSrc, TAny* aDest, TInt aLength, TInt aFlags, TIpcExcTrap* aExcTrap)
//
// Read from the thread's process.
// aSrc      Run address of memory to read
// aDest     Current address of destination
// aExcTrap  Exception trap object to be updated if the actual memory access
//           is performed on a different memory area than the one specified.
//           It happens when reading is performed on an un-aligned memory area.
//
	{
	(void)aExcTrap;
	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
	TLinAddr src=(TLinAddr)aSrc;
	TLinAddr dest=(TLinAddr)aDest;
	TInt result = KErrNone;
	TBool have_taken_fault = EFalse;

	while (aLength)
		{
		if (iMState==EDead)
			{
			result = KErrDied;
			break;
			}
		TLinAddr alias_src;
		TUint alias_size;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
		TInt pagingTrap;
		XTRAP_PAGING_START(pagingTrap);
#endif

		TInt len = have_taken_fault ? Min(aLength, KPageSize - (src & KPageMask)) : aLength;
		TInt alias_result=t.Alias(src, pP, len, alias_src, alias_size);
		if (alias_result<0)
			{
			result = KErrBadDescriptor; // bad permissions
			break;
			}

#ifdef __BROADCAST_CACHE_MAINTENANCE__
		// need to let the trap handler know where we are accessing in case we take a page fault
		// and the alias gets removed
		aExcTrap->iRemoteBase = alias_src;
		aExcTrap->iSize = alias_size;
#endif

		__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead %08x<-%08x+%x",dest,alias_src,alias_size));

		CHECK_PAGING_SAFE;

		if(aFlags&KCheckLocalAddress)
			MM::ValidateLocalIpcAddress(dest,alias_size,ETrue);
		UNLOCK_USER_MEMORY();
		memcpy( (TAny*)dest, (const TAny*)alias_src, alias_size);
		LOCK_USER_MEMORY();

		src+=alias_size;
		dest+=alias_size;
		aLength-=alias_size;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
		XTRAP_PAGING_END;
		if(pagingTrap)
			have_taken_fault = ETrue;
#endif
		}
	t.RemoveAlias();
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	t.iPagingExcTrap = NULL;	// in case we broke out of the loop and skipped XTRAP_PAGING_END
#endif

	return result;
	}


TInt DThread::RawWrite(const TAny* aDest, const TAny* aSrc, TInt aLength, TInt aFlags, DThread* /*anOriginatingThread*/, TIpcExcTrap* aExcTrap)
//
// Write to the thread's process.
// aDest     Run address of memory to write
// aSrc      Current address of source
// aExcTrap  Exception trap object to be updated if the actual memory access
//           is performed on a different memory area than the one specified.
//           It happens when writing is performed on an un-aligned memory area.
//
	{
	(void)aExcTrap;
	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
	TLinAddr src=(TLinAddr)aSrc;
	TLinAddr dest=(TLinAddr)aDest;
	TInt result = KErrNone;
	TBool have_taken_fault = EFalse;

	while (aLength)
		{
		if (iMState==EDead)
			{
			result = KErrDied;
			break;
			}
		TLinAddr alias_dest;
		TUint alias_size;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
		TInt pagingTrap;
		XTRAP_PAGING_START(pagingTrap);
#endif

		TInt len = have_taken_fault ? Min(aLength, KPageSize - (dest & KPageMask)) : aLength;
		TInt alias_result=t.Alias(dest, pP, len, alias_dest, alias_size);
		if (alias_result<0)
			{
			result = KErrBadDescriptor; // bad permissions
			break;
			}

#ifdef __BROADCAST_CACHE_MAINTENANCE__
		// need to let the trap handler know where we are accessing in case we take a page fault
		// and the alias gets removed
		aExcTrap->iRemoteBase = alias_dest;
		aExcTrap->iSize = alias_size;
#endif

		__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite %08x+%x->%08x",src,alias_size,alias_dest));

		// Must check that it is safe to page, unless we are reading from unpaged ROM in which case
		// we allow it.
		CHECK_PAGING_SAFE_RANGE(src, aLength);
		CHECK_DATA_PAGING_SAFE_RANGE(dest, aLength);

		if(aFlags&KCheckLocalAddress)
			MM::ValidateLocalIpcAddress(src,alias_size,EFalse);
		UNLOCK_USER_MEMORY();
		memcpy( (TAny*)alias_dest, (const TAny*)src, alias_size);
		LOCK_USER_MEMORY();

		src+=alias_size;
		dest+=alias_size;
		aLength-=alias_size;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
		XTRAP_PAGING_END;
		if(pagingTrap)
			have_taken_fault = ETrue;
#endif
		}
	t.RemoveAlias();
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	t.iPagingExcTrap = NULL;	// in case we broke out of the loop and skipped XTRAP_PAGING_END
#endif

	return result;
	}

#ifndef __MARM__

TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
//
// Read the header of a remote descriptor.
//
	{
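	// Header length in bytes for each descriptor type, indexed by the type
	// field in the top 4 bits of the first word (EBufC, EPtrC, EPtr, EBuf,
	// EBufCPtr); the zero entries mark invalid descriptor types.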
	static const TUint8 LengthLookup[16]={4,8,12,8,12,0,0,0,0,0,0,0,0,0,0,0};

	CHECK_PAGING_SAFE;

	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
	TLinAddr src=(TLinAddr)aSrc;

	__NK_ASSERT_DEBUG(t.iIpcClient==NULL);
	t.iIpcClient = this;

	TLinAddr pAlias;
	TUint8* pDest = (TUint8*)&aDest;
	TUint alias_size = 0;
	TInt length = 12;
	TInt type = KErrBadDescriptor;
	while (length > 0)
		{
#ifdef __BROADCAST_CACHE_MAINTENANCE__
		TInt pagingTrap;
		XTRAP_PAGING_START(pagingTrap);
#endif

		if (alias_size == 0)
			{
			// no alias present, so must create one here
			if (t.Alias(src, pP, length, pAlias, alias_size) != KErrNone)
				break;
			__NK_ASSERT_DEBUG(alias_size >= sizeof(TUint32));
			}

		// read either the first word, or as much as aliased of the remainder
		TInt l = length == 12 ? sizeof(TUint32) : Min(length, alias_size);
		if (Kern::SafeRead((TAny*)pAlias, (TAny*)pDest, l))
			break; // exception reading from user space

		if (length == 12)
			{
			// we have just read the first word, so decode the descriptor type
			type = *(TUint32*)pDest >> KShiftDesType8;
			length = LengthLookup[type];
			// an invalid descriptor type will have length 0, which gets decreased
			// by 'l' and terminates the loop with length < 0
			}

		src += l;
		alias_size -= l;
		pAlias += l;
		pDest += l;
		length -= l;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
		XTRAP_PAGING_END;
		if (pagingTrap)
			alias_size = 0; // a page fault caused the alias to be removed
#endif
		}

	t.RemoveAlias();
	t.iIpcClient = NULL;
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	t.iPagingExcTrap = NULL;	// in case we broke out of the loop and skipped XTRAP_PAGING_END
#endif
	return length == 0 ? K::ParseDesHeader(aSrc, (TRawDesHeader&)aDest, aDest) : KErrBadDescriptor;
	}


#endif


TInt DThread::PrepareMemoryForDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
	{
	// not supported, new Physical Pinning APIs should be used for DMA
	return KErrNotSupported;
	}

TInt DThread::ReleaseMemoryFromDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
	{
	// not supported, new Physical Pinning APIs should be used for DMA
	return KErrNotSupported;
	}