// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <plat_priv.h>
#include <kernel/cache.h>
#include "mm.h"
#include "mmu.h"
#include "mrom.h"
#include "mpager.h"
#include "mmanager.h"
#include "mobject.h"
#include "mmapping.h"
#include "maddrcont.h"
#include "mptalloc.h"
#include "mlargemappings.h"

#include "cache_maintenance.inl"


/**
Class representing the resources allocated for a ROM shadow page.

A shadow page is a page of RAM which is mapped by the MMU to replace
a previously existing page at a particular virtual address.
*/
class DShadowPage : public DVirtualPinMapping
    {
public:
    /**
    Create a new #DShadowPage to shadow a specified memory page.

    On success, #iOriginalPage holds the physical address of the original page
    and #iNewPage the physical address of the newly allocated RAM page, whose
    contents are a copy of the original.

    No MMU entries for the shadow page are changed - it is the responsibility
    of the caller to handle this. However, the new #DShadowPage object will
    have pinned the page table used by \a aMapping which maps the page being
    shadowed, preventing demand paging from discarding any modifications made
    to it.

    @param aMemory The memory object whose memory is to be shadowed.
    @param aIndex Page index, within the memory, of the page to shadow.
    @param aMapping A memory mapping which currently maps the page to be
           shadowed.

    @return The newly created DShadowPage or the null pointer if there was
            insufficient memory.
    */
    static DShadowPage* New(DMemoryObject* aMemory, TUint aIndex, DMemoryMappingBase* aMapping);

    /**
    Free the allocated shadow page (#iNewPage) and unpin any page table which
    was pinned, then free this shadow page object.

    The caller of this function must ensure that all references to the shadow
    RAM page have been removed from any MMU mappings.
    */
    void Destroy();

private:
    DShadowPage();
    ~DShadowPage();

    /**
    Second phase constructor. For arguments, see #New.
    */
    TInt Construct(DMemoryObject* aMemory, TUint aIndex, DMemoryMappingBase* aMapping);

public:
    /**
    The physical address of the original page being shadowed.
    */
    TPhysAddr iOriginalPage;

    /**
    The physical address of the allocated shadow page.
    */
    TPhysAddr iNewPage;
    };


/**
Specialised manager for the memory object representing the system ROM.
This handles demand paging of the ROM contents when the ROM is not stored in a
memory device capable of execute-in-place random access, e.g. when it is stored
in NAND flash.
*/
class DRomMemoryManager : public DPagedMemoryManager
    {
public:
    DRomMemoryManager();

    /**
    Allocate a shadow page for the specified ROM address.

    Shadow pages are pages of RAM which are mapped by the MMU so that
    they replace the original ROM memory. The contents of a shadow page
    are initially the same as the ROM they replace, but may be modified with
    #CopyToShadowMemory.

    @param aRomAddr A virtual address which lies within the ROM.

    @return KErrNone if successful,
            KErrAlreadyExists if the specified address already has a shadow page,
            otherwise one of the system wide error codes.
    */
    TInt AllocShadowPage(TLinAddr aRomAddr);

    /**
    Free a shadow page previously allocated with #AllocShadowPage.

    The original ROM memory page is again mapped at the specified address.

    @param aRomAddr A virtual address which lies within the ROM.

    @return KErrNone if successful,
            otherwise one of the system wide error codes.
    */
    TInt FreeShadowPage(TLinAddr aRomAddr);

    /**
    Copy data into a shadow page, modifying its contents.

    @param aDst A virtual address which lies within the ROM for which a shadow
           page has previously been allocated with #AllocShadowPage.
    @param aSrc The start address of the data to copy to \a aDst.
    @param aSize The size, in bytes, of the data to copy.

    @return KErrNone if successful,
            KErrNotFound if the specified address didn't have a shadow page,
            otherwise one of the system wide error codes.
    */
    TInt CopyToShadowMemory(TLinAddr aDst, TLinAddr aSrc, TUint32 aSize);

protected:

    // from DPagedMemoryManager...
    virtual TInt PageInPinnedDone(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo, TPhysAddr* aPageArrayEntry, TPinArgs& aPinArgs);

private:
    // from DMemoryManager...
    virtual void Destruct(DMemoryObject* aMemory);
    virtual TInt HandleFault(DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping,
                             TUint aMapInstanceCount, TUint aAccessPermissions);
    virtual TInt Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);
    virtual void Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);

    // methods inherited from DPagedMemoryManager

    /**
    @copydoc DPagedMemoryManager::Init3
    This acts as a second phase constructor for the manager which
    creates the memory objects and mappings to represent the ROM.
    */
    virtual void Init3();

    virtual TInt InstallPagingDevice(DPagingDevice* aDevice);
    virtual TInt AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount);
    virtual TInt ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest);
    virtual TBool IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
    virtual void DoUnpin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);

    /**
    Acquire the mutex used to protect shadow page allocation.
    */
    void ShadowLock();

    /**
    Release the mutex used to protect shadow page allocation.
    */
    void ShadowUnlock();

private:
    /**
    The ROM paging device which was passed to #InstallPagingDevice.
    */
    DPagingDevice* iDevice;

    /**
    The memory object containing the ROM.
    */
    DMemoryObject* iRomMemory;

    /**
    The memory mapping which maps the ROM into a globally visible virtual address.
    */
    DMemoryMapping* iRomMapping;

    /**
    The virtual address for the start of the ROM in the global memory region.
    */
    TLinAddr iBase;

    /**
    The size, in bytes, of the ROM image.
    This may not be an exact multiple of a page size.
    */
    TUint iSize;

    /**
    The size, in pages, of the ROM image.
    */
    TUint iSizeInPages;

    /**
    The offset from the ROM start, in bytes, for the region of the
    ROM which is demand paged.
    */
    TUint iPagedStart;

    /**
    The size, in bytes, for the region of the ROM which is demand paged.
    */
    TUint iPagedSize;

    /**
    The address within the ROM for the ROM page index.
    @see TRomHeader::iRomPageIndex.
    */
    SRomPageInfo* iRomPageIndex;

    /**
    The mutex used to protect shadow page allocation.
    */
    DMutex* iShadowLock;

    /**
    Container for all allocated DShadowPage objects.
    */
    RAddressedContainer iShadowPages;

#ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
    TInt iOriginalRomPageCount;
    TPhysAddr* iOriginalRomPages;
    friend void RomOriginalPages(TPhysAddr*& aPages, TUint& aPageCount);
#endif

    friend TBool IsUnpagedRom(TLinAddr aBase, TUint aSize);

public:
    /**
    The single instance of this manager class.
    */
    static DRomMemoryManager TheManager;
    };


DRomMemoryManager DRomMemoryManager::TheManager;
DPagedMemoryManager* TheRomMemoryManager = &DRomMemoryManager::TheManager;


const TInt KMutexOrdRomMemory = KMutexOrdPageIn+1;


#ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
/**
For use by the emulated paging device to get the location and size of the ROM.

@param aPages A reference to store a pointer to an array of the physical addresses of each ROM page.
@param aPageCount A reference to store the number of rom pages.
*/
void RomOriginalPages(TPhysAddr*& aPages, TUint& aPageCount)
    {
    aPages = DRomMemoryManager::TheManager.iOriginalRomPages;
    aPageCount = DRomMemoryManager::TheManager.iOriginalRomPageCount;
    }

#endif

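/**
Determine whether the virtual address range aBase..aBase+aSize lies entirely
within the unpaged (always resident) region at the start of the ROM, i.e.
entirely below offset DRomMemoryManager::iPagedStart.

The second range test below also compares 'offset<aSize' to guard against
unsigned wrap-around of 'offset+aSize'. Illustrative figures only (not from
any real ROM): with limit 0x100000, offset 0xFF000 and aSize 0xFFFFFFFF the
sum wraps to 0xFEFFF, which would wrongly pass the 'offset>limit' test on
its own; the wrap check rejects it.
*/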
TBool IsUnpagedRom(TLinAddr aBase, TUint aSize)
    {
    TUint offset = aBase-DRomMemoryManager::TheManager.iBase;
    TUint limit = DRomMemoryManager::TheManager.iPagedStart;
    if(offset>=limit)
        return false;
    offset += aSize;
    if(offset>limit || offset<aSize)
        return false;
    return true;
    }


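/**
Convert the section (chunk-sized) mapping of the chunk containing aAddress
into a page table mapping of the same physical memory, so that individual
pages of the chunk can subsequently be unmapped.

The loop below fills the new page table with one entry per page of the
original section; equivalently (a sketch only, assuming the page table
covers exactly one chunk of KChunkSize/KPageSize pages):

@code
    TPte pte = Mmu::SectionToPageEntry(pde) | pdePhys;
    for(TUint i=0; i<KChunkSize/KPageSize; ++i, pte+=KPageSize)
        pt[i] = pte;
@endcode

@return KErrNone if the chunk is now mapped by a page table,
        KErrAlreadyExists if it was not mapped by a section entry to begin with,
        KErrNotFound if nothing is mapped at aAddress,
        KErrNoMemory if a page table could not be allocated.
*/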
TInt PagifyChunk(TLinAddr aAddress)
    {
    TRACE(("PagifyChunk(0x%08x)",aAddress));

    aAddress &= ~KChunkMask;
    TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,aAddress);

retry:
    // check there is actually some memory mapped...
    TPde pde = *pPde;
    if(pde==KPdeUnallocatedEntry)
        {
        TRACE(("PagifyChunk returns %d",KErrNotFound));
        return KErrNotFound;
        }

    // end if memory is not a section mapping...
    TPhysAddr pdePhys = Mmu::PdePhysAddr(pde);
    if(pdePhys==KPhysAddrInvalid)
        {
        TRACE(("PagifyChunk returns %d",KErrAlreadyExists));
        return KErrAlreadyExists;
        }

    // get a new page table...
    ::PageTables.Lock();
    TPte* pt = ::PageTables.Alloc(false);
    if(!pt)
        {
        TRACE(("PagifyChunk returns %d",KErrNoMemory));
        ::PageTables.Unlock();
        return KErrNoMemory;
        }

    // fill page table so it maps the same physical addresses as the section mapping...
    TPte pte = Mmu::SectionToPageEntry(pde);
    pte |= pdePhys;
    TPte* pPte = pt;
    do
        {
        TRACE2(("!PTE %x=%x",pPte,pte));
        *pPte++ = pte;
        pte += KPageSize;
        }
    while(TLinAddr(pPte)&(KPageTableMask/sizeof(TPte)*sizeof(TPte)));
    CacheMaintenance::MultiplePtesUpdated((TLinAddr)pt,KPageTableSize);

    // check memory not changed...
    MmuLock::Lock();
    if(Mmu::PdePhysAddr(*pPde)!=pdePhys)
        {
        // pde was changed whilst we were creating a new page table, need to retry...
        MmuLock::Unlock();
        ::PageTables.Free(pt);
        ::PageTables.Unlock();
        goto retry;
        }

    // update page counts...
    SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
    TUint count = pti->IncPageCount(KPageTableSize/sizeof(TPte));
    (void)count;
    TRACE2(("pt %x page count=%d",pt,pti->PageCount()));
    __NK_ASSERT_DEBUG(pti->CheckPageCount());

    // swap pde entry to point to new page table...
    pde |= Mmu::PageTablePhysAddr(pt);
    TRACE2(("!PDE %x=%x",pPde,pde));
    *pPde = pde;
    SinglePdeUpdated(pPde);
    InvalidateTLB();

    // done...
    MmuLock::Unlock();
    ::PageTables.Unlock();
    TRACE(("PagifyChunk returns %d",KErrNone));
    return KErrNone;
    }


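/**
Remove the kernel mappings for the ROM address range aStart..aEnd.

The first (partial) chunk is converted to a page table mapping with
PagifyChunk and then unmapped page by page up to the next chunk boundary;
any remaining whole chunks are unmapped by clearing their page directory
entries. This is only used during boot, to remove mappings of demand-paged
ROM left behind by the bootstrap.
*/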
void UnmapROM(TLinAddr aStart, TLinAddr aEnd)
    {
    TRACEB(("UnmapROM 0x%08x..0x%08x",aStart,aEnd));

    TLinAddr p = aStart;
    if(p>=aEnd)
        return;

    PagifyChunk(p);

    MmuLock::Lock(); // hold MmuLock for long time, shouldn't matter as this is only done during boot

    TPte* pPte = Mmu::PtePtrFromLinAddr(p,KKernelOsAsid);
    __NK_ASSERT_ALWAYS(pPte);
    while(p<aEnd && p&KChunkMask)
        {
        *pPte++ = KPteUnallocatedEntry;
        p += KPageSize;
        }

    if(p<aEnd)
        {
        TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,p);
        while(p<aEnd)
            {
            *pPde++ = KPdeUnallocatedEntry;
            p += KChunkSize;
            }
        }

    MmuLock::Unlock();

    __NK_ASSERT_DEBUG(p==aEnd);
    }


DRomMemoryManager::DRomMemoryManager()
    : iShadowPages(0,iShadowLock)
    {
    }


void DRomMemoryManager::Init3()
    {
    // get ROM info...
    const TRomHeader& romHeader = TheRomHeader();
    iBase = (TLinAddr)&romHeader;
    iSize = romHeader.iUncompressedSize;
    iSizeInPages = MM::RoundToPageCount(iSize);
    TUint chunkSize = ((iSize+KChunkMask)&~KChunkMask);
    TUint committedSize = TheSuperPage().iTotalRomSize; // size of memory loaded by bootstrap
    TRACEB(("DRomMemoryManager::Init3 rom=0x%08x+0x%x",iBase,iSize));

    // get paged rom info...
    if(romHeader.iRomPageIndex)
        iRomPageIndex = (SRomPageInfo*)((TInt)&romHeader+romHeader.iRomPageIndex);
    iPagedSize = romHeader.iPageableRomSize;
    iPagedStart = iPagedSize ? romHeader.iPageableRomStart : 0;
    if(iPagedStart)
        {
        TRACEB(("DRomMemoryManager::Init3() paged=0x%08x+0x%x",(TLinAddr)&romHeader+iPagedStart,iPagedSize));
        __NK_ASSERT_ALWAYS(iPagedStart<iSize && iPagedStart+iPagedSize>iPagedStart && iPagedStart+iPagedSize<=iSize);

#ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
        // get physical addresses of ROM pages...
        iOriginalRomPageCount = iSizeInPages;
        iOriginalRomPages = new TPhysAddr[iOriginalRomPageCount];
        __NK_ASSERT_ALWAYS(iOriginalRomPages);
        MmuLock::Lock(); // hold MmuLock for long time, shouldn't matter as this is only done during boot
        TInt i;
        for(i=0; i<iOriginalRomPageCount; i++)
            iOriginalRomPages[i] = Mmu::LinearToPhysical(iBase+i*KPageSize);
        MmuLock::Unlock();

        // unmap paged part of ROM as the bootstrap will have left it mapped.
        // See CFG_SupportEmulatedRomPaging in the bootstrap code.
        // todo: use FMM for this after memory object created
        UnmapROM(iBase+iPagedStart,iBase+chunkSize);
        committedSize = iPagedStart;
#endif
        }

    if(iPagedStart && committedSize!=iPagedStart)
        {
        // unmap any paged ROM which the bootstrap mapped...
        TRACEB(("DRomMemoryManager::Init3() unmapping unpaged ROM offsets 0x%x thru 0x%x",iPagedStart,committedSize));
        // todo: use FMM for this after memory object created
        UnmapROM(iBase+iPagedStart,iBase+committedSize);
        committedSize = iPagedStart;
        }

    // create memory object for ROM...
    TRACEB(("DRomMemoryManager::Init3() committed ROM memory 0x%x of 0x%x",committedSize,chunkSize));
    TMemoryCreateFlags flags = (TMemoryCreateFlags)(EMemoryCreateNoWipe | EMemoryCreateReadOnly |
                                                    EMemoryCreateDemandPaged | EMemoryCreateAllowExecution);
    iRomMemory = DLargeMappedMemory::New(&DRomMemoryManager::TheManager,chunkSize>>KPageShift,EMemoryAttributeStandard,flags);
    __NK_ASSERT_ALWAYS(iRomMemory);
    TInt r = MM::MemoryClaimInitialPages(iRomMemory,iBase,committedSize,EUserExecute,false,true);
    __NK_ASSERT_ALWAYS(r==KErrNone);
    r = iRomMemory->iPages.Alloc(committedSize>>KPageShift,(chunkSize-committedSize)>>KPageShift);
    __NK_ASSERT_ALWAYS(r==KErrNone);

    // create mapping for ROM...
    r = MM::MappingNew(iRomMapping, iRomMemory, EUserExecute, KKernelOsAsid, EMappingCreateExactVirtual, iBase);
    __NK_ASSERT_ALWAYS(r==KErrNone);
    __NK_ASSERT_ALWAYS(iRomMapping->IsLarge());

    // Set the paging device to be uninstalled, i.e. NULL.
    iDevice = NULL;

    _LIT(KRomMemoryLockName,"RomMemory");
    r = K::MutexCreate(iShadowLock, KRomMemoryLockName, NULL, EFalse, KMutexOrdRomMemory);
    __NK_ASSERT_ALWAYS(r==KErrNone);
    MM::MemorySetLock(iRomMemory,iShadowLock);
    }


TInt DRomMemoryManager::InstallPagingDevice(DPagingDevice* aDevice)
    {
    TRACEB(("DRomMemoryManager::InstallPagingDevice(0x%08x)",aDevice));

    if(!iPagedStart)
        {
        TRACEB(("ROM is not paged"));
        return KErrNone;
        }

    TAny* null = 0;
    if(aDevice->iType & DPagingDevice::EMediaExtension)
        __e32_atomic_store_ord_ptr(&iDevice, null);
    if(!__e32_atomic_cas_ord_ptr(&iDevice, &null, aDevice)) // set iDevice=aDevice if it was originally 0
        {
        // ROM paging device already registered...
        TRACEB(("DRomMemoryManager::InstallPagingDevice returns ALREADY EXISTS!"));
        return KErrAlreadyExists;
        }

    __e32_atomic_ior_ord32(&K::MemModelAttributes, (TUint32)EMemModelAttrRomPaging);

    return KErrNone;
    }


TInt DRomMemoryManager::AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
    {
    aRequest = iDevice->iRequestPool->AcquirePageReadRequest(aMemory,aIndex,aCount);
    return KErrNone;
    }


void DRomMemoryManager::Destruct(DMemoryObject* aMemory)
    {
    __NK_ASSERT_DEBUG(0);
    }


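/**
Read (and decompress, if the ROM is compressed) the ROM pages
aIndex..aIndex+aCount-1 into the RAM pages aPages, which aRequest maps at a
temporary linear address for the duration of the read.

The paging device transfers data in units of (1<<readUnitShift) bytes, so a
compressed page's byte range must be rounded out to whole read units before
reading, and the decompressor pointed at the correct offset within the read
buffer. Worked example with illustrative numbers only: with readUnitShift 9
(512-byte units), dataOffset 0x1234 and dataSize 0x300, readStart =
0x1234>>9 = 0x9, readSize = ((0x1234+0x300-1)>>9)-0x9+1 = 2 units, and the
page data starts at buffer+0x1234-(0x9<<9) = buffer+0x34.
*/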
TInt DRomMemoryManager::ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest)
    {
    __NK_ASSERT_DEBUG(aRequest->CheckUseContiguous(aMemory,aIndex,aCount));
    __ASSERT_CRITICAL;

    TLinAddr linAddr = aRequest->MapPages(aIndex,aCount,aPages);
    TInt r = KErrNone;
    TThreadMessage message;

    const TInt readUnitShift = iDevice->iReadUnitShift;

    for(; aCount; ++aIndex, --aCount, linAddr+=KPageSize)
        {
        START_PAGING_BENCHMARK;
        if(!iRomPageIndex)
            {
            // ROM not broken into pages, so just read it in directly.
            // KPageShift > readUnitShift so page size is exact multiple of read
            // units. Therefore it is ok to just shift offset and KPageSize
            // by readUnitShift.
            const TInt dataOffset = aIndex << KPageShift;
            START_PAGING_BENCHMARK;
            r = iDevice->Read(&message,
                              linAddr, dataOffset >> readUnitShift,
                              KPageSize >> readUnitShift, DPagingDevice::EDriveRomPaging);
            __NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
            END_PAGING_BENCHMARK(EPagingBmReadMedia);
            }
        else
            {
            // Work out where data for page is located
            SRomPageInfo* romPageInfo = iRomPageIndex + aIndex;
            const TInt dataOffset = romPageInfo->iDataStart;
            const TInt dataSize = romPageInfo->iDataSize;
            if(!dataSize)
                {
                // empty page, fill it with 0xff...
                memset((TAny*)linAddr, 0xff, KPageSize);
                r = KErrNone;
                }
            else
                {
                __NK_ASSERT_ALWAYS(romPageInfo->iPagingAttributes & SRomPageInfo::EPageable);

                // Read data for page...
                const TLinAddr buffer = aRequest->iBuffer;
                const TUint readStart = dataOffset >> readUnitShift;
                const TUint readSize = ((dataOffset + dataSize - 1) >> readUnitShift) - readStart + 1;
                __NK_ASSERT_DEBUG((readSize << readUnitShift) <= (DPageReadRequest::EMaxPages << KPageShift));
                START_PAGING_BENCHMARK;
                r = iDevice->Read(&message, buffer, readStart, readSize, DPagingDevice::EDriveRomPaging);
                __NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
                END_PAGING_BENCHMARK(EPagingBmReadMedia);
                if(r==KErrNone)
                    {
                    // Decompress data, remembering that the data to decompress may be offset from
                    // the start of the data just read in, due to reads having to be aligned by
                    // readUnitShift.
                    const TLinAddr data = buffer + dataOffset - (readStart << readUnitShift);
                    __ASSERT_COMPILE(SRomPageInfo::ENoCompression==0); // decompress assumes this
                    r = Decompress(romPageInfo->iCompressionType, linAddr, KPageSize, data, dataSize);
                    if(r >= 0)
                        {
                        if (r != KPageSize)
                            __KTRACE_OPT(KPANIC, Kern::Printf("DRomMemoryManager::ReadPage: error decompressing page at %08x + %x: %d", dataOffset, dataSize, r));
                        __NK_ASSERT_ALWAYS(r == KPageSize);
                        r = KErrNone;
                        }
                    }
                else
                    __KTRACE_OPT(KPANIC, Kern::Printf("DRomMemoryManager::ReadPage: error reading media at %08x + %x: %d", dataOffset, dataSize, r));
                }
            }
        END_PAGING_BENCHMARK(EPagingBmReadRomPage);

        if(r!=KErrNone)
            break;
        }

    aRequest->UnmapPages(true);

    return r;
    }


TBool DRomMemoryManager::IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
    {
    // all pages in the ROM memory object are always allocated...
    return true;
    }


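/**
Handle a page fault on the ROM memory object.

Faults outside the demand-paged region (offsets below iPagedStart, or at or
beyond iPagedStart+iPagedSize) are rejected with KErrAbort; faults inside it
are forwarded to the generic DPagedMemoryManager::HandleFault implementation.
*/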
TInt DRomMemoryManager::HandleFault(DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping,
                                    TUint aMapInstanceCount, TUint aAccessPermissions)
    {
    __NK_ASSERT_DEBUG(aMemory==iRomMemory);

    TUint offset = aIndex*KPageSize;
    if(offset<iPagedStart || offset>=iPagedStart+iPagedSize)
        return KErrAbort;

    return DPagedMemoryManager::HandleFault(aMemory, aIndex, aMapping, aMapInstanceCount, aAccessPermissions);
    }


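/**
Pin the region of the ROM memory object covered by aMapping.

Only the demand-paged part of the ROM needs pinning: pages below iPagedStart
are always resident, so the range handed to DoPin is clipped to start no
earlier than the first paged page. A mapping extending beyond the end of the
ROM fails with KErrNotFound.
*/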
TInt DRomMemoryManager::Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
    {
    TRACE(("DRomMemoryManager::Pin %08x %08x", aMemory, aMapping));
    TUint index = aMapping->iStartIndex;
    TUint endIndex = index+aMapping->iSizeInPages;
    if(endIndex>iSizeInPages)
        return KErrNotFound;

    TInt r = KErrNone;
    TUint pagedIndex = iPagedStart>>KPageShift;
    if(pagedIndex && pagedIndex<endIndex)
        {
        TUint start = index;
        if(start<pagedIndex)
            start = pagedIndex;
        r = DoPin(aMemory,start,endIndex-start,aMapping,aPinArgs);
        }

    return r;
    }


TInt DRomMemoryManager::PageInPinnedDone(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo, TPhysAddr* aPageArrayEntry, TPinArgs& aPinArgs)
    {
    TRACE(("DRomMemoryManager::PageInPinnedDone %08x %d", aMemory, aIndex));

    // Only the paged part of rom should be pinned.
    __NK_ASSERT_DEBUG(aIndex >= iPagedStart >> KPageShift);

    TInt r = DoPageInDone(aMemory,aIndex,aPageInfo,aPageArrayEntry,true);

    // Rom page can't be decommitted so this must succeed.
    __NK_ASSERT_DEBUG(r >= 0);

    if (aPageInfo->Type() == SPageInfo::EShadow)
        {// The page is being shadowed so pin the original page.
        // This is safe as the original page was physically pinned when shadowed.
        __NK_ASSERT_DEBUG(RPageArray::IsPresent(*aPageArrayEntry));
        aPageInfo = aPageInfo->GetOriginalPage();
        }

    ThePager.PagedInPinned(aPageInfo,aPinArgs);

    // check page assigned correctly...
#ifdef _DEBUG
    if(RPageArray::IsPresent(*aPageArrayEntry))
        {
        SPageInfo* pi = SPageInfo::FromPhysAddr(*aPageArrayEntry);
        if (pi->Type() != SPageInfo::EShadow)
            {
            __NK_ASSERT_DEBUG(pi->Type() == SPageInfo::EManaged);
            __NK_ASSERT_DEBUG(pi->Owner()==aMemory);
            __NK_ASSERT_DEBUG(pi->Index()==aIndex);
            __NK_ASSERT_DEBUG(pi->PagedState()==SPageInfo::EPagedPinned);
            }
        }
#endif
    return r;
    }


void DRomMemoryManager::Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
    {
    TRACE(("DRomMemoryManager::Unpin %08x %08x", aMemory, aMapping));

    __ASSERT_CRITICAL;
    TUint index = aMapping->iStartIndex;
    TUint endIndex = index+aMapping->iSizeInPages;
    __NK_ASSERT_DEBUG(endIndex<=iSizeInPages); // Pin() should have already ensured this

    TUint pagedIndex = iPagedStart>>KPageShift;
    if(pagedIndex && pagedIndex<endIndex)
        {
        TUint start = index;
        if(start<pagedIndex)
            start = pagedIndex;
        // unpin pages (but only if they were successfully pinned)...
        if(aMapping->Flags()&DMemoryMapping::EPagesPinned)
            DoUnpin(aMemory,start,endIndex-start,aMapping,aPinArgs);
        }

    __NK_ASSERT_DEBUG((aMapping->Flags()&DMemoryMapping::EPageUnmapVetoed)==0); // we shouldn't have tried to Free paged ROM
    }


void DRomMemoryManager::DoUnpin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
    {
    TRACE(("DRomMemoryManager::DoUnpin(0x%08x,0x%08x,0x%08x,0x%08x,?)",aMemory, aIndex, aCount, aMapping));

    // This should only be invoked on the paged part of rom.
    __NK_ASSERT_DEBUG(iPagedStart && aIndex >= (iPagedStart >> KPageShift));

    MmuLock::Lock();
    TUint endIndex = aIndex+aCount;
    for(TUint i = aIndex; i < endIndex; ++i)
        {
        TPhysAddr page = aMemory->iPages.Page(i);
        __NK_ASSERT_DEBUG(RPageArray::IsPresent(page));
        SPageInfo* pi = SPageInfo::FromPhysAddr(page);
        if(pi->Type() == SPageInfo::EShadow)
            {
            pi = pi->GetOriginalPage();
            }
        ThePager.Unpin(pi,aPinArgs);
        MmuLock::Flash();
        }

    MmuLock::Unlock();

    // clear EPagesPinned flag...
    __e32_atomic_and_ord8(&aMapping->Flags(), TUint8(~DMemoryMapping::EPagesPinned));
    }


void DRomMemoryManager::ShadowLock()
    {
    MM::MemoryLock(iRomMemory);
    }


void DRomMemoryManager::ShadowUnlock()
    {
    MM::MemoryUnlock(iRomMemory);
    }


TInt DRomMemoryManager::AllocShadowPage(TLinAddr aRomAddr)
    {
    TRACE(("DRomMemoryManager::AllocShadowPage %08x", aRomAddr));

    TUint index = (aRomAddr-iBase)>>KPageShift;
    if (index >= iSizeInPages)
        return KErrArgument;
    __NK_ASSERT_DEBUG(iRomMemory->CheckRegion(index,1));

    TInt r;

    ShadowLock();

    DShadowPage* shadow = (DShadowPage*)iShadowPages.Find(index);
    if(shadow)
        r = KErrAlreadyExists;
    else
        {
        shadow = DShadowPage::New(iRomMemory,index,iRomMapping);
        if(!shadow)
            r = KErrNoMemory;
        else
            {
            r = iShadowPages.Add(index,shadow);
            if(r!=KErrNone)
                {
                shadow->Destroy();
                }
            else
                {
                // Remap the shadowed rom page to the shadow page. Update the
                // page array entry for the page being shadowed, this ensures
                // that any page moving attempts will remap the shadow page when
                // they realise that the page is physically pinned.
                MmuLock::Lock();
                TPhysAddr& pageEntry = *iRomMemory->iPages.PageEntry(index);
                TPhysAddr newPageAddr = shadow->iNewPage;
                pageEntry = (pageEntry & KPageMask) | newPageAddr;

                // Mark the SPageInfo of the shadow page with pointer to the original page's
                // SPageInfo, this is safe as we've physically pinned the original page
                // so it can't be freed or reused until this shadow page is destroyed.
                SPageInfo* origPi = SPageInfo::FromPhysAddr(shadow->iOriginalPage);
                SPageInfo* newPi = SPageInfo::FromPhysAddr(newPageAddr);
                newPi->SetOriginalPage(origPi);
                MmuLock::Unlock();

                iRomMemory->RemapPage(pageEntry, index, ETrue);
                }
            }
        }

    ShadowUnlock();

    return r;
    }


TInt DRomMemoryManager::FreeShadowPage(TLinAddr aRomAddr)
    {
    TUint index = (aRomAddr-iBase)>>KPageShift;
    if(!iRomMemory->CheckRegion(index,1))
        return KErrArgument;

    TInt r;

    ShadowLock();

    DShadowPage* shadow = (DShadowPage*)iShadowPages.Remove(index);
    if(!shadow)
        {
        r = KErrNotFound;
        }
    else
        {
        // Remap the rom page and update the page array entry for the page
        // back to the original rom page. This is safe as the page is physically
        // pinned until shadow is destroyed.
        MmuLock::Lock();
        TPhysAddr& pageEntry = *iRomMemory->iPages.PageEntry(index);
        pageEntry = (pageEntry & KPageMask) | shadow->iOriginalPage;
        MmuLock::Unlock();

        iRomMemory->RemapPage(pageEntry, index, ETrue);

        shadow->Destroy();
        r = KErrNone;
        }

    ShadowUnlock();

    return r;
    }


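/**
Copy aSize bytes from aSrc to the shadow memory for ROM address aDst.

The copy proceeds one destination page at a time: each iteration copies at
most the bytes remaining in the current page (KPageSize-(offset&KPageMask)),
looks up the shadow page for that offset and writes into it through a
temporary kernel mapping. If any destination page has no shadow page the
copy stops and KErrNotFound is returned; bytes already copied to earlier
pages are left in place.
*/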
TInt DRomMemoryManager::CopyToShadowMemory(TLinAddr aDst, TLinAddr aSrc, TUint32 aSize)
    {
    TRACE(("DRomMemoryManager::CopyToShadowMemory(0x%08x,0x%08x,0x%x)",aDst,aSrc,aSize));
    Mmu& m = TheMmu;
    TLinAddr offset = aDst-iBase;
    TLinAddr end = offset+aSize;
    if(end<offset || end>iSize)
        return KErrArgument;

    while(aSize)
        {
        TUint size = KPageSize-(offset&KPageMask); // bytes left in page at 'offset'
        if(size>aSize)
            size = aSize;

        TInt r;

        ShadowLock();

        DShadowPage* shadow = (DShadowPage*)iShadowPages.Find(offset>>KPageShift);
        if(!shadow)
            {
            r = KErrNotFound;
            }
        else
            {
            RamAllocLock::Lock();
            TLinAddr dst = m.MapTemp(shadow->iNewPage,offset>>KPageShift);
            dst += offset&KPageMask;
            memcpy((TAny*)dst,(TAny*)aSrc,size);
            m.UnmapTemp();
            RamAllocLock::Unlock();

            r = KErrNone;
            }

        ShadowUnlock();

        if(r!=KErrNone)
            return r;

        offset += size;
        aSrc += size;
        aSize -= size;
        }

    return KErrNone;
    }


//
// DShadowPage
//

DShadowPage* DShadowPage::New(DMemoryObject* aMemory, TUint aIndex, DMemoryMappingBase* aMapping)
    {
    TRACE(("DShadowPage::New(0x%08x,0x%x,0x%08x)",aMemory, aIndex, aMapping));
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

    DShadowPage* self = new DShadowPage;
    if(self)
        if(self->Construct(aMemory,aIndex,aMapping)!=KErrNone)
            {
            self->Destroy();
            self = 0;
            }

    TRACE(("DShadowPage::New(0x%08x,0x%x,0x%08x) returns 0x%08x",aMemory, aIndex, aMapping, self));
    return self;
    }


DShadowPage::DShadowPage()
    : iOriginalPage(KPhysAddrInvalid), iNewPage(KPhysAddrInvalid)
    {
    // Set flag so that the rom page that is being shadowed can't be moved,
    // otherwise iOriginalPage will become invalid if the page is moved.
    Flags() |= EPhysicalPinningMapping;
    }



TInt DShadowPage::Construct(DMemoryObject* aMemory, TUint aIndex, DMemoryMappingBase* aMapping)
    {
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

    // Pin the page. It is ok to get the mapping instance count here without
    // MmuLock as there is only one permanent mapping used for the ROM.
    TInt r = Pin(aMemory,aIndex,1,EUserReadOnly,aMapping,aMapping->MapInstanceCount());
    if(r!=KErrNone)
        return r;

    r = PhysAddr(0,1,iOriginalPage,0);
    __NK_ASSERT_DEBUG(r>=0);
    if(r<0)
        return r;

    RamAllocLock::Lock();

    Mmu& m = TheMmu;
    r = m.AllocRam(&iNewPage, 1, aMemory->RamAllocFlags(), EPageFixed);
    if(r==KErrNone)
        {
        TLinAddr dst = m.MapTemp(iNewPage,aIndex,0);
        TLinAddr src = m.MapTemp(iOriginalPage,aIndex,1);
        pagecpy((TAny*)dst,(TAny*)src);
        CacheMaintenance::CodeChanged(dst,KPageSize); // IMB not needed, just clean to PoU (but we don't have a function to do that)

        m.UnmapTemp(0);
        m.UnmapTemp(1);
        MmuLock::Lock();
        SPageInfo::FromPhysAddr(iNewPage)->SetShadow(aIndex,aMemory->PageInfoFlags());
        MmuLock::Unlock();

#ifdef BTRACE_KERNEL_MEMORY
        BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, KPageSize);
        ++Epoc::KernelMiscPages;
#endif
        }

    RamAllocLock::Unlock();

    if(r!=KErrNone)
        return r;

    return r;
    }


DShadowPage::~DShadowPage()
    {
    }


void DShadowPage::Destroy()
    {
    TRACE2(("DShadowPage[%x]::Destroy()",this));
    if(iNewPage!=KPhysAddrInvalid)
        {
        RamAllocLock::Lock();
        TheMmu.FreeRam(&iNewPage, 1, EPageFixed);

#ifdef BTRACE_KERNEL_MEMORY
        BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, KPageSize);
        --Epoc::KernelMiscPages;
#endif
        RamAllocLock::Unlock();
        }
    if(IsAttached())
        Unpin();
    Close();
    }


/**
Replace a page of the system's execute-in-place (XIP) ROM image with a page of
RAM having the same contents. This RAM can subsequently be written to in order
to apply patches to the XIP ROM or to insert software breakpoints for debugging
purposes.
Call Epoc::FreeShadowPage() when you wish to revert to the original ROM page.

@param aRomAddr The virtual address of the ROM page to be replaced.
@return KErrNone if the operation completed successfully.
        KErrArgument if the specified address is not a valid XIP ROM address.
        KErrNoMemory if the operation failed due to insufficient free RAM.
        KErrAlreadyExists if the XIP ROM page at the specified address has
            already been shadowed by a RAM page.

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
*/
EXPORT_C TInt Epoc::AllocShadowPage(TLinAddr aRomAddr)
    {
    CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::AllocShadowPage");
    return DRomMemoryManager::TheManager.AllocShadowPage(aRomAddr);
    }


/**
Copies data into shadow memory. Source data is presumed to be in kernel memory.

@param aSrc Data to copy from.
@param aDest Address to copy into.
@param aLength Number of bytes to copy. A maximum of 32 bytes of data can be copied.

@return KErrNone if the operation completed successfully.
        KErrArgument if any part of the destination region is not a shadow page or
            if aLength is greater than 32 bytes.

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
*/
EXPORT_C TInt Epoc::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
    {
    CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::CopyToShadowMemory");
    return DRomMemoryManager::TheManager.CopyToShadowMemory(aDest,aSrc,aLength);
    }


/**
Revert an XIP ROM address which has previously been shadowed to the original
page of ROM.

@param aRomAddr The virtual address of the ROM page to be reverted.
@return KErrNone if the operation completed successfully.
        KErrArgument if the specified address is not a valid XIP ROM address.
        KErrGeneral if the specified address has not previously been shadowed
            using Epoc::AllocShadowPage().

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
*/
EXPORT_C TInt Epoc::FreeShadowPage(TLinAddr aRomAddr)
    {
    return DRomMemoryManager::TheManager.FreeShadowPage(aRomAddr);
    }


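/*
Illustrative sketch of how the shadow page APIs above combine to patch a
single word of XIP ROM from kernel-side code, for example when inserting a
software breakpoint. The function name, 'romAddr' and 'patch' are
hypothetical, and the caller is assumed to satisfy the preconditions listed
above (critical section, kernel unlocked, etc.).

@code
    TInt PatchRomWord(TLinAddr romAddr, TUint32 patch)
        {
        TInt r = Epoc::AllocShadowPage(romAddr);  // replace the ROM page with a writable RAM copy
        if(r!=KErrNone)
            return r;
        r = Epoc::CopyToShadowMemory(romAddr, (TLinAddr)&patch, sizeof(patch));
        if(r!=KErrNone)
            Epoc::FreeShadowPage(romAddr);        // undo on failure, reverting to the original ROM page
        return r;
        }
@endcode
*/
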
/**
Change the permissions on an XIP ROM address which has previously been shadowed
by a RAM page so that the RAM page may no longer be written to.

Note: On the latest platforms (those that use the reduced set of access
permissions: arm11mpcore, arm1176, cortex) shadow pages are implemented with
read-only permissions. Therefore, calling this function is not necessary, as
the shadow page is already created as 'frozen'.

@param aRomAddr The virtual address of the shadow RAM page to be frozen.
@return KErrNone if the operation completed successfully.
        KErrArgument if the specified address is not a valid XIP ROM address.
        KErrGeneral if the specified address has not previously been shadowed
            using Epoc::AllocShadowPage().

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
*/
EXPORT_C TInt Epoc::FreezeShadowPage(TLinAddr aRomAddr)
    {
    // Null operation for flexible memory model...
    return KErrNone;
    }
