|
1 // Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies). |
|
2 // All rights reserved. |
|
3 // This component and the accompanying materials are made available |
|
4 // under the terms of the License "Eclipse Public License v1.0" |
|
5 // which accompanies this distribution, and is available |
|
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 // |
|
8 // Initial Contributors: |
|
9 // Nokia Corporation - initial contribution. |
|
10 // |
|
11 // Contributors: |
|
12 // |
|
13 // Description: |
|
14 // e32\memmodel\epoc\multiple\mdefrag.cpp |
|
15 // |
|
16 // |
|
17 #include <memmodel.h> |
|
18 #include <defrag.h> |
|
19 #include "mmboot.h" |
|
20 #include <ramalloc.h> |
|
21 #include "cache_maintenance.h" |
|
/*
 * Move a kernel page from physical address aOld to a newly allocated physical
 * page, updating the page table entry held in aChunk via Substitute().
 *
 * @param aChunk        Kernel chunk whose page-table entry is updated.
 * @param aOffset       Offset of the page within the chunk; also supplies the
 *                      low address bits needed for page colouring when the
 *                      temporary mappings are created.
 * @param aOld          Physical address of the page being vacated.
 * @param aNew          On success, receives the physical address of the
 *                      replacement page.
 * @param aBlockZoneId  Zone-blocking hint forwarded to AllocRamPages
 *                      (see the RAM allocator for exact semantics).
 * @param aBlockRest    Second zone-blocking hint forwarded to AllocRamPages.
 * @return KErrNone on success; KErrNoMemory if no replacement page could be
 *         allocated.
 *
 * Enter with system locked, exit with system unlocked (!!)
 * Must hold RAM alloc mutex.
 */
TInt Mmu::MoveKernelPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Defrag::MoveKernelPage() off=%08x old=%08x",aOffset,aOld));
	Mmu& m=Mmu::Get();

	// Release the system lock - the kernel chunks can't ever be freed
	// and the ramalloc mutex protects us from decommit.
	NKern::UnlockSystem();

	// Allocate new page, map old and new
	TPhysAddr newPage;
	if (m.AllocRamPages(&newPage, 1, EPageMovable, aBlockZoneId, aBlockRest) != KErrNone)
		return KErrNoMemory;	// NB: system lock already released, as the contract requires on every exit
	TLinAddr vOld = m.MapTemp(aOld, aOffset);		// enough of address for page colour
	TLinAddr vNew = m.MapSecondTemp(newPage, aOffset);

	// With interrupts disabled, copy the page's contents and remap its PTE.
	// System lock is required as well for Substitute. Interrupts are disabled
	// so no code can observe (or dirty) the old page between the copy and the
	// PTE swap.
	NKern::LockSystem();
	TInt irq = NKern::DisableAllInterrupts();
	pagecpy((TAny*)vNew, (TAny*)vOld);
	aChunk->Substitute(aOffset, aOld, newPage);
	NKern::RestoreInterrupts(irq);
	NKern::UnlockSystem();

	// Before we sort out cache for the old page, check if the required mapping
	// attributes for that operation are what we have at the moment.
	if (CacheMaintenance::TemporaryMapping() != EMemAttNormalCached)
		{
		// Remove temporary mapping and map old page as required by CacheMaintenance
		m.UnmapTemp();
		vOld = m.MapTemp(aOld, aOffset,1, CacheMaintenance::TemporaryMapping());
		}

	// Sort out cache for the memory not in use anymore.
	CacheMaintenance::PageToReuse(vOld, EMemAttNormalCached, aOld);

	// Unalias pages
	m.UnmapTemp();
	m.UnmapSecondTemp();

	// Free old page
#ifdef _DEBUG
	// NOTE(review): the set low bit (aOld|1) appears to tag the entry for
	// ClearPages — confirm against Mmu::ClearPages before relying on it.
	m.ClearPages(1, (TPhysAddr*)(aOld|1));
#endif
	m.iRamPageAllocator->FreeRamPage(aOld, EPageMovable);

	aNew = newPage;
	return KErrNone;
	}
|
77 |
|
/*
 * Move a code page from physical address aOld to a newly allocated page,
 * updating all page tables which refer to it (via the code seg memory's
 * Substitute()).
 *
 * @param aCodeSegMemory  Code segment memory object owning the mappings.
 * @param aOffset         Offset of the page; also supplies address bits for
 *                        page colouring of the temporary mappings.
 * @param aOld            Physical address of the page being vacated.
 * @param aNew            On success, receives the replacement page's
 *                        physical address.
 * @param aBlockZoneId    Zone-blocking hint forwarded to AllocRamPages.
 * @param aBlockRest      Second zone-blocking hint forwarded to AllocRamPages.
 * @return KErrNone on success; KErrInUse if the code segment is still
 *         loading or already unloading; KErrNoMemory if allocation fails.
 *
 * Enter with system locked, exit with system unlocked (!!)
 * Must hold RAM alloc mutex.
 */
TInt Mmu::MoveCodeSegMemoryPage(DMemModelCodeSegMemory* aCodeSegMemory, TUint32 aOffset, TPhysAddr aOld,
	TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Defrag::MoveCodeSegMemoryPage() off=%08x old=%08x",aOffset,aOld));
	Mmu& m=Mmu::Get();

	// if the code seg is not done loading yet, we can't move it the easy way
	// also, if it's being unloaded the codeseg will have gone.
	DCodeSeg* codeseg = aCodeSegMemory->iCodeSeg;
	if (!codeseg || !(codeseg->iMark & DCodeSeg::EMarkLoaded))
		{
		NKern::UnlockSystem();	// keep the exit contract: system unlocked on every return
		return KErrInUse;
		}

	// Release system lock as page can't be decommitted while we hold ramalloc mutex
	NKern::UnlockSystem();

	// Allocate new page, map old and new
	TPhysAddr newPage;
	if (m.AllocRamPages(&newPage, 1, EPageMovable, aBlockZoneId, aBlockRest) != KErrNone)
		return KErrNoMemory;
	TLinAddr vOld = m.MapTemp(aOld, aOffset);		// enough of address for page colour
	TLinAddr vNew = m.MapSecondTemp(newPage, aOffset);

	// Copy the page and remap it wherever it's still mapped
	// Need to clean the new page to get the data to icache
	pagecpy((TAny*)vNew, (TAny*)vOld);

	// Sort out cache for the code that has just been altered
	// (makes the copied instructions visible to instruction fetch).
	CacheMaintenance::CodeChanged(vNew, KPageSize);

	// Replace old page in the mapping with the new one.
	aCodeSegMemory->Substitute(aOffset, aOld, newPage);

	// Before we sort out cache for the old page, check if the required mapping
	// attributes for that operation are what we have at the moment.
	if (CacheMaintenance::TemporaryMapping() != EMemAttNormalCached)
		{
		// Remove temporary mapping and map old page as required by CacheMaintenance
		m.UnmapTemp();
		vOld = m.MapTemp(aOld, aOffset,1, CacheMaintenance::TemporaryMapping());
		}

	// Sort out cache for the memory not in use anymore.
	CacheMaintenance::PageToReuse(vOld, EMemAttNormalCached, aOld);

	// Unalias pages
	m.UnmapTemp();
	m.UnmapSecondTemp();

	// Free old page
#ifdef _DEBUG
	// NOTE(review): (aOld|1) — low-bit tag understood by ClearPages; confirm
	// against Mmu::ClearPages.
	m.ClearPages(1, (TPhysAddr*)(aOld|1));
#endif
	m.iRamPageAllocator->FreeRamPage(aOld, EPageMovable);

	aNew = newPage;
	return KErrNone;
	}
|
144 |
|
/*
 * Move a code chunk page from physical address aOld to a newly allocated
 * page, updating the page table in aChunk.
 *
 * @param aChunk        Code chunk whose page-table entry is updated.
 * @param aOffset       Offset of the page within the chunk; also used to look
 *                      up the owning code segment and for page colouring.
 * @param aOld          Physical address of the page being vacated.
 * @param aNew          On success, receives the replacement page's physical
 *                      address.
 * @param aBlockZoneId  Zone-blocking hint forwarded to AllocRamPages.
 * @param aBlockRest    Second zone-blocking hint forwarded to AllocRamPages.
 * @return KErrNone on success; KErrInUse if the code segment has not
 *         finished loading; KErrNoMemory if allocation fails.
 *
 * Enter with system locked, exit with system unlocked (!!)
 * Must hold RAM alloc mutex.
 */
TInt Mmu::MoveCodeChunkPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Defrag::MoveCodeChunkPage() off=%08x old=%08x",aOffset,aOld));
	Mmu& m=Mmu::Get();

	// look up the code seg that corresponds to this page
	TLinAddr aLinearAddress = (TLinAddr)(aChunk->Base() + (aOffset));
	// NOTE(review): codeseg is dereferenced below without a null check
	// (unlike MoveCodeSegMemoryPage) — presumably Find() cannot fail for a
	// committed code chunk page; confirm against DCodeSeg::CodeSegsByAddress.
	DMemModelCodeSeg* codeseg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aLinearAddress);

	// if the code seg is not done loading yet, we can't move it the easy way
	if (!(codeseg->iMark & DCodeSeg::EMarkLoaded))
		{
		NKern::UnlockSystem();	// keep the exit contract: system unlocked on every return
		return KErrInUse;
		}

	// Release system lock as page can't be decommitted while we hold ramalloc mutex
	NKern::UnlockSystem();

	// Allocate new page, map old and new
	TPhysAddr newPage;
	if (m.AllocRamPages(&newPage, 1, EPageMovable, aBlockZoneId, aBlockRest) != KErrNone)
		return KErrNoMemory;
	TLinAddr vOld = m.MapTemp(aOld, aOffset);		// enough of address for page colour
	TLinAddr vNew = m.MapSecondTemp(newPage, aOffset);

	// Copy the page and remap it
	// Need to clean the new page to get the data to icache
	pagecpy((TAny*)vNew, (TAny*)vOld);

	// Sort out cache for the code that has just been altered
	// (makes the copied instructions visible to instruction fetch).
	CacheMaintenance::CodeChanged(vNew, KPageSize);

	// Substitute requires the system lock.
	NKern::LockSystem();
	aChunk->Substitute(aOffset, aOld, newPage);
	NKern::UnlockSystem();

	// Before we sort out cache for the old page, check if the required mapping
	// attributes for that operation are what we have at the moment.
	if (CacheMaintenance::TemporaryMapping() != EMemAttNormalCached)
		{
		// Remove temporary mapping and map old page as required by CacheMaintenance
		m.UnmapTemp();
		vOld = m.MapTemp(aOld, aOffset,1, CacheMaintenance::TemporaryMapping());
		}

	// Sort out cache for the memory not in use anymore.
	CacheMaintenance::PageToReuse(vOld, EMemAttNormalCached, aOld);

	// Unalias pages
	m.UnmapTemp();
	m.UnmapSecondTemp();

	// Free old page
#ifdef _DEBUG
	// NOTE(review): (aOld|1) — low-bit tag understood by ClearPages; confirm
	// against Mmu::ClearPages.
	m.ClearPages(1, (TPhysAddr*)(aOld|1));
#endif
	m.iRamPageAllocator->FreeRamPage(aOld, EPageMovable);

	aNew = newPage;
	return KErrNone;
	}
|
212 |
|
/*
 * Move a data chunk page from physical address aOld to a newly allocated
 * page, updating the page table in aChunk.
 *
 * Unlike the kernel/code variants, a data page may be written concurrently,
 * so the page is first made read-only via DisablePageModification(); if a
 * write fault re-enables access while we copy (detected by iDisabledPte
 * having been cleared), the move is abandoned with KErrInUse and the new
 * page is freed instead of the old one.
 *
 * @param aChunk        Data chunk whose page-table entry is updated.
 * @param aOffset       Offset of the page within the chunk; also supplies
 *                      address bits for page colouring.
 * @param aOld          Physical address of the page being vacated.
 * @param aNew          On success, receives the replacement page's physical
 *                      address.
 * @param aBlockZoneId  Zone-blocking hint forwarded to AllocRamPages.
 * @param aBlockRest    Second zone-blocking hint forwarded to AllocRamPages.
 * @return KErrNone on success; KErrInUse if the page was modified during the
 *         copy; KErrNoMemory if allocation fails.
 *
 * Enter with system locked, exit with system unlocked (!!)
 * Must hold RAM alloc mutex.
 */
TInt Mmu::MoveDataChunkPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Defrag::MoveDataChunkPage() off=%08x old=%08x",aOffset,aOld));
	Mmu& m=Mmu::Get();
	TInt r;

	// Release system lock as page can't be decommitted while we hold ramalloc mutex
	NKern::UnlockSystem();

	// Allocate new page, map old and new
	TPhysAddr newPage;
	if (m.AllocRamPages(&newPage, 1, EPageMovable, aBlockZoneId, aBlockRest) != KErrNone)
		return KErrNoMemory;
	TLinAddr vOld = m.MapTemp(aOld, aOffset);		// enough of address for page colour
	TLinAddr vNew = m.MapSecondTemp(newPage, aOffset);

	// Mark the PTE as readonly to avoid the data being overwritten while we copy
	DisablePageModification((DMemModelChunk*)aChunk, aOffset);

	// Copy the page's contents and remap its PTE
	pagecpy((TAny*)vNew, (TAny*)vOld);
	if (aChunk->iChunkType == EUserSelfModCode)	// Sort out cache for the code that has just been altered
		CacheMaintenance::CodeChanged(vNew, KPageSize);

	NKern::LockSystem();
	if (iDisabledPte != NULL)
		{
		// Access wasn't re-enabled (no write fault raced the copy), so we
		// can continue: swap in the new page and clear the disable state.
		aChunk->Substitute(aOffset, aOld, newPage);
		iDisabledAddr = 0;
		iDisabledAddrAsid = -1;
		iDisabledPte = NULL;
		iDisabledOldVal = 0;
		r = KErrNone;
		}
	else
		r = KErrInUse;	// page was written during the copy; abandon the move
	NKern::UnlockSystem();


	// Decide which page is now unused: on success it is the old page, on
	// failure (Substitute abandoned) it is the freshly copied new page.
	TLinAddr vUnused = vOld;
	TPhysAddr pUnused = aOld;

	if (r != KErrNone)
		{
		// Substitute has failed. Sort out cache for the new page, not the old one.
		vUnused = vNew;
		pUnused = newPage;
		}
	// Before we sort out cache for the unused page, check if the required mapping
	// attributes for that operation are what we have at the moment.
	if (CacheMaintenance::TemporaryMapping() != EMemAttNormalCached)
		{
		// Remove temporary mapping and map the page as required by CacheMaintenance.
		// NOTE(review): only the first temp mapping is remapped here even when
		// the unused page is the new (second-temp-mapped) page — presumably
		// MapTemp is valid for either physical page; confirm.
		m.UnmapTemp();
		vUnused = m.MapTemp(pUnused, aOffset,1, CacheMaintenance::TemporaryMapping());
		}

	// Sort out cache for the memory not in use anymore.
	CacheMaintenance::PageToReuse(vUnused, EMemAttNormalCached, pUnused);

	// Unalias pages
	m.UnmapTemp();
	m.UnmapSecondTemp();

	if (r == KErrNone)
		{
		// Free old page
#ifdef _DEBUG
		// NOTE(review): (aOld|1) — low-bit tag understood by ClearPages;
		// confirm against Mmu::ClearPages.
		m.ClearPages(1, (TPhysAddr*)(aOld|1));
#endif
		m.iRamPageAllocator->FreeRamPage(aOld, EPageMovable);
		aNew = newPage;
		}
	else
		{
		// Free new page; the original page stays in place and aNew is not set.
		m.iRamPageAllocator->FreeRamPage(newPage, EPageMovable);
		}

	return r;
	}