// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\emul\win32\mutils.cpp
//
//

#include "memmodel.h"
#include <kernel/cache.h>
#include <emulator.h>

void MM::Panic(MM::TMemModelPanic aPanic)
	{
	Kern::Fault("MemModel", aPanic);
	}

TInt M::PageSizeInBytes()
	{
	return MM::RamPageSize;
	}

TBool M::IsRomAddress(const TAny*)
	{
	// The emulator has no ROM, so no address is ever a ROM address.
	return EFalse;
	}

TUint32 MM::RoundToPageSize(TUint32 aSize)
	{
	TUint32 m=MM::RamPageSize-1;
	return (aSize+m)&~m;
	}
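
// The mask trick above relies on RamPageSize being a power of two: adding
// (page size - 1) and then clearing the low-order bits rounds up. For
// example, with a 4KB page size (illustrative value):
//   MM::RoundToPageSize(0x0001) == 0x1000
//   MM::RoundToPageSize(0x1000) == 0x1000
//   MM::RoundToPageSize(0x1001) == 0x2000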

EXPORT_C TUint32 Kern::RoundToPageSize(TUint32 aSize)
	{
	return MM::RoundToPageSize(aSize);
	}

EXPORT_C TUint32 Kern::RoundToChunkSize(TUint32 aSize)
	{
	return MM::RoundToChunkSize(aSize);
	}

void MM::Init1()
	{
	TheScheduler.SetProcessHandler((TLinAddr)DoProcessSwitch);
	}

void MM::Wait()
	{
	Kern::MutexWait(*RamAllocatorMutex);
	if (RamAllocatorMutex->iHoldCount==1)
		{
		// First (outermost) acquisition: snapshot the free memory level
		// so that Signal() can detect any change made while held.
		InitialFreeMemory=FreeMemory;
		AllocFailed=EFalse;
		}
	}

TInt MM::Commit(TLinAddr aBase, TInt aSize, TInt aClearByte, TBool aExecute)
//
// Get Win32 to commit the pages.
// The caller guarantees they are not already committed, so the memory
// info can be updated without querying the current page state.
//
	{
	__ASSERT_MUTEX(RamAllocatorMutex);

	if (aSize==0)
		return KErrNone;

	if (MM::FreeMemory+MM::CacheMemory >= aSize)
		{
		__LOCK_HOST;
		DWORD protect = aExecute ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
		if (VirtualAlloc(LPVOID(aBase), aSize, MEM_COMMIT, protect))
			{
			TInt reclaimed = aSize - MM::FreeMemory;
			if (reclaimed<=0)
				MM::FreeMemory -= aSize;
			else
				{
				// Some cache memory was needed to satisfy this commit...
				MM::FreeMemory = 0;
				MM::CacheMemory -= reclaimed;
				MM::ReclaimedCacheMemory += reclaimed;
				}
			MM::CheckMemoryCounters();

			// Clear memory to the value determined by the chunk member
			memset(reinterpret_cast<void*>(aBase), aClearByte, aSize);

			return KErrNone;
			}
		}
	MM::AllocFailed = ETrue;
	return KErrNoMemory;
	}
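
// Worked example of the accounting above (illustrative figures only):
// with FreeMemory==8K, CacheMemory==8K and a 12K commit, reclaimed is
// 12K-8K == 4K, so FreeMemory becomes 0, CacheMemory drops to 4K and
// ReclaimedCacheMemory rises to 4K: the shortfall is taken from cache
// pages rather than failing the request.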

TInt MM::Decommit(TLinAddr aBase, TInt aSize)
//
// Get Win32 to decommit the pages.
// The pages may or may not be committed: we need to find out which ones
// are, so that the memory info is updated correctly.
//
	{
	__ASSERT_MUTEX(RamAllocatorMutex);

	TInt freed = 0;
	TInt remain = aSize;
	TLinAddr base = aBase;
	__LOCK_HOST;
	while (remain > 0)
		{
		MEMORY_BASIC_INFORMATION info;
		VirtualQuery(LPVOID(base), &info, sizeof(info));
		TInt size = Min(remain, (TInt)info.RegionSize);
		if (info.State == MEM_COMMIT)
			freed += size;

#ifdef BTRACE_CHUNKS
		BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryDeallocated,NULL,base,size);
#endif

		base += info.RegionSize;
		remain -= info.RegionSize;
		}
	VirtualFree(LPVOID(aBase), aSize, MEM_DECOMMIT);
	MM::FreeMemory += freed;
	__KTRACE_OPT(KMEMTRACE, {Kern::Printf("MT:A %d %x %x %O",NTickCount(),NULL,aSize,NULL);});

	return freed;
	}
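
// Note: VirtualQuery reports the size of the run of consecutive pages
// sharing the same attributes, so the loop above typically walks the
// range in a few large steps; only regions found in the MEM_COMMIT
// state contribute to the total returned as freed.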

void MM::CheckMemoryCounters()
	{
	__NK_ASSERT_ALWAYS(MM::CacheMemory>=0);
	__NK_ASSERT_ALWAYS(MM::ReclaimedCacheMemory>=0);
	__NK_ASSERT_ALWAYS(MM::FreeMemory+MM::CacheMemory>=0);
	}

void MM::Signal()
	{
	if (RamAllocatorMutex->iHoldCount>1)
		{
		// Nested hold: just release one level.
		Kern::MutexSignal(*RamAllocatorMutex);
		return;
		}
	// Outermost release: check whether the free memory level changed
	// while the mutex was held.
	TInt initial=InitialFreeMemory;
	TBool failed=AllocFailed;
	TInt final=FreeMemory;
	Kern::MutexSignal(*RamAllocatorMutex);
	K::CheckFreeMemoryLevel(initial,final,failed);
	}
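
// Usage sketch (illustrative): MM::Wait()/MM::Signal() nest, and only the
// outermost pair does the bookkeeping:
//   MM::Wait();		// outermost: snapshots InitialFreeMemory
//   MM::Wait();		// nested: iHoldCount==2, no snapshot
//   MM::Signal();		// nested release, no check
//   MM::Signal();		// outermost release, runs K::CheckFreeMemoryLevel()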

void MM::DoProcessSwitch(TAny* aAddressSpace)
// Kernel locked on entry and exit
	{
	__NK_ASSERT_LOCKED;

	if (!aAddressSpace)
		return;

	DWin32Process* proc = (DWin32Process*)aAddressSpace;

	if (proc == K::TheKernelProcess)
		return;

	TInt count = proc->iDllData.Count();
	for (TInt ii=0; ii<count; ii++)
		{
		SProcessDllDataBlock& procData = proc->iDllData[ii];
		DWin32CodeSeg* codeSeg = procData.iCodeSeg;
		if (!codeSeg)
			continue;
		DWin32Process*& liveProc = codeSeg->iLiveProcess;
		if (liveProc == proc)
			continue;	// no change in live mapping
		if (liveProc)
			{
			// copy out the old process's data
			TInt liveIx = liveProc->iDllData.FindInUnsignedKeyOrder(procData);
			__ASSERT_ALWAYS(liveIx >= 0, MM::Panic(MM::EWsdDllNotInProcess));
			SProcessDllDataBlock& oldProcData = liveProc->iDllData[liveIx];
			memcpy(oldProcData.iDataCopy, (const TAny*)codeSeg->iDataDest, codeSeg->iRealDataSize);
			memcpy(oldProcData.iBssCopy, (const TAny*)codeSeg->iBssDest, codeSeg->iRealBssSize);
			}
		// copy the new process's data in
		memcpy((TAny*)codeSeg->iDataDest, procData.iDataCopy, codeSeg->iRealDataSize);
		memcpy((TAny*)codeSeg->iBssDest, procData.iBssCopy, codeSeg->iRealBssSize);
		liveProc = proc;
		}
	}
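
// Example scenario (illustrative): DLL D has writable static data and is
// loaded by processes P1 and P2. While P1 is live, D's static data area
// holds P1's values. On a switch to P2, P1's values are copied out to its
// saved iDataCopy/iBssCopy blocks and P2's saved values are copied in,
// after which P2 is recorded as the live process for D's code segment.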

TAny* MM::CurrentAddress(DThread* aThread, const TAny* aPtr, TInt aSize, TBool /*aWrite*/, TBool& aLocked)
// Enter and leave with the system locked.
// Kernel unlocked on entry.
// Kernel may be locked on exit, iff aPtr is in DLL WSD. This is because
// the returned address is only valid until the target process's DLL WSD
// changes live status, which can happen independently when another
// thread runs. The lock status is signalled in aLocked.
// Why? This allows the optimisation that WSD is only copied on process
// switch when necessary. The gain from that optimisation is expected to
// be much higher than the cost of leaving the kernel locked during
// (rare) IPC to DLL WSD.
	{
	DWin32Process* proc = (DWin32Process*)aThread->iOwningProcess;
	// Is the address in DLL static data?
	NKern::Lock();

	TInt count = proc->iDllData.Count();
	TLinAddr p = (TLinAddr)aPtr;
	TLinAddr base = 0;
	TInt size = 0;
	TBool data = EFalse;
	aLocked = EFalse;
	for (TInt ii=0; ii<count; ii++)
		{
		const SProcessDllDataBlock& procData = proc->iDllData[ii];
		DWin32CodeSeg* codeSeg = procData.iCodeSeg;
		if (codeSeg->iDataDest <= p && p < codeSeg->iDataDest + codeSeg->iRealDataSize)
			{
			base = codeSeg->iDataDest;
			size = codeSeg->iRealDataSize;
			data = ETrue;
			}
		else if (codeSeg->iBssDest <= p && p < codeSeg->iBssDest + codeSeg->iRealBssSize)
			{
			base = codeSeg->iBssDest;
			size = codeSeg->iRealBssSize;
			}
		if (base)
			{
			// This is a DLL static address, check range validity
			if (p + aSize > base + size)
				{
				NKern::Unlock();
				return NULL;
				}

			DWin32Process* liveProc = codeSeg->iLiveProcess;

			if (proc == liveProc)
				{
				// The target process is live, so no remapping is needed
				NKern::Unlock();
				return (TAny*)aPtr;
				}
			else
				{
				// Return the address within the target process's saved
				// copy, leaving the kernel locked (see above).
				aLocked = ETrue;
				TLinAddr procBase = (TLinAddr)(data ? procData.iDataCopy : procData.iBssCopy);
				TLinAddr remapped = procBase + (p - base);
				return (TAny*)remapped;
				}
			}
		}
	NKern::Unlock();
	// No, the address does not need to be remapped
	return (TAny*)aPtr;
	}
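
// Remapping arithmetic (illustrative addresses): if a DLL's .data lives at
// iDataDest==0x70000000 with iRealDataSize==0x100, and the target process
// is not the live one, then aPtr==0x70000040 is translated to
// iDataCopy+0x40 within that process's saved copy of the data.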

void M::BTracePrime(TUint aCategory)
	{
	(void)aCategory;
#ifdef BTRACE_KERNEL_MEMORY
	// Must also check for -1, the default value of aCategory for
	// BTrace::Prime(), which primes every currently enabled category
	// in a single invocation.
	if(aCategory==BTrace::EKernelMemory || (TInt)aCategory == -1)
		{
		BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryInitialFree,TheSuperPage().iTotalRamSize);
		BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryCurrentFree,Kern::FreeRamInBytes());
		}
#endif
	}

/**
Restart the system.
On hardware targets this calls the Restart Vector in the ROM Header.
Note that aMode is set to zero when this function is used by Kern::Fault().

@param aMode Argument passed to the restart routine. The meaning of this value
depends on the bootstrap implementation.
*/
EXPORT_C void Kern::Restart(TInt)
	{
	// On the emulator, simply terminate the host Windows process.
	ExitProcess(0);
	}

EXPORT_C TInt TInternalRamDrive::MaxSize()
	{
	return PP::RamDriveMaxSize;
	}

void M::FsRegisterThread()
	{
	}

// The signature words are the 64-bit fixed-point expansion of sqrt(2),
// an arbitrary but recognisable pattern.
void P::SetSuperPageSignature()
	{
	TUint32* sig = TheSuperPage().iSignature;
	sig[0] = 0xb504f333;
	sig[1] = 0xf9de6484;
	}

TBool P::CheckSuperPageSignature()
	{
	const TUint32* sig = TheSuperPage().iSignature;
	return ( sig[0]==0xb504f333 && sig[1]==0xf9de6484 );
	}

// Dummy implementation of kernel pin APIs

class TVirtualPinObject
	{
	};

TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject)
	{
	aPinObject = new TVirtualPinObject;
	return aPinObject != NULL ? KErrNone : KErrNoMemory;
	}

TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr, TUint, DThread*)
	{
	__ASSERT_DEBUG(aPinObject, K::Fault(K::EVirtualPinObjectBad));
	(void)aPinObject;
	return KErrNone;
	}

TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr, TUint)
	{
	aPinObject = 0;
	return KErrNone;
	}

void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
	{
	__ASSERT_DEBUG(aPinObject, K::Fault(K::EVirtualPinObjectBad));
	(void)aPinObject;
	}

void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
	{
	TVirtualPinObject* object = (TVirtualPinObject*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
	if (object)
		Kern::AsyncFree(object);
	}
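
// Typical client lifecycle (illustrative): a driver calls
// CreateVirtualPinObject(), pins before a transfer with PinVirtualMemory(),
// unpins with UnpinVirtualMemory() afterwards, and finally calls
// DestroyVirtualPinObject(). On this memory model memory is never paged,
// so pinning itself is a no-op and only the object's lifetime matters.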

class TPhysicalPinObject
	{
	};

TInt M::CreatePhysicalPinObject(TPhysicalPinObject*& aPinObject)
	{
	aPinObject = new TPhysicalPinObject;
	return aPinObject != NULL ? KErrNone : KErrNoMemory;
	}

TInt M::PinPhysicalMemory(TPhysicalPinObject* aPinObject, TLinAddr, TUint, TBool, TPhysAddr&, TPhysAddr*, TUint32&, TUint&, DThread*)
	{
	__ASSERT_DEBUG(aPinObject, K::Fault(K::EPhysicalPinObjectBad));
	(void)aPinObject;
	return KErrNone;
	}

void M::UnpinPhysicalMemory(TPhysicalPinObject* aPinObject)
	{
	__ASSERT_DEBUG(aPinObject, K::Fault(K::EPhysicalPinObjectBad));
	(void)aPinObject;
	}

void M::DestroyPhysicalPinObject(TPhysicalPinObject*& aPinObject)
	{
	TPhysicalPinObject* object = (TPhysicalPinObject*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
	if (object)
		Kern::AsyncFree(object);
	}

// Misc DPagingDevice methods

EXPORT_C void DPagingDevice::NotifyIdle()
	{
	// Not used on this memory model
	}

EXPORT_C void DPagingDevice::NotifyBusy()
	{
	// Not used on this memory model
	}

EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr*, TUint, TUint, TUint, TUint32)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaWrite");
	return KErrNotSupported;
	}

EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr*, TUint, TUint, TUint, TUint32)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaRead");
	return KErrNotSupported;
	}

EXPORT_C TInt Cache::SyncPhysicalMemoryAfterDmaRead(TPhysAddr*, TUint, TUint, TUint, TUint32)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryAfterDmaRead");
	return KErrNotSupported;
	}