author | Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com> |
Tue, 26 Jan 2010 13:13:38 +0200 | |
changeset 14 | 5d2844f35677 |
parent 0 | a41df078684a |
child 43 | c1f20ce4abcf |
permissions | -rw-r--r-- |
0 | 1 |
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// eka\include\kernel\cache.h
//
//

/**
 @file
 @publishedPartner
 @released
*/
#ifndef __CACHE_H__
#define __CACHE_H__

#include <e32err.h>
#include <nk_cpu.h>

/**
Cache Thresholds container used to Set/Get cache thresholds from/to Kernel.
@see Cache::GetThresholds
@see Cache::SetThresholds
*/
struct TCacheThresholds
	{
	TUint32 iPurge;		/**< Invalidate threshold in bytes. */
	TUint32 iClean;		/**< Clean threshold in bytes. */
	TUint32 iFlush;		/**< Clean and invalidate threshold in bytes. */
	};

const TUint KCacheSelectI=1;		/**< Specifies instruction cache. */
const TUint KCacheSelectD=2;		/**< Specifies data cache to the point of coherency. */
const TUint KCacheSelectAltD=4;		/**< Specifies alternative cache. @deprecated */
const TUint KCacheSelectD_IMB=8;	/**< Specifies data cache to the point of unification. */
const TUint KCacheSelect_L2=0x10;	/**< Specifies external cache. */
/** |
|
48 |
A set of Kernel APIs for cache utility functions. |
|
49 |
*/ |
|
50 |
class Cache |
|
51 |
{ |
|
52 |
public: |
|
53 |
||
54 |
/** |
|
55 |
Synchronises cache(s) for instruction execution in the specified address range. |
|
56 |
||
57 |
The function performs the cache/memory synchronisation required to guarantee |
|
58 |
correct execution of code in the specified virtual address range. |
|
59 |
||
60 |
@param aBase The start of the virtual address range. |
|
61 |
@param aSize The size of the address range. |
|
62 |
||
63 |
@pre Call in a thread context. |
|
64 |
@pre No fast mutex can be held. |
|
65 |
@pre Interrupts must be enabled. |
|
66 |
@pre Kernel must be unlocked. |
|
67 |
*/ |
|
68 |
IMPORT_C static void IMB_Range(TLinAddr aBase, TUint aSize); |
|
69 |
||
70 |
/** |
|
71 |
Synchronises cache(s) prior to a DMA write (memory to HW DMA transfer) operation. |
|
72 |
It is assumed that the memory region is fully cached. |
|
73 |
||
74 |
The purpose of SyncMemoryBeforeDmaWrite is to ensure that the main memory is |
|
75 |
synchronised with the content of cache memory before the start of DMA write transfer. |
|
76 |
||
77 |
@param aBase The start of the virtual address range. |
|
78 |
@param aSize The size of the address range. |
|
79 |
||
80 |
@pre Call in a thread context. |
|
81 |
@pre No fast mutex can be held. |
|
82 |
@pre Interrupts must be enabled. |
|
83 |
@pre Kernel must be unlocked. |
|
84 |
*/ |
|
85 |
IMPORT_C static void SyncMemoryBeforeDmaWrite(TLinAddr aBase, TUint aSize); |
|
86 |
||
87 |
/** |
|
88 |
Synchronises cache(s) prior to a DMA read (HW to memory DMA transfer) operation. |
|
89 |
It is assumed that the memory region is fully cached. |
|
90 |
||
91 |
The purpose of SyncMemoryBeforeDmaRead is to make sure that the content of memory |
|
92 |
(that is about to be DMA-ed) won't be destroyed by interaction between cache |
|
93 |
and the main memory. (E.g. by cache eviction or remaining content of write buffers.) |
|
94 |
||
95 |
@param aBase The start of the virtual address range. |
|
96 |
@param aSize The size of the address range. |
|
97 |
||
98 |
@pre Call in a thread context. |
|
99 |
@pre No fast mutex can be held. |
|
100 |
@pre Interrupts must be enabled. |
|
101 |
@pre Kernel must be unlocked. |
|
102 |
*/ |
|
103 |
IMPORT_C static void SyncMemoryBeforeDmaRead(TLinAddr aBase, TUint aSize); |
|
104 |
||
105 |
/** |
|
106 |
Synchronises cache(s) after a DMA read (HW to memory DMA transfer) operation. |
|
107 |
It is assumed that the memory region is fully cached. |
|
108 |
||
109 |
The purpose of SyncMemoryAfterDmaRead is to make sure that CPU won't read |
|
110 |
old data from cache instead of DMA-ed data from the main memory. |
|
111 |
||
112 |
@param aBase The start of the virtual address range. |
|
113 |
@param aSize The size of the address range. |
|
114 |
||
115 |
@pre Call in a thread context. |
|
116 |
@pre No fast mutex can be held. |
|
117 |
@pre Interrupts must be enabled. |
|
118 |
@pre Kernel must be unlocked. |
|
119 |
*/ |
|
120 |
IMPORT_C static void SyncMemoryAfterDmaRead(TLinAddr aBase, TUint aSize); |
|
121 |
||
122 |
/** |
|
123 |
Synchronises cache(s) with main memory prior to power off or reboot. |
|
124 |
It ensures the content of cache is copied down to the main memory. It doesn't necessarily |
|
125 |
invalidates the content of cache(s). |
|
126 |
On SMP platforms, it only maintains internal cache of the CPU that executes the call. |
|
127 |
Cache memory common to the all cores (like external cache controllers) are also synchronised. |
|
128 |
@pre Interrupts must be disabled. |
|
129 |
*/ |
|
130 |
IMPORT_C static void AtomicSyncMemory(); |
|
131 |
||
132 |
/** |
|
133 |
Synchronises cache(s) prior to a DMA write (memory to HW DMA transfer) operation. |
|
134 |
||
135 |
The purpose of SyncMemoryBeforeDmaWrite is to make sure that the main memory is synchronised |
|
136 |
with the content of cache memory before DMA transfer from main memory starts. |
|
137 |
||
138 |
@param aBase The start of the virtual address range. |
|
139 |
||
140 |
@param aSize The size of the address range. |
|
141 |
||
142 |
@param aMapAttr The mapping attributes with which the address range has been mapped. |
|
143 |
This is a value constructed from the bit masks in the enumeration |
|
144 |
TMappingAttributes. |
|
145 |
||
146 |
@pre Call in a thread context. |
|
147 |
@pre No fast mutex can be held. |
|
148 |
@pre Interrupts must be enabled. |
|
149 |
@pre Kernel must be unlocked. |
|
150 |
||
151 |
@see TMappingAttributes |
|
152 |
*/ |
|
153 |
IMPORT_C static void SyncMemoryBeforeDmaWrite(TLinAddr aBase, TUint aSize, TUint32 aMapAttr); |
|
154 |
||
155 |
/** |
|
156 |
Synchronises cache(s) prior to a DMA read ((HW to memory DMA transfer) operation. |
|
157 |
||
158 |
The purpose of SyncMemoryBeforeDmaRead is to make sure that the content of memory |
|
159 |
(that is about to be DMA-ed) won't be destroyed by interaction between cache |
|
160 |
and the main memory. (E.g. by write buffers' flushing or cache eviction.) |
|
161 |
||
162 |
@param aBase The start of the virtual address range. |
|
163 |
||
164 |
@param aSize The size of the address range. |
|
165 |
||
166 |
@param aMapAttr The mapping attributes with which the address range has been mapped. |
|
167 |
This is a value constructed from the bit masks in the enumeration |
|
168 |
TMappingAttributes. |
|
169 |
||
170 |
@pre Call in a thread context. |
|
171 |
@pre No fast mutex can be held. |
|
172 |
@pre Interrupts must be enabled. |
|
173 |
@pre Kernel must be unlocked. |
|
174 |
||
175 |
@see TMappingAttributes |
|
176 |
*/ |
|
177 |
IMPORT_C static void SyncMemoryBeforeDmaRead(TLinAddr aBase, TUint aSize, TUint32 aMapAttr); |
|
178 |
||
179 |
/** |
|
180 |
Synchronises cache(s) after a DMA read (HW to memory DMA transfer) operation. |
|
181 |
||
182 |
The purpose of SyncMemoryAfterDmaRead is to make sure that CPU won't read |
|
183 |
old data from cache instead of DMA-ed data from the main memory. |
|
184 |
||
185 |
@param aBase The start of the virtual address range. |
|
186 |
@param aSize The size of the address range. |
|
187 |
@param aMapAttr The mapping attributes with which the address range has been mapped. |
|
188 |
This is a value constructed from the bit masks in the enumeration |
|
189 |
TMappingAttributes. |
|
190 |
||
191 |
@pre Call in a thread context. |
|
192 |
@pre No fast mutex can be held. |
|
193 |
@pre Interrupts must be enabled. |
|
194 |
@pre Kernel must be unlocked. |
|
195 |
||
196 |
@see TMappingAttributes |
|
197 |
*/ |
|
198 |
IMPORT_C static void SyncMemoryAfterDmaRead(TLinAddr aBase, TUint aSize, TUint32 aMapAttr); |
|
199 |
||
200 |
/** |
|
201 |
Prepares physical memory for DMA writing (memory to H/W DMA copy). If the physical memory is the |
|
202 |
subject of RAM defragmentation framework (e.g. if it is mapped in user side space) the driver |
|
203 |
should make sure it is pinned (@see Kern::PinPhysicalMemory) before calling this Kernel service. |
|
204 |
Kern::PinPhysicalMemory also generates input parameters aPages and aColour for this Kernel service. |
|
205 |
||
206 |
@param aPages Pointer to the the list of physical memory pages to be prepared for DMA write. |
|
207 |
The exact portion of physical memory to be prepared is defined by aOffset and |
|
208 |
aSize parameters. For example, if aOffset is 0x1800 and aSize is 0x2000, the method |
|
209 |
will sync the last 0x800 bytes of the second page in the list, the whole third page |
|
210 |
and the first 0x800 bytes of the fourth page. (0x1000 page size is assumed). |
|
211 |
@param aColour The mapping colour of the first physical page in the list. |
|
212 |
@param aOffset Offset in memory list where DMA transfer should start. |
|
213 |
@param aSize The size of the memory in bytes to be sync'ed. |
|
214 |
@param aMapAttr Mapping attributes of the the existing mapping. The value is either: |
|
215 |
- Formed by ORing together values from the TMappingAttributes enumeration as |
|
216 |
returned by Kern::ChunkCreate interface, or |
|
217 |
- TMappingAttributes2 object. |
|
218 |
For user memory (always fully cached), EMapAttrCachedMax enum value can be passed. |
|
219 |
||
14
5d2844f35677
Revision: 201004
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
220 |
@return KErrNotSupported on memory models other than flexible. |
0 | 221 |
KErrNone, on flexible memory model. |
222 |
||
223 |
@pre Interrupts must be enabled. |
|
224 |
@pre Kernel must be unlocked. |
|
225 |
@pre No fast mutex can be held. |
|
226 |
@pre Call in a thread context. |
|
227 |
@pre Can be used in a device driver. |
|
228 |
*/ |
|
229 |
IMPORT_C static TInt SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr); |
|
230 |
||
231 |
/** |
|
232 |
Prepares physical memory for DMA read (H/W to memory DMA copy). |
|
233 |
For all the details @see Cache::SyncPhysicalMemoryBeforeDmaWrite |
|
234 |
*/ |
|
235 |
IMPORT_C static TInt SyncPhysicalMemoryBeforeDmaRead (TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr); |
|
236 |
||
237 |
/** |
|
238 |
Maintains physical memory after DMA read (H/W to memory DMA copy). |
|
239 |
For all the details @see Cache::SyncPhysicalMemoryBeforeDmaWrite |
|
240 |
*/ |
|
241 |
IMPORT_C static TInt SyncPhysicalMemoryAfterDmaRead (TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr); |
|
242 |
||
243 |
/* |
|
244 |
* Gets threshold values for the specified cache. |
|
245 |
* |
|
246 |
* When Kernel is about to perform cache maintenance operation, the size of the region to act |
|
247 |
* against is compared to the corresponding threshold value: |
|
248 |
* - When invalidating memory region: |
|
249 |
* @code |
|
250 |
* if (invalidate region > invalidate threshold) |
|
251 |
* clean and invalidate entire cache |
|
252 |
* else |
|
253 |
* invalidate the specified region |
|
254 |
* @endcode |
|
255 |
* |
|
256 |
* - When cleaning memory region: |
|
257 |
* @code |
|
258 |
* if (clean region > clean threshold) |
|
259 |
* clean entire cache |
|
260 |
* else |
|
261 |
* clean the specified region |
|
262 |
* @endcode |
|
263 |
* |
|
264 |
* - When invalidating and cleaning memory region: |
|
265 |
* @code |
|
266 |
* if (invalidate and clean region > invalidate and clean threshold) |
|
267 |
* invalidate and clean entire cache |
|
268 |
* else |
|
269 |
* invalidate and clean the specified region |
|
270 |
* @endcode |
|
271 |
* |
|
272 |
* This function returns the current threshold values for the specified type of cache. Threshold |
|
273 |
* values could be changed by Cache::SetThresholds. |
|
274 |
* |
|
275 |
* @param aCacheType Specifies which type of cache the thresholds belong to: |
|
276 |
* - KCacheSelectI Instruction Cache |
|
277 |
* - KCacheSelectD Data Cache. For ARMv7 platforms, it specifies Point-of-Coherency thresholds. |
|
278 |
* These threshold are relevant for DMA related cache maintenance. |
|
279 |
* - KCacheSelectD_IMB Data Cache for the Point-of-Unification. |
|
280 |
* These threshold are relevant for Instruction Memory Barrier (IMB). |
|
281 |
* - KCacheSelectAltD Alternative Data Cache. This type of cache is depricated on today's platforms. |
|
282 |
* - KCacheSelect_L2 External Cache, such as L210, L220 or PL310. |
|
283 |
* @param aThresholds If KErrNone is returned, holds thresholds values(in bytes) for the specified cache. |
|
284 |
* |
|
285 |
* @return KErrNone if successfull or KErrNotSupported if aCacheType is not valid on the |
|
286 |
* running platform. |
|
287 |
* |
|
288 |
* @see Cache::SetThresholds |
|
289 |
* @see TCacheThresholds |
|
290 |
* @released 9.3 |
|
291 |
*/ |
|
292 |
IMPORT_C static TInt GetThresholds(TCacheThresholds& aThresholds, TUint aCacheType); |
|
293 |
||
294 |
/* |
|
295 |
* Sets threshold values for the specified cache. @See GetThresholds for details. |
|
296 |
* |
|
297 |
* @param aCacheType Specifies which type of cache the thresholds belong to: |
|
298 |
* - KCacheSelectI Instruction Cache |
|
299 |
* - KCacheSelectD Data Cache. For ARMv7 platforms, it specifies Point-of-Coherency thresholds. |
|
300 |
* These threshold are relevant for DMA related cache maintenance. |
|
301 |
* - KCacheSelectD_IMB Data Cache for the Point-of-Unification. |
|
302 |
* These threshold are relevant for Instruction Memory Barrier (IMB). |
|
303 |
* - KCacheSelectAltD Alternative Data Cache. This type of cache is depricated on today's platforms. |
|
304 |
* - KCacheSelect_L2 External Cache, such as L210, L220 or PL310. |
|
305 |
* @param aThresholds New threshold values (in bytes) for the cache.. |
|
306 |
* |
|
307 |
* @return KErrNone if successfull or KErrNotSupported if aCacheType is not valid on the |
|
308 |
* running platform. |
|
309 |
* |
|
310 |
* @see Cache::GetThresholds |
|
311 |
* @see TCacheThresholds |
|
312 |
* @released 9.3 |
|
313 |
*/ |
|
314 |
IMPORT_C static TInt SetThresholds(const TCacheThresholds& aThresholds, TUint aCacheType); |
|
315 |
||
316 |
/* |
|
317 |
* Returns the required alignment for fully cached memory buffer used in DMA transfer. |
|
318 |
* Use this value to separate DMA from non-DMA memory. |
|
319 |
* |
|
320 |
* Note that a single DMA transfer can still start/stop from any memory location. However, |
|
321 |
* the content of memory just before (if start address is unaligned) or after (if end address |
|
322 |
* is unaligned) DMA buffer may be corrupted. |
|
323 |
* |
|
324 |
* Here is an example of code that allocates DMA buffer from the heap. |
|
325 |
* |
|
326 |
* @code |
|
327 |
* class DMABufferAlloc |
|
328 |
* { |
|
329 |
* public: |
|
330 |
* DMABufferAlloc():iPtr(NULL){}; |
|
331 |
* |
|
332 |
* //Return value is guaranteed to be aligned. |
|
333 |
* TAny* Alloc(TInt aSize) |
|
334 |
* { |
|
335 |
* TInt alignmentMask = Cache::DmaBufferAlignment()-1; |
|
336 |
* NKern::ThreadEnterCS(); |
|
337 |
* |
|
338 |
* // Assume that the return value of Kern::Alloc is unaligned. |
|
339 |
* // Allocate sufficient memory to cover the worst case, for example: |
|
340 |
* // Alignment = 32, aSize = 2. If (iPtr==31) 64 bytes are required |
|
341 |
* iPtr = Kern::Alloc( (aSize+2*alignmentMask) & ~alignmentMask); |
|
342 |
* |
|
343 |
* NKern::ThreadLeaveCS(); |
|
344 |
* |
|
345 |
* //Return the first aligned location in the allocated buffer. |
|
346 |
* return (TAny*)(((TInt)iPtr + alignmentMask) & ~alignmentMask); |
|
347 |
* } |
|
348 |
* |
|
349 |
* void Free() |
|
350 |
* { |
|
351 |
* NKern::ThreadEnterCS(); |
|
352 |
* Kern::Free(iPtr); |
|
353 |
* iPtr = NULL; |
|
354 |
* NKern::ThreadLeaveCS(); |
|
355 |
* } |
|
356 |
* |
|
357 |
* ~DMABufferAlloc() |
|
358 |
* { |
|
359 |
* if (iPtr) |
|
360 |
* Free(); |
|
361 |
* }; |
|
362 |
* |
|
363 |
* private: |
|
364 |
* TAny* iPtr; |
|
365 |
* }; |
|
366 |
* @codeend |
|
367 |
*/ |
|
368 |
IMPORT_C static TUint DmaBufferAlignment(); |
|
369 |
}; |
|
#endif // def __CACHE_H__