author | Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com> |
Thu, 19 Aug 2010 11:14:22 +0300 | |
branch | RCL_3 |
changeset 42 | a179b74831c9 |
parent 22 | 2f92ad2dc5db |
permissions | -rw-r--r-- |
0 | 1 |
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies). |
2 |
// All rights reserved. |
|
3 |
// This component and the accompanying materials are made available |
|
4 |
// under the terms of the License "Eclipse Public License v1.0" |
|
5 |
// which accompanies this distribution, and is available |
|
6 |
// at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 |
// |
|
8 |
// Initial Contributors: |
|
9 |
// Nokia Corporation - initial contribution. |
|
10 |
// |
|
11 |
// Contributors: |
|
12 |
// |
|
13 |
// Description: |
|
14 |
// |
|
15 |
||
16 |
#include <plat_priv.h> |
|
17 |
#include "mm.h" |
|
18 |
#include "mmu.h" |
|
19 |
#include "mvalloc.h" |
|
20 |
#include "maddrcont.h" |
|
21 |
||
22 |
||
23 |
/**
Log2 of the minimum granularity and alignment of virtual address allocation.
Must be greater than or equal to #KPageShift+#KPageColourShift.
*/
const TUint KVirtualAllocShift = KPageShift+KPageColourShift;

/**
Log2 of the size of the region covered by a single 'slab' of virtual addresses.
Must be greater than or equal to KChunkShift.
*/
const TUint KVirtualAllocSlabShift = KChunkShift;

/**
Size, in bytes, of the size of the region covered by a single 'slab' of virtual addresses.
*/
const TUint KVirtualAllocSlabSize = 1<<KVirtualAllocSlabShift;

/** Mask for the byte offset of an address within its slab's region. */
const TUint KVirtualAllocSlabMask = KVirtualAllocSlabSize-1;

// Compile-time enforcement of the invariants documented above.
__ASSERT_COMPILE(KVirtualAllocShift>=KPageShift+KPageColourShift);
__ASSERT_COMPILE(KVirtualAllocSlabShift>=TUint(KChunkShift));
|
44 |
||
45 |
||
46 |
//
// CountLeadingZeroes(aValue) returns the number of consecutive zero bits at
// the most significant end of aValue; 32 is returned for an input of zero.
// Several implementations follow, selected by compiler/target.
//

#if defined(__GCCXML__)
// GCC-XML build (API extraction only) — result is never executed, stub it out.
FORCE_INLINE TUint CountLeadingZeroes(TUint32 /*aValue*/)
	{
	// empty
	return 0;
	}

#elif defined(__MARM__)

#ifdef __ARMCC__
FORCE_INLINE TUint CountLeadingZeroes(TUint32 aValue)
	{
#if __ARMCC_VERSION < 310000
	// Older RVCT: embedded assembler may reference C variables by name.
	TUint r;
	asm("clz r,aValue");
	return r;
#else
	// Inline assembler is deprecated in RVCT 3.1 so we use an intrinsic.
	return __clz(aValue);
#endif
	}
#endif // __ARMCC__

#ifdef __MARM_ARM4__
// ARMv4 target: CLZ macro emits the instruction (or an emulation) directly;
// naked function so the result in r0 is returned unmodified.
__declspec(naked) static TUint CountLeadingZeroes(TUint32)
	{
	CLZ(0,0);
	__JUMP(,lr);
	}

#elif defined(__GNUC__)
FORCE_INLINE TUint CountLeadingZeroes(TUint32 aValue)
	{
	// GCC inline assembly using the ARM CLZ instruction.
	TUint r;
	asm("clz %0,%1" : "=r"(r) : "r"(aValue));
	return r;
	}
#endif // __GNUC__

#else // !__MARM__

// Portable fallback: binary-search the top set bit, narrowing 16/8/4/2/1 bits
// at a time. Returns 32 when no bit is set.
inline TUint CountLeadingZeroes(TUint32 aValue)
	{
	if(!aValue)
		return 32;
	TUint count = 31;
	if(aValue>=(1<<16))
		{
		count -= 16;
		aValue >>= 16;
		}
	if(aValue>=(1<<8))
		{
		count -= 8;
		aValue >>= 8;
		}
	if(aValue>=(1<<4))
		{
		count -= 4;
		aValue >>= 4;
		}
	if(aValue>=(1<<2))
		{
		count -= 2;
		aValue >>= 2;
		}
	// aValue is now 1, 2 or 3; its top bit position finishes the count.
	count -= aValue>>1;
	return count;
	}

#endif // __MARM__
|
117 |
||
118 |
||
119 |
||
120 |
// |
|
121 |
// TLogAllocator |
|
122 |
// |
|
123 |
||
124 |
/**
Bitmap allocator for allocating regions which have size and alignment which
are a power-of-two.
*/
class TLogAllocator
	{
public:
	TLogAllocator();

	/**
	Find and allocate a free region in the bitmap.

	@param aSizeShift Log2 of the number of bits to allocate.

	@return If successful, the index of the first bit allocated.
			Otherwise, -1.
	*/
	TInt Alloc(TUint aSizeShift);

	/**
	Allocate a specific region of bits.

	@param aIndex		The index of the first bit to allocate.
						Must be an integer multiple of 2^aSizeShift.
	@param aSizeShift	Log2 of the number of bits to allocate.

	@return KErrNone, if successful;
			KErrAlreadyExists, if any part of the region was already allocated.
	*/
	TInt Alloc(TUint aIndex, TUint aSizeShift);

	/**
	Free a specific region of bits.

	@param aIndex		The index of the first bit to free.
						Must be an integer multiple of 2^aSizeShift.
	@param aSizeShift	Log2 of the number of bits to free.

	@return True, if the slab no longer has any bits allocated.
	*/
	TBool Free(TUint aIndex, TUint aSizeShift);
private:
	enum
		{
		// One bit per minimum-granularity allocation unit in a slab.
		ENumBits = 1<<(KVirtualAllocSlabShift-KVirtualAllocShift),
		// Number of 32-bit words needed to hold ENumBits bits.
		ENumWords = (ENumBits+31)/32
		};

	/**
	Number of bits which have been allocated.
	*/
	TUint iAllocCount;

	/**
	Bitmap where a bit set to one indicates 'free' and a bit cleared to zero
	indicates 'allocated'. The most significant bit in each word has the lowest
	index value. E.g.
	- Index 0 is bit 31 of iBits[0]
	- Index 31 is bit 0 of iBits[0]
	- Index 32 is bit 31 of iBits[1]
	*/
	TUint32 iBits[ENumWords];
	};
|
188 |
||
189 |
||
190 |
/**
Construct an empty allocator: no bits allocated, whole bitmap free.
*/
TLogAllocator::TLogAllocator()
	{
	iAllocCount = 0;
	memset(iBits,~0u,sizeof(iBits)); // unallocated bits are set to one
	}
|
195 |
||
196 |
||
197 |
/**
Find and allocate 2^aSizeShift bits, naturally aligned, scanning the bitmap
from the lowest index. Sizes up to 32 bits are found within a single word by
bit-folding; larger sizes are found as runs of fully-free words.

@param aSizeShift Log2 of the number of bits to allocate.
@return Index of the first allocated bit, or -1 if no free region was found.
*/
TInt TLogAllocator::Alloc(TUint aSizeShift)
	{
	TUint size = 1<<aSizeShift;

	__NK_ASSERT_DEBUG(size<=ENumBits); // check in range

	TUint32* bits = iBits;
	TUint32* bitsEnd = bits+ENumWords;
	TUint32 b;
	switch(aSizeShift)
		{
	case 0: // find word with any unallocated bits...
		do
			{
			b = *bits++;
			if(b)
				goto small_found;
			}
		while(bits<bitsEnd);
		break;

	case 1: // find word with 2 adjacent unallocated bits...
		// b &= b<<1 leaves a set bit only where the bit and its right
		// neighbour are both free; the constant mask keeps only the
		// naturally-aligned candidate positions.
		do
			{
			b = *bits++;
			b &= b<<1;
			b &= 0xaaaaaaaa;
			if(b)
				goto small_found;
			}
		while(bits<bitsEnd);
		break;

	case 2: // find word with 4 adjacent unallocated bits...
		do
			{
			b = *bits++;
			b &= b<<1;
			b &= b<<2;
			b &= 0x88888888;
			if(b)
				goto small_found;
			}
		while(bits<bitsEnd);
		break;

	case 3: // find word with 8 adjacent unallocated bits...
		do
			{
			b = *bits++;
			b &= b<<1;
			b &= b<<2;
			b &= b<<4;
			b &= 0x80808080;
			if(b)
				goto small_found;
			}
		while(bits<bitsEnd);
		break;

	case 4: // find word with 16 adjacent unallocated bits...
		do
			{
			b = *bits++;
			b &= b<<1;
			b &= b<<2;
			b &= b<<4;
			b &= b<<8;
			b &= 0x80008000;
			if(b)
				goto small_found;
			}
		while(bits<bitsEnd);
		break;

	case 5: // find word which is totally unallocated (has 32 bits free)...
		do
			{
			b = *bits++;
			if(b==0xffffffffu)
				goto big_found;
			}
		while(bits<bitsEnd);
		break;

	default: // find relevant number of words which are unallocated...
		{
		do
			{
			// AND words together...
			TUint32* end = (TUint32*)((TUint8*)bits+(size>>3)); // size>>3 = bytes spanned
			TUint32 b = 0xffffffffu;
			do b &= *bits++;
			while(bits<end);

			if(b==0xffffffffu)
				goto big_found; // all were free
			}
		while(bits<bitsEnd);
		break;
		}

		}
	// Scanned the whole bitmap without success.
	__NK_ASSERT_DEBUG(bits==bitsEnd);
	return -1;

small_found:
	{
	// find first position in word which have free region (a bit set to one)...
	TUint offset = CountLeadingZeroes(b);

	// clear bits...
	TUint32 mask = 0xffffffffu;
	mask >>= size;	// make top 'size' bits...
	mask = ~mask;
	mask >>= offset;	// ...and shift them to the found position
	*--bits &= ~mask;	// step back to the word just examined and mark allocated

	// calculate index for allocated region...
	TUint index = (bits-iBits)*32+offset;

	iAllocCount += size;
	return index;
	}

big_found:
	{
	// clear bits...
	TUint32* start = (TUint32*)((TUint8*)bits-(size>>3));
	do *--bits = 0;
	while(bits>start);

	// calculate index for allocated region...
	TUint index = (bits-iBits)*32;

	iAllocCount += size;
	return index;
	}

	}
|
337 |
||
338 |
||
339 |
/**
Allocate the specific region of 2^aSizeShift bits starting at aIndex.
Because aIndex is naturally aligned, a region smaller than 32 bits never
straddles a word boundary, so the two paths below are exhaustive.

@return KErrNone on success; KErrAlreadyExists if any bit was already allocated.
*/
TInt TLogAllocator::Alloc(TUint aIndex, TUint aSizeShift)
	{
	TUint size = 1<<aSizeShift;

	__NK_ASSERT_DEBUG(aIndex+size>aIndex); // check overflow
	__NK_ASSERT_DEBUG(aIndex+size<=ENumBits); // check in range
	__NK_ASSERT_DEBUG(((aIndex>>aSizeShift)<<aSizeShift)==aIndex); // check alignment

	TUint32* bits = iBits+(aIndex>>5);
	if(size<32)
		{
		// Region lies within a single word: build a mask of 'size' bits
		// positioned at aIndex within the word.
		TUint32 mask = 0xffffffffu;
		mask >>= size;
		mask = ~mask;
		mask >>= aIndex&31;
		TUint32 b = *bits;
		if((b&mask)!=mask)
			return KErrAlreadyExists;
		*bits = b&~mask;
		}
	else
		{
		// Region covers whole words: first verify all free, then clear.
		TUint32* start = bits;
		TUint32* end = bits+(size>>5);
		do if(*bits++!=0xffffffffu) return KErrAlreadyExists;
		while(bits<end);

		bits = start;
		do *bits++ = 0;
		while(bits<end);
		}

	iAllocCount += size;
	return KErrNone;
	}
|
374 |
||
375 |
||
376 |
/**
Free the specific region of 2^aSizeShift bits starting at aIndex.
The region must have been allocated (debug-asserted below).

@return True when the allocator has no bits allocated afterwards,
		i.e. the owning slab is now completely empty.
*/
TBool TLogAllocator::Free(TUint aIndex, TUint aSizeShift)
	{
	TUint size = 1<<aSizeShift;

	__NK_ASSERT_DEBUG(aIndex+size>aIndex); // check overflow
	__NK_ASSERT_DEBUG(aIndex+size<=ENumBits); // check in range
	__NK_ASSERT_DEBUG(((aIndex>>aSizeShift)<<aSizeShift)==aIndex); // check alignment

	TUint32* bits = iBits+(aIndex>>5);
	if(size<32)
		{
		// Region lies within one word; rebuild the same mask used by Alloc.
		TUint32 mask = 0xffffffffu;
		mask >>= size;
		mask = ~mask;
		mask >>= aIndex&31;
		TUint32 b = *bits;
		__NK_ASSERT_DEBUG((b&mask)==0); // check was allocated
		*bits = b|mask;
		}
	else
		{
		// Whole-word region: each word must be fully allocated (zero).
		TUint wordCount = size>>5;
		do
			{
			__NK_ASSERT_DEBUG(bits[0]==0);
			*bits++ = 0xffffffffu;
			}
		while(--wordCount);
		}

	iAllocCount -= size;
	return !iAllocCount;
	}
|
409 |
||
410 |
||
411 |
||
412 |
// |
|
413 |
// TVirtualSlab |
|
414 |
// |
|
415 |
||
416 |
/**
Class for allocating virtual addresses contained in a single 'slab'.
@see RVirtualAllocSlabSet.
*/
class TVirtualSlab
	{
public:
	/**
	@param aHead		The head of a linked list of slabs to which this one should be added.
	@param aBase		The starting virtual address of the region covered by this slab.
	@param aSlabType	The 'slab type'.
	*/
	TVirtualSlab(SDblQue& aHead, TUint aBase, TUint aSlabType);

	~TVirtualSlab();

	/**
	Find and allocate a free region of virtual addresses.

	@param aSizeShift	Log2 of the size, in bytes, of the region.

	@return If successful, the allocated virtual address.
			Otherwise, 0 (zero).
	*/
	TLinAddr Alloc(TUint aSizeShift);

	/**
	Allocate a specific region of virtual addresses.

	@param aAddr		The start address of the region.
						Must be an integer multiple of 2^aSizeShift.
	@param aSizeShift	Log2 of the size, in bytes, of the region.

	@return KErrNone, if successful;
			KErrAlreadyExists, if any part of the region was already allocated.
	*/
	TInt Alloc(TLinAddr aAddr, TUint aSizeShift);

	/**
	Free a specific region of virtual addresses.

	@param aAddr		The start address of the region.
						Must be an integer multiple of 2^aSizeShift.
	@param aSizeShift	Log2 of the size, in bytes, of the region.

	@return True, if the slab no longer has any addresses allocated.
	*/
	TBool Free(TLinAddr aAddr, TUint aSizeShift);

	/**
	Return the starting virtual address of the region covered by this slab.
	*/
	FORCE_INLINE TLinAddr Base() { return iBase; }

	/**
	Return this objects 'slab type'.
	*/
	FORCE_INLINE TUint SlabType() { return iSlabType; }
private:
	/**
	Link object used to insert this slab into lists.
	*/
	SDblQueLink iLink;

	/**
	The starting virtual address of the region covered by this slab.
	*/
	TLinAddr iBase;

	/**
	This objects 'slab type'.
	*/
	TUint8 iSlabType;

	/**
	Bitmap allocator used to allocate pages in this slab's virtual address region.
	*/
	TLogAllocator iAllocator;

	friend class RVirtualAllocSlabSet;
	};
|
498 |
||
499 |
||
500 |
/**
Construct a slab covering the region starting at aBase and link it onto aHead.
The bitmap allocator member starts out fully free.
*/
TVirtualSlab::TVirtualSlab(SDblQue& aHead, TUint aBase, TUint aSlabType)
	: iBase(aBase),iSlabType(aSlabType)
	{
	TRACE2(("TVirtualSlab::TVirtualSlab(?,0x%08x,%d)",aBase, aSlabType));
	aHead.Add(&iLink);
	}
|
506 |
||
507 |
||
508 |
/**
Destructor; removes the slab from whichever list it was linked onto.
*/
TVirtualSlab::~TVirtualSlab()
	{
	TRACE2(("TVirtualSlab::~TVirtualSlab base=0x%08x",iBase));
	iLink.Deque();
	}
|
513 |
||
514 |
||
515 |
/**
Find and allocate a free region of virtual addresses in this slab.

@param aSizeShift	Log2 of the size, in bytes, of the region.
@return The allocated virtual address, or 0 if the slab is too full.
*/
TLinAddr TVirtualSlab::Alloc(TUint aSizeShift)
	{
	TRACE2(("TVirtualSlab::Alloc(%d)",aSizeShift));
	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift);
	// Convert the byte-size shift into a bitmap-size shift, then ask the
	// bitmap allocator for a naturally-aligned free region.
	const TUint bitShift = aSizeShift-KVirtualAllocShift;
	const TInt bitIndex = iAllocator.Alloc(bitShift);
	// A negative index means no suitably sized free region exists here.
	TLinAddr addr = (bitIndex>=0) ? iBase+(bitIndex<<KVirtualAllocShift) : 0;
	TRACE2(("TVirtualSlab::Alloc returns 0x%08x",addr));
	return addr;
	}
|
527 |
||
528 |
||
529 |
/**
Allocate a specific region of virtual addresses from this slab.

@param aAddr		The start address of the region; must lie within this slab
					and be an integer multiple of 2^aSizeShift.
@param aSizeShift	Log2 of the size, in bytes, of the region.

@return KErrNone, if successful;
		KErrAlreadyExists, if any part of the region was already allocated.
*/
TInt TVirtualSlab::Alloc(TLinAddr aAddr, TUint aSizeShift)
	{
	TRACE2(("TVirtualSlab::Alloc(0x%08x,%d)",aAddr,aSizeShift));
	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift);
	aSizeShift -= KVirtualAllocShift;
	TUint index = (aAddr-iBase)>>KVirtualAllocShift;
	__NK_ASSERT_DEBUG(iBase+(index<<KVirtualAllocShift)==aAddr);
	TInt r = iAllocator.Alloc(index,aSizeShift);
	if(r<0)
		return r;
	// Fix: r is KErrNone (0) on success, so the old trace of
	// iBase+(r<<KVirtualAllocShift) always printed iBase; log the
	// address actually allocated instead.
	TRACE2(("TVirtualSlab::Alloc returns 0x%08x",aAddr));
	return r;
	}
|
542 |
||
543 |
||
544 |
/**
Free a specific region of virtual addresses previously allocated from this slab.

@param aAddr		The start address of the region; must be an integer
					multiple of 2^aSizeShift.
@param aSizeShift	Log2 of the size, in bytes, of the region.
@return True when the slab no longer has any addresses allocated.
*/
TBool TVirtualSlab::Free(TLinAddr aAddr, TUint aSizeShift)
	{
	TRACE2(("TVirtualSlab::Free(0x%08x,%d)",aAddr,aSizeShift));
	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift);
	// Translate the address into a bit index within this slab's bitmap.
	const TUint slabOffset = aAddr-iBase;
	const TUint bitIndex = slabOffset>>KVirtualAllocShift;
	__NK_ASSERT_DEBUG((bitIndex<<KVirtualAllocShift)==slabOffset);
	return iAllocator.Free(bitIndex,aSizeShift-KVirtualAllocShift);
	}
|
554 |
||
555 |
||
556 |
// |
|
557 |
// RVirtualAllocSet |
|
558 |
// |
|
559 |
||
560 |
||
561 |
/**
Class used by #RVirtualAllocator for allocating virtual addresses which
have a size less than a 'chunk' (#KChunkSize).

This consists of a set of #TVirtualSlab objects.
*/
class RVirtualAllocSlabSet
	{
public:
	/**
	Create a new slab set for use with the specified allocator.

	@param aAllocator		The virtual address allocator which will use the slab set.
	@param aNumSlabTypes	The number of slab types this allocator will support.
	@param aWriteLock		Reference to the mutex which is being used to protect allocations
							with this object. This is only used for debug checks and may be
							a mutex assigned by #DMutexPool. In practice, this will usually be an
							address space lock DAddressSpace::iLock.

	@return The newly created #RVirtualAllocSlabSet or the null pointer if there was
			insufficient memory.
	*/
	static RVirtualAllocSlabSet* New(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock);

	~RVirtualAllocSlabSet();

	/**
	Allocate a region of virtual addresses.

	@param[in,out] aAddr	On entry, if this is non-zero it represents
							the start address a specific region to allocate.
							On exit, this is set to the start address of the region allocated.
	@param aSizeShift		Log2 of the size, in bytes, of the region.
	@param aSlabType		The 'slab type' of the address to be allocated.

	@return KErrNone, if successful;
			KErrAlreadyExists, if any part of the region was already allocated.

	@pre The write lock must be held. (The \a aWriteLock argument for the constructor
		 #RVirtualAllocSlabSet::RVirtualAllocSlabSet.)
	*/
	TInt Alloc(TLinAddr& aAddr, TUint aSizeShift, TUint aSlabType);

	/**
	Free a region of virtual addresses.

	@param aAddr		The start address of the region.
	@param aSizeShift	Log2 of the size, in bytes, of the region.

	@pre The write lock must be held. (The \a aWriteLock argument for the constructor
		 #RVirtualAllocSlabSet::RVirtualAllocSlabSet.)
	*/
	void Free(TLinAddr aAddr, TUint aSizeShift);

	/**
	Return true if the the address region specified by \a aAddr and \a aSizeShift was
	allocated by this allocator using the specified \a aSlabType.

	@pre The write lock must be held. (The \a aWriteLock argument for the constructor
		 #RVirtualAllocSlabSet::RVirtualAllocSlabSet.)
	*/
	TBool CheckSlabType(TLinAddr aAddr, TUint aSizeShift, TUint aSlabType);

private:
	/**
	Create a new slab (#TVirtualSlab) for use by this slab set.
	Newly allocated slabs are added to #iLists[\a aSlabType].

	The virtual address range used by the slab is obtained by
	by allocating a slab sized region from #iAllocator.

	@param aAddr		A virtual address which must be in the region to be covered by the slab.
	@param aSlabType	The 'slab type'.
	*/
	TVirtualSlab* NewSlab(TLinAddr aAddr, TUint aSlabType);

	/**
	Delete a slab created with #NewSlab.
	*/
	void DeleteSlab(TVirtualSlab* aSlab);

	/**
	Constructor, for arguments see #New.
	*/
	RVirtualAllocSlabSet(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock);

private:
	/**
	The virtual allocator which is using this slab set.
	*/
	RVirtualAllocator* iAllocator;

	/**
	Container for all slabs owned by this slab set. This is keyed on the starting
	virtual address of the region each slab covers.

	Each slab in this container is also linked into the #iLists member appropriate
	to its slab type.
	*/
	RAddressedContainer iSlabs;

	/**
	The number of different 'slab types' this object can allocate addresses for.
	*/
	TUint iNumSlabTypes;

	/**
	An array of lists which each contain slabs of a single 'slab type'
	which this object has created. Slabs are linked by their TVirtualSlab::iLink
	member.

	This may extend into memory beyond the end of this object and contains
	#iNumSlabTypes entries.

	Each slab in these lists is also contained in #iSlabs.
	*/
	SDblQue iLists[1];
	};
|
679 |
||
680 |
||
681 |
/**
Constructor. #New allocated extra storage immediately after this object for
the iLists entries beyond the first; placement-construct every list here.
(&iLists+n points n*sizeof(SDblQue) past iLists, i.e. at the n'th list.)
*/
FORCE_INLINE RVirtualAllocSlabSet::RVirtualAllocSlabSet(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock)
	: iAllocator(aAllocator), iSlabs(0,aWriteLock), iNumSlabTypes(aNumSlabTypes)
	{
	while(aNumSlabTypes--)
		new (&iLists+aNumSlabTypes) SDblQue;
	}
|
687 |
||
688 |
||
689 |
/**
Create a new slab set for use with the specified allocator.

@param aAllocator		The virtual address allocator which will use the slab set.
@param aNumSlabTypes	The number of slab types to support; must be at least 1.
@param aWriteLock		Mutex protecting allocations, used for debug checks only.

@return The newly created set, or the null pointer if there was insufficient memory.
*/
RVirtualAllocSlabSet* RVirtualAllocSlabSet::New(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock)
	{
	// The object declares iLists[1]; allocate extra room for the remaining
	// aNumSlabTypes-1 lists which live immediately past the end of the object.
	// Guard the subtraction: aNumSlabTypes==0 would underflow the size.
	__NK_ASSERT_DEBUG(aNumSlabTypes>=1);
	TUint size = sizeof(RVirtualAllocSlabSet) + sizeof(SDblQue) * (aNumSlabTypes - 1);
	RVirtualAllocSlabSet* set = (RVirtualAllocSlabSet*)Kern::AllocZ(size);
	if(set)
		new (set) RVirtualAllocSlabSet(aAllocator,aNumSlabTypes,aWriteLock);
	return set;
	}
|
697 |
||
698 |
||
699 |
/**
Destructor. All slabs must have been freed (and hence deleted) by now.
*/
RVirtualAllocSlabSet::~RVirtualAllocSlabSet()
	{
	__NK_ASSERT_DEBUG(iSlabs.Count()==0);
	}
|
703 |
||
704 |
||
705 |
/**
Create a new slab covering the slab-sized region containing aAddr.
The region is first reserved from the owning #RVirtualAllocator; on any
failure the reservation is released again.

@param aAddr		A virtual address which must be in the region to be covered by the slab.
@param aSlabType	The 'slab type'.
@return The new slab, or the null pointer on failure.
*/
TVirtualSlab* RVirtualAllocSlabSet::NewSlab(TLinAddr aAddr, TUint aSlabType)
	{
	// Fix: trace format had three specifiers but only two arguments,
	// which read an indeterminate value from the varargs.
	TRACE2(("RVirtualAllocSlabSet::NewSlab(0x%08x,%d)",aAddr,aSlabType));
	__NK_ASSERT_DEBUG(aSlabType<iNumSlabTypes);

	TVirtualSlab* slab = 0;
	TLinAddr base;
	TUint size;
	TInt r = iAllocator->Alloc(base,size,aAddr&~KVirtualAllocSlabMask,KVirtualAllocSlabSize,aSlabType);
	if(r==KErrNone)
		{
		slab = new TVirtualSlab(iLists[aSlabType],base,aSlabType);
		if(slab && iSlabs.Add(base,slab)!=KErrNone)
			{
			delete slab;
			slab = 0;
			}
		if(!slab)
			iAllocator->Free(base,KVirtualAllocSlabSize);
		}

	TRACE2(("RVirtualAllocSlabSet::NewSlab returns 0x%08x",slab));
	return slab;
	}
|
729 |
||
730 |
||
731 |
/**
Delete a slab created with #NewSlab: remove it from #iSlabs, destroy it
(which also dequeues it from its type list) and release its address region
back to the owning allocator.
*/
void RVirtualAllocSlabSet::DeleteSlab(TVirtualSlab* aSlab)
	{
	TLinAddr base = aSlab->Base();
	// In release builds the result of Remove() is simply discarded.
#ifdef _DEBUG
	TAny* removedSlab =
#endif
	iSlabs.Remove(base);
	__NK_ASSERT_DEBUG(removedSlab==aSlab);
	delete aSlab;
	iAllocator->Free(base,KVirtualAllocSlabSize);
	}
|
742 |
||
743 |
||
744 |
/**
Allocate a sub-slab sized region of virtual addresses.

If aAddr is zero, each existing slab of the requested type is tried in turn,
and a new slab is created if none can satisfy the request. Otherwise the slab
covering aAddr is looked up (and created if absent) and the specific region
is allocated from it.

@param[in,out] aAddr	In: specific start address, or zero for 'any'.
						Out: the start address of the region allocated.
@param aSizeShift		Log2 of the size, in bytes, of the region.
@param aSlabType		The 'slab type' of the address to be allocated.
@return KErrNone, KErrNoMemory or KErrAlreadyExists.
*/
TInt RVirtualAllocSlabSet::Alloc(TLinAddr& aAddr, TUint aSizeShift, TUint aSlabType)
	{
	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift && aSizeShift<KVirtualAllocSlabShift);
	__NK_ASSERT_DEBUG(aSlabType<iNumSlabTypes);

	if(!aAddr)
		{
		// Walk the list of slabs of this type looking for free space...
		SDblQueLink* head = &iLists[aSlabType].iA;
		SDblQueLink* link = head;
		while((link=link->iNext)!=head)
			{
			TVirtualSlab* slab = _LOFF(link,TVirtualSlab,iLink);
			TLinAddr addr = slab->Alloc(aSizeShift);
			if(addr)
				{
				aAddr = addr;
				return KErrNone;
				}
			}
		// ...all full; create a fresh slab and allocate from that.
		TVirtualSlab* slab = NewSlab(0,aSlabType);
		if(!slab)
			return KErrNoMemory;
		TLinAddr addr = slab->Alloc(aSizeShift);
		// Shouldn't ever fail as we've just allocated an empty slab and we can't
		// attempt to allocate more than a whole slab.
		__NK_ASSERT_DEBUG(addr);
		aAddr = addr;
		return KErrNone;
		}

	// Specific address requested: find (or create) the covering slab...
	TVirtualSlab* slab = (TVirtualSlab*)iSlabs.Find(aAddr&~KVirtualAllocSlabMask);
	if(!slab)
		{
		slab = NewSlab(aAddr,aSlabType);
		if(!slab)
			return KErrNoMemory;
		}
	else
		{
		if(slab->SlabType()!=aSlabType)
			return KErrAlreadyExists; // slab is of incompatible type
		}
	return slab->Alloc(aAddr,aSizeShift);
	}
|
788 |
||
789 |
||
790 |
/**
Free a sub-slab sized region of virtual addresses.

@param aAddr		The start address of the region.
@param aSizeShift	Log2 of the size, in bytes, of the region.
*/
void RVirtualAllocSlabSet::Free(TLinAddr aAddr, TUint aSizeShift)
	{
	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift && aSizeShift<KVirtualAllocSlabShift);

	// Locate the slab covering aAddr; freeing the last region in a slab
	// releases the slab itself back to the underlying allocator.
	TVirtualSlab* const slab = (TVirtualSlab*)iSlabs.Find(aAddr&~KVirtualAllocSlabMask);
	if(slab && slab->Free(aAddr,aSizeShift))
		DeleteSlab(slab);
	}
|
799 |
||
800 |
||
801 |
/**
Return true if the region at aAddr was allocated from a slab of type aSlabType.

@param aAddr		The start address of the region.
@param aSizeShift	Log2 of the size, in bytes, of the region.
@param aSlabType	The expected 'slab type'.
*/
TBool RVirtualAllocSlabSet::CheckSlabType(TLinAddr aAddr, TUint aSizeShift, TUint aSlabType)
	{
	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift && aSizeShift<KVirtualAllocSlabShift);

	TVirtualSlab* const slab = (TVirtualSlab*)iSlabs.Find(aAddr&~KVirtualAllocSlabMask);
	const TBool matches = slab && slab->iSlabType==aSlabType;
	if(!matches)
		{
		// Distinguish the two failure modes in the trace output.
		if(!slab)
			{
			TRACE2(("RVirtualAllocSlabSet::CheckSlabType returns No Slab"));
			}
		else
			{
			TRACE2(("RVirtualAllocSlabSet::CheckSlabType returns Wrong Type"));
			}
		}
	return matches;
	}
|
820 |
||
821 |
||
822 |
// |
|
823 |
// RVirtualAllocator |
|
824 |
// |
|
825 |
||
826 |
/**
Construct an unusable allocator; #Construct must be called before use.
*/
RVirtualAllocator::RVirtualAllocator()
	: iBase(0), iSize(0), iAllocator(0), iSlabSet(0)
	{}
|
829 |
||
830 |
||
831 |
/**
Destructor. All addresses must have been freed first
(debug-asserted via the bitmap allocator's availability count).
*/
RVirtualAllocator::~RVirtualAllocator()
	{
	__NK_ASSERT_DEBUG(iAllocator==0 || iAllocator->iAvail==iAllocator->iSize); // should be empty
	delete iAllocator;
	delete iSlabSet;
	}
837 |
||
838 |
||
839 |
/**
Second-phase construction.

@param aStart			Start of the virtual address region to manage; must be
						aligned to KVirtualAllocSlabSize.
@param aEnd				End of the region (exclusive); same alignment requirement.
@param aNumSlabTypes	The number of slab types to support.
@param aWriteLock		Mutex protecting allocations, used for debug checks only.

@return KErrNone, KErrArgument or KErrNoMemory. On failure the destructor
		cleans up any partially-constructed members.
*/
TInt RVirtualAllocator::Construct(TLinAddr aStart, TLinAddr aEnd, TUint aNumSlabTypes, DMutex*& aWriteLock)
	{
	if((aStart|aEnd)&KVirtualAllocSlabMask)
		return KErrArgument; // region not aligned to KVirtualAllocSlabSize
	// One bitmap bit per slab-sized unit in the region.
	TUint bitSize = (aEnd-aStart)>>KVirtualAllocSlabShift;
	iAllocator = TBitMapAllocator::New(bitSize, ETrue);
	if(!iAllocator)
		return KErrNoMemory;
	iSlabSet = RVirtualAllocSlabSet::New(this,aNumSlabTypes,aWriteLock);
	if(!iSlabSet)
		return KErrNoMemory;
	iBase = aStart;
	iSize = aEnd-aStart;
	return KErrNone;
	}
|
854 |
||
855 |
||
856 |
/**
Round a region outwards to the allocation granularity used for it.

The granularity starts at KVirtualAllocShift and is increased until either
the first and last addresses lie in the same 2^granularity block (so the
whole region can be described at that granularity) or the slab size is
reached. aAddr is rounded down and aSize rounded up to that granularity.

@param[in,out] aAddr	Region start; aligned down on exit.
@param[in,out] aSize	Region size; expanded on exit to cover the original region.
@return Log2 of the chosen granularity, at most KVirtualAllocSlabShift.
*/
TUint RVirtualAllocator::AdjustRegion(TLinAddr& aAddr, TUint& aSize)
	{
	TLinAddr first = aAddr;
	TLinAddr last = (aAddr+aSize-1);
	TLinAddr dif = first^last;	// high bits differing between first and last
	TUint granularity = KVirtualAllocShift;
	while(dif>>granularity && ++granularity<KVirtualAllocSlabShift)
		{}
	first >>= granularity;
	last >>= granularity;
	aAddr = first<<granularity;
	aSize = (last-first+1)<<granularity;
	return granularity;
	}
|
870 |
||
871 |
||
872 |
/**
Allocate a region of virtual addresses.

@param[out] aAddr		Returns the start address of the region actually allocated.
@param[out] aSize		Returns the size, in bytes, actually allocated; may be
						larger than requested due to granularity rounding.
@param aRequestedAddr	If non-zero, the specific start address to allocate.
@param aRequestedSize	The size, in bytes, of the region required.
@param aSlabType		The 'slab type' used for sub-slab sized allocations.

@return KErrNone, KErrArgument, KErrNoMemory or KErrAlreadyExists.
*/
TInt RVirtualAllocator::Alloc(TLinAddr& aAddr, TUint& aSize, TLinAddr aRequestedAddr, TUint aRequestedSize, TUint aSlabType)
	{
	TRACE2(("RVirtualAllocator::Alloc(?,?,0x%08x,0x%08x,%d)",aRequestedAddr,aRequestedSize,aSlabType));

	if(!aRequestedSize)
		{
		TRACE2(("RVirtualAllocator::Alloc zero size"));
		return KErrArgument;
		}

	aAddr = aRequestedAddr;
	aSize = aRequestedSize;
	TUint align = AdjustRegion(aAddr,aSize);
	TRACE2(("RVirtualAllocator::Alloc adjusted to 0x%08x+0x%08x, align=%d",aAddr,aSize,align));

	// Sub-slab sized regions are satisfied from the slab set...
	if(align<KVirtualAllocSlabShift)
		return iSlabSet->Alloc(aAddr,align,aSlabType);

	// ...slab sized regions come directly from the bitmap allocator,
	// in units of whole slabs.
	__NK_ASSERT_DEBUG(align==KVirtualAllocSlabShift);
	TUint size = aSize>>KVirtualAllocSlabShift;

	if(!aAddr)
		{
		// any address will do; find a run of free slabs...
		TInt r = iAllocator->AllocConsecutive(size, EFalse);
		if(r>=0)
			{
			iAllocator->Alloc(r, size);
			aAddr = iBase+(r<<KVirtualAllocSlabShift);
			return KErrNone;
			}
		return KErrNoMemory;
		}

	// specific address requested...
	if(!InRange(aAddr,aSize))
		{
		TRACE2(("RVirtualAllocator::Alloc not in range"));
		return KErrArgument;
		}

	TUint offset = TUint(aAddr-iBase)>>KVirtualAllocSlabShift;
	if(!iAllocator->NotFree(offset,size))
		{
		iAllocator->Alloc(offset,size);
		return KErrNone;
		}
	else
		{
		TRACE2(("RVirtualAllocator::Alloc already allocated!"));
		return KErrAlreadyExists;
		}
	}
|
924 |
||
925 |
||
926 |
/**
Free a region of virtual addresses previously allocated with #Alloc.
The region is re-adjusted with #AdjustRegion, so passing the values
originally requested (rather than those returned) frees the same region.

@param aAddr	The start address of the region.
@param aSize	The size, in bytes, of the region; zero is a no-op.
*/
void RVirtualAllocator::Free(TLinAddr aAddr, TUint aSize)
	{
	if(!aSize)
		return;

	TRACE2(("RVirtualAllocator::Free(0x%08x,0x%08x)",aAddr,aSize));

	TUint align = AdjustRegion(aAddr,aSize);
	TRACE2(("RVirtualAllocator::Free adjusted to 0x%08x+0x%08x, align=%d",aAddr,aSize,align));

	if(!InRange(aAddr,aSize))
		{
		TRACE2(("RVirtualAllocator::Free invalid region"));
		__NK_ASSERT_ALWAYS(0);
		return; // invalid region
		}

	// Sub-slab sized regions go back to the slab set...
	if(align<KVirtualAllocSlabShift)
		{
		iSlabSet->Free(aAddr,align);
		return;
		}

	// ...whole slabs go back to the bitmap allocator.
	__NK_ASSERT_DEBUG(align==KVirtualAllocSlabShift);
	TUint offset = (aAddr-iBase)>>KVirtualAllocSlabShift;
	TUint size = aSize>>KVirtualAllocSlabShift;
	iAllocator->Free(offset,size);
	}
|
954 |
||
955 |
||
956 |
/**
Return true if the region at aAddr/aSize is compatible with aSlabType:
sub-slab regions must come from a slab of that type, slab-sized regions
carry no type and always match.

@param aAddr		The start address of the region.
@param aSize		The size, in bytes, of the region.
@param aSlabType	The expected 'slab type'.
*/
TBool RVirtualAllocator::CheckSlabType(TLinAddr aAddr, TUint aSize, TUint aSlabType)
	{
	TRACE2(("RVirtualAllocator::CheckSlabType(0x%08x,0x%08x,%d)",aAddr,aSize,aSlabType));
	if(!aSize)
		return false;

	TUint align = AdjustRegion(aAddr,aSize);

	if(!InRange(aAddr,aSize))
		{
		TRACE2(("RVirtualAllocator::CheckSlabType not in range"));
		return false;
		}

	// Slab-sized regions are not allocated from the slab set and therefore
	// have no slab type to check.
	if(align>=KVirtualAllocSlabShift)
		return true;
	return iSlabSet->CheckSlabType(aAddr,align,aSlabType);
	}
|
979 |
||
980 |
||
981 |
// |
|
982 |
// RBackwardsVirtualAllocator |
|
983 |
// |
|
984 |
||
985 |
/**
Allocate as #RVirtualAllocator::Alloc, but with addresses mirrored about the
centre of the managed region so that 'any address' allocations are handed out
from the top of the region downwards. Requested and returned addresses are
mapped through the same reflection: a region [a,a+s) maps to
[iBase+iSize-(a+s-iBase), ...), preserving size and alignment.
*/
TInt RBackwardsVirtualAllocator::Alloc(TLinAddr& aAddr, TUint& aSize, TLinAddr aRequestedAddr, TUint aRequestedSize, TUint aSlabType)
	{
	if(aRequestedAddr)
		aRequestedAddr = (iBase+iSize)-(aRequestedAddr+aRequestedSize-iBase);
	TInt r = RVirtualAllocator::Alloc(aAddr,aSize,aRequestedAddr,aRequestedSize,aSlabType);
	if(r==KErrNone)
		aAddr = (iBase+iSize)-(aAddr+aSize-iBase);
	return r;
	}
|
994 |
||
995 |
||
996 |
/**
Free a region allocated with #RBackwardsVirtualAllocator::Alloc, applying the
same address reflection used on allocation before delegating to the base class.
*/
void RBackwardsVirtualAllocator::Free(TLinAddr aAddr, TUint aSize)
	{
	RVirtualAllocator::Free((iBase+iSize)-(aAddr+aSize-iBase),aSize);
	}
|
1000 |
||
1001 |