/*
* Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
* All rights reserved.
* This component and the accompanying materials are made available
* under the terms of "Eclipse Public License v1.0"
* which accompanies this distribution, and is available
* at the URL "http://www.eclipse.org/legal/epl-v10.html".
*
* Initial Contributors:
* Nokia Corporation - initial contribution.
*
* Contributors:
*
* Description:
*
*/


#include "allocator.h"
#include <kernel/kern_priv.h>

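//The allocator starts out with a single free range covering the whole
//[0, iMaxSize) window; Allocate() splits it up and DeAllocate() re-merges it.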
TAddressAllocator::TAddressAllocator(TLinAddr aMaxSize)
	:iMaxSize(aMaxSize)
	{
	TInt r = iRangeList.Append(TRange(0, iMaxSize, EFalse));
	__NK_ASSERT_ALWAYS(KErrNone==r);

	_LIT(KAllocatorMutex, "PCIAllocatorMutex");
	r = Kern::MutexCreate(iMutex, KAllocatorMutex, KMutexOrdGeneral0);
	__NK_ASSERT_ALWAYS(KErrNone==r);
	}

TAddressAllocator::~TAddressAllocator()
	{
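	//all allocations should have been released, leaving only the initial free range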
	__NK_ASSERT_DEBUG(iRangeList.Count()==1);
	iRangeList.Reset();
	iMutex->Close(NULL);
	iMutex=NULL;
	}

/**
@return KErrNone if the allocation succeeded, otherwise KErrNotFound.
@param aAddress This will be set to the starting address of the range if allocation succeeded.
@param aSize Size of the memory range required in bytes. This must be a power of 2.
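
For example (an illustrative sketch only; the 64MB window and the 4KB
request are hypothetical values, not anything mandated by this class):
@code
	TAddressAllocator allocator(0x04000000);	// manage a 64MB PCI window
	TLinAddr addr = 0;
	TInt r = allocator.Allocate(addr, 0x1000);	// request 4KB; result is 4KB aligned
	if(r == KErrNone)
		{
		//addr is now the start of an exclusively owned, naturally aligned range
		}
@endcode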
*/
TInt TAddressAllocator::Allocate(TLinAddr& aAddress, TLinAddr aSize)
	{
	//check size alignment
	__NK_ASSERT_ALWAYS( (aSize & (aSize - 1)) == 0 ); //assert aSize is a power of 2
	TInt r = KErrNotFound;

	NKern::ThreadEnterCS();
	Kern::MutexWait(*iMutex);

	const TInt count = iRangeList.Count();
	for(TInt i = 0; i < count; ++i)
		{
		TRange& range(iRangeList[i]);
		if(range.iAllocated == EFalse)
			{
			//amount which must be added to the start address to align it to aSize
			//(aSize must be a power of 2)
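			//e.g. with the hypothetical values iStart==0x1234 and aSize==0x1000:
			//(-0x1234) & 0xFFF == 0xDCC, and 0x1234 + 0xDCC == 0x2000 (4KB aligned)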
			const TLinAddr alignedOffset = (-range.iStart) & (aSize - 1);

			if((alignedOffset + aSize) <= range.iLength)
				{
				const TLinAddr alignedStart = range.iStart + alignedOffset;
				DoAllocate(alignedStart, aSize, i);
				aAddress = alignedStart;
				r = KErrNone;
				break;
				}

			}
		}
	__KTRACE_OPT(KPCI, Kern::Printf("TAddressAllocator::Allocate(): Allocated %d (0x%X) bytes at 0x%X",
		aSize, aSize, aAddress));
	Kern::MutexSignal(*iMutex);
	NKern::ThreadLeaveCS();
	return r;
	}

/**
Mark a region as allocated, on the assumption that the region is currently unallocated.

@param aAddress The start address requested for the new region
@param aSize Length of the new region
@param aIndex Position to modify in iRangeList
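
For example, allocating from the middle of a free range splits it into three:
@code
	before:  |-------------- free --------------|
	after:   |- free -|---- alloc ----|- free --|
@endcode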
*/
void TAddressAllocator::DoAllocate(TLinAddr aAddress, TLinAddr aSize, TInt aIndex)
	{
	__NK_ASSERT_DEBUG(InvariantCheck());
	//check that the new region will lie within the existing range
	__NK_ASSERT_ALWAYS(
		(aAddress >= iRangeList[aIndex].iStart) &&
		((aAddress + aSize) <= (iRangeList[aIndex].End() + 1))
		);

	//allocating at the start of an unallocated region
	if(iRangeList[aIndex].iStart == aAddress)
		{
		//will there be space left at the end of the region?
		if((iRangeList[aIndex].iLength - aSize) > 0)
			{
			const TRange newFreeRange(iRangeList[aIndex].iStart + aSize, iRangeList[aIndex].iLength - aSize, EFalse);
			const TInt r = iRangeList.Insert(newFreeRange, aIndex + 1);
			__NK_ASSERT_ALWAYS(KErrNone == r);

			iRangeList[aIndex].iLength = aSize;
			}

		iRangeList[aIndex].iAllocated = ETrue;
		}
	else
		{
		//allocating from the middle of an unallocated region
		const TRange newAllocRange(aAddress, aSize, ETrue);
		TInt r = iRangeList.Insert(newAllocRange, aIndex + 1);
		__NK_ASSERT_ALWAYS(KErrNone == r);

		//is there an unallocated gap after the newly allocated range?
		TLinAddr diff = iRangeList[aIndex].End() - newAllocRange.End();
		if( diff > 0 )
			{
			const TRange newFreeRange(newAllocRange.End() + 1, diff, EFalse);
			r = iRangeList.Insert(newFreeRange, aIndex + 2);
			__NK_ASSERT_ALWAYS(KErrNone == r);
			}

		iRangeList[aIndex].iLength = aAddress - iRangeList[aIndex].iStart;
		}
	//check invariant: run through the array to verify that ranges are contiguous
	__NK_ASSERT_DEBUG(InvariantCheck());
	}

#if defined (_DEBUG)
void TAddressAllocator::Print()
	{
	__KTRACE_OPT(KPCI, Kern::Printf("TAddressAllocator::Print(): Printing ranges"));
	const TInt count = iRangeList.Count();
	for(TInt i=0; i<count; ++i)
		{
		const TRange& r(iRangeList[i]);
		__KTRACE_OPT(KPCI, Kern::Printf(" index:%d, start:%x, length:%x, end:%x, allocated:%d",
			i, r.iStart, r.iLength, r.End(), r.iAllocated));
		}
	}

TBool TAddressAllocator::InvariantCheck()
	{
	//Print();
	const TInt count = iRangeList.Count();
	__NK_ASSERT_ALWAYS(count>0);
	__NK_ASSERT_ALWAYS(iRangeList[0].iStart==0);
	__NK_ASSERT_ALWAYS(iRangeList[count-1].End()==iMaxSize-1);

	for(TInt i=1; i<count; ++i)
		{
		const TRange& prev = iRangeList[i-1];
		const TRange& curr = iRangeList[i];
		__NK_ASSERT_ALWAYS(prev.End()==curr.iStart-1);
		__NK_ASSERT_ALWAYS(curr.iLength>0);

		//check that free spaces are always consolidated
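		//(two adjacent free ranges should have been merged by Remove())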
		if(!prev.iAllocated)
			{
			__NK_ASSERT_ALWAYS(curr.iAllocated);
			}
		}
	return ETrue;
	}
#endif

TInt TAddressAllocator::TRange::OrderByStart(const TRange& aKeyRange, const TRange& aRange)
	{
	if(aKeyRange.iStart > aRange.iStart)
		return 1;
	else if(aKeyRange.iStart < aRange.iStart)
		return -1;
	else
		return 0;
	}

/**
Make a region of PCI memory available again.

@param aAddress Start address of an allocated PCI range to deallocate.
@return
	- KErrNone if the region was deallocated
	- KErrNotFound if aAddress is not the beginning of a previously
	  allocated range.
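
For example (continuing the hypothetical sketch from Allocate() above):
@code
	TInt r = allocator.DeAllocate(addr);	// addr as returned by Allocate()
	__NK_ASSERT_DEBUG(r == KErrNone);
	r = allocator.DeAllocate(addr);			// range no longer allocated...
	__NK_ASSERT_DEBUG(r == KErrNotFound);	// ...so the second call fails
@endcode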
*/
TInt TAddressAllocator::DeAllocate(TLinAddr aAddress)
	{
	NKern::ThreadEnterCS();
	Kern::MutexWait(*iMutex);

	TInt r=KErrNone;

	TRange key(aAddress, 0, EFalse);
	TLinearOrder<TRange> startOrder(TRange::OrderByStart);
	TInt index = iRangeList.FindInOrder(key, startOrder);

	if(index==KErrNotFound || (!iRangeList[index].iAllocated))
		{
		r=KErrNotFound;
		}
	else
		{
		Remove(index);
		}

	Kern::MutexSignal(*iMutex);
	NKern::ThreadLeaveCS();
	return r;
	}

/**
Remove the allocated region and adjust the remaining regions
to consolidate free space.
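
Depending on whether the neighbouring ranges are unallocated, up to three
ranges are merged into one (ALLOC is the range being removed; a missing
neighbour at either end of the array behaves like an allocated one):
@code
	free  | ALLOC | free	->	single merged free range
	free  | ALLOC | alloc	->	merged into the preceding free range
	alloc | ALLOC | free	->	merged into the following free range
	alloc | ALLOC | alloc	->	range is simply marked as free
@endcode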
*/
void TAddressAllocator::Remove(TInt aIndex)
	{
	__KTRACE_OPT(KPCI, Kern::Printf("TAddressAllocator::Remove(): Removing %d (0x%X) bytes at 0x%X",
		iRangeList[aIndex].iLength, iRangeList[aIndex].iLength, iRangeList[aIndex].iStart));
	__NK_ASSERT_DEBUG(InvariantCheck());
	const TInt count = iRangeList.Count();
	//range is preceded by an unallocated range
	if(aIndex > 0 && (!iRangeList[aIndex - 1].iAllocated) )
		{
		//range is also followed by an unallocated range
		if(aIndex < (count - 1) && !(iRangeList[aIndex + 1].iAllocated))
			{
			iRangeList[aIndex - 1].iLength = iRangeList[aIndex + 1].End() + 1 - iRangeList[aIndex - 1].iStart;
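			//the second Remove(aIndex) below deletes the following free range,
			//which shifts into position aIndex after the first call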
			iRangeList.Remove(aIndex);
			iRangeList.Remove(aIndex);
#ifdef _DEBUG
			//The PCI kernel extension contains several persistent dynamic arrays, and by default these
			//RArrays do not free heap for every item that is removed. To ensure that kernel heap checking
			//succeeds, Compress() calls are needed to force this excess memory to be freed. These calls
			//are made in UDEB builds only, as KHEAP checks are only performed in UDEB mode.
			iRangeList.Compress();
#endif
			}
		else
			{
			iRangeList[aIndex - 1].iLength = iRangeList[aIndex].End() + 1 - iRangeList[aIndex - 1].iStart;
			iRangeList.Remove(aIndex);
#ifdef _DEBUG
			//see the comment on Compress() above: needed for kernel heap checking in UDEB builds
			iRangeList.Compress();
#endif
			}
		}
	//range is not preceded by an unallocated range
	else
		{
		//range is followed by an unallocated range
		if(aIndex < (count - 1) && !(iRangeList[aIndex + 1].iAllocated))
			{
			iRangeList[aIndex].iAllocated=EFalse;
			iRangeList[aIndex].iLength=iRangeList[aIndex + 1].End() + 1 - iRangeList[aIndex].iStart;
			iRangeList.Remove(aIndex + 1);
#ifdef _DEBUG
			//see the comment on Compress() above: needed for kernel heap checking in UDEB builds
			iRangeList.Compress();
#endif
			}
		else
			{
			iRangeList[aIndex].iAllocated = EFalse;
			}
		}
	//check invariant: run through the array to verify that ranges are contiguous
	__NK_ASSERT_DEBUG(InvariantCheck());
	}