// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// Inline entry node organisations
//
//

#include "UB_STD.H"

EXPORT_C TBtreeInlineLeafOrg::TBtreeInlineLeafOrg()
	{}

EXPORT_C void TBtreeInlineLeafOrg::SetEntrySize(TInt aSize)
	{
	__ASSERT_DEBUG(aSize<=KMaxBtreeKeyLength,Panic(EBadEntrySize));
	iEntrySize=Align4(aSize);
	iMaxEntries=(KPoolPageSize-sizeof(SHeader))/iEntrySize;
	}
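
// Illustrative sizing sketch (assumed values, not the shipped constants):
// with aSize=10 an entry is padded to Align4(10)=12 bytes; assuming a
// KPoolPageSize of 512 bytes and an 8-byte SHeader, this yields
// iMaxEntries=(512-8)/12=42 entries per leaf node. The real constants are
// defined in the store headers (see UB_STD.H).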
|
EXPORT_C TBool TBtreeInlineLeafOrg::Insert(TAny *aNode,TInt aPos,const TDesC8& anEntry) const
	{
	SNode* const pn=Node(aNode);
	__ASSERT_DEBUG(aPos<=pn->iHead.iCount,Panic(EBadEntryPos));
	if (pn->iHead.iCount==iMaxEntries)
		return EFalse;
	TUint8* pe=Entry(pn,aPos);
	Mem::Copy(pe+iEntrySize,pe,iEntrySize*(pn->iHead.iCount-aPos));
	TInt size=anEntry.Size();
	__ASSERT_ALWAYS(size<=iEntrySize,Panic(EBadEntrySize));
	Mem::Copy(pe,anEntry.Ptr(),size);
	++pn->iHead.iCount;
	return ETrue;
	}

TAny *TBtreeInlineLeafOrg::DoRedistribute(TAny *aLeftNode,TAny *aRightNode,TInt aInsertPos) const
//
// Even out the distribution of entries in aLeftNode and aRightNode.
// If aInsertPos>=0 we want to insert at this cumulative position, so take it into account when redistributing.
// If the total number of entries is <=iMaxEntries or >=iMaxEntries*2 then return 0;
// otherwise return the node into which insertion should take place.
//
	{
	SNode* const pl=Node(aLeftNode);
	SNode* const pr=Node(aRightNode);
	TAny *insertNode=aRightNode;
	TInt lCount=pl->iHead.iCount;
	TInt rCount=pr->iHead.iCount;
	TInt total=lCount+rCount;
	TInt left=(total+1)>>1;
	if (aInsertPos>=0)
		{ // call from InsertOverflow
		__ASSERT_DEBUG(aInsertPos<=total,Panic(EBadEntryPos));
		if (aInsertPos<left)
			{
			insertNode=aLeftNode;
			--left;
			}
		if (total>=iMaxEntries<<1)
			return 0; // no space
		}
	else
		{ // call from Redistribute
		if (total<=iMaxEntries)
			return 0; // underflow state
		}
	pl->iHead.iCount=left;
	pr->iHead.iCount=total-left;
	if (lCount>left)
		{ // move entries right
		TInt move=lCount-left;
		Mem::Copy(Entry(pr,move),pr->iEntries,rCount*iEntrySize);
		Mem::Copy(pr->iEntries,Entry(pl,left),move*iEntrySize);
		}
	else if (lCount<left)
		{ // move entries left
		TInt move=left-lCount;
		Mem::Copy(Entry(pl,lCount),pr->iEntries,move*iEntrySize);
		Mem::Copy(pr->iEntries,Entry(pr,move),(rCount-move)*iEntrySize);
		}
	return insertNode;
	}
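
// Worked example (illustrative numbers, assuming iMaxEntries=10): a
// Redistribute call (aInsertPos<0) with lCount=10 and rCount=2 has
// total=12>iMaxEntries, so the split point is left=(12+1)>>1=6. Since
// lCount>left, move=4: the right node's 2 entries are first shifted up 4
// slots, then the left node's entries 6..9 are copied into slots 0..3 of
// the right node, leaving 6 entries on each side. Updating the pivot in
// the parent is the caller's job.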
|
EXPORT_C TBool TBtreeInlineLeafOrg::InsertOverflow(TAny *aLeftNode,TAny *aRightNode,TInt aPos,TBool aInsertOnLeft,const TDesC8& anEntry) const
//
// Redistribute the keys in aLeftNode and aRightNode to insert a new entry; return success or failure.
// We know that the insertion node is full (see the code for CBplusTree::InsertAtCurrentL).
// Promotion is done by the manager.
//
	{
	__ASSERT_DEBUG(Node(aInsertOnLeft?aLeftNode:aRightNode)->iHead.iCount==iMaxEntries,Panic(EIllegalOverflow));
	if (!aInsertOnLeft)
		aPos+=Node(aLeftNode)->iHead.iCount;
	TAny* node=DoRedistribute(aLeftNode,aRightNode,aPos);
	if (!node)
		return EFalse;
	if (node==aRightNode)
		aPos-=Node(aLeftNode)->iHead.iCount;
	return Insert(node,aPos,anEntry);
	}
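
// Illustrative flow (assumed numbers, iMaxEntries=10): a full left node
// (10 entries), a right node with 4 entries, and an insert at aPos=3 on the
// left give a cumulative position of 3. DoRedistribute keeps the insertion
// on the left and moves the split point to 6, so Insert() then has room at
// position 3. Had the right node been chosen instead, aPos would first be
// rebased by subtracting the left node's new count.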
|
EXPORT_C void TBtreeInlineLeafOrg::InsertSplit(TAny *aLeftNode,TAny *aRightNode,TInt aPos,const TDesC8& anEntry) const
//
// Split the contents of the left node between left and right and insert the entry;
// copy the link from the left node to the right node.
//
	{
	__ASSERT_DEBUG(Node(aLeftNode)->iHead.iCount==iMaxEntries,Panic(EIllegalSplit));
	__ASSERT_DEBUG(Node(aRightNode)->iHead.iCount==0,Panic(EIllegalSplit));
	TAny* node=DoRedistribute(aLeftNode,aRightNode,aPos);
	if (node==aRightNode)
		aPos-=Node(aLeftNode)->iHead.iCount;
	Insert(node,aPos,anEntry);
	Node(aRightNode)->iHead.iLink=Node(aLeftNode)->iHead.iLink;
	}
|
EXPORT_C TBool TBtreeInlineLeafOrg::Delete(TAny *aNode,TInt aPos) const
//
// Delete the specified entry from the node.
// Part of the contract is to guarantee that the entry is deleted, even if the node underflows.
//
	{
	SNode* const pn=Node(aNode);
	__ASSERT_DEBUG(aPos<pn->iHead.iCount,Panic(EBadEntryPos));

	--pn->iHead.iCount;
	TUint8* pe=Entry(pn,aPos);
	Mem::Copy(pe,pe+iEntrySize,(pn->iHead.iCount-aPos)*iEntrySize);
	return pn->iHead.iCount>=iMaxEntries>>1; // underflow criterion
	}
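
// Sketch of the underflow rule (assumed numbers, iMaxEntries=10): the floor
// is iMaxEntries>>1=5 entries. Deleting from a node holding 5 entries leaves
// 4, so EFalse is returned and the manager responds by redistributing with,
// or concatenating into, a sibling; the entry itself is removed in every
// case.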
|
EXPORT_C TBool TBtreeInlineLeafOrg::Redistribute(TAny *aLeftNode,TAny *aRightNode) const
//
// Even out the distribution of entries in aLeftNode and aRightNode if there are enough keys.
// Leaf nodes do not set the pivot; that is done by the manager.
//
	{
	return DoRedistribute(aLeftNode,aRightNode)!=0;
	}

EXPORT_C void TBtreeInlineLeafOrg::Concatenate(TAny *aLeftNode,const TAny *aRightNode) const
//
// Join aLeftNode and aRightNode together in aLeftNode and set the link.
// The contract says that the entries will fit.
//
	{
	SNode* const pl=Node(aLeftNode);
	const SNode* const pr=Node(aRightNode);
	TInt rCount=pr->iHead.iCount;
	__ASSERT_DEBUG(pl->iHead.iCount+rCount<=iMaxEntries,Panic(ECannotConcatenate));
	Mem::Copy(Entry(pl,pl->iHead.iCount),pr->iEntries,rCount*iEntrySize);
	pl->iHead.iCount+=rCount;
	pl->iHead.iLink=pr->iHead.iLink;
	}
|
EXPORT_C TInt TBtreeInlineLeafOrg::LastEntry(const TAny* aNode) const
	{
	return Node(aNode)->iHead.iCount;
	}

EXPORT_C const TAny *TBtreeInlineLeafOrg::EntryPtr(const TAny* aNode,TInt aPos) const
	{
	__ASSERT_DEBUG(aPos<Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
	return Entry(Node(aNode),aPos);
	}

EXPORT_C TPtrC8 TBtreeInlineLeafOrg::Entry(const TAny* aNode,TInt aPos) const
	{
	return TPtrC8((const TUint8*)EntryPtr(aNode,aPos),iEntrySize);
	}

EXPORT_C TPageRef TBtreeInlineLeafOrg::LinkNode(const TAny* aNode) const
	{
	return Node(aNode)->iHead.iLink;
	}

EXPORT_C void TBtreeInlineLeafOrg::SetLinkNode(TAny* aNode,TPageRef aNextNode) const
	{
	Node(aNode)->iHead.iLink=aNextNode;
	}


// class TBtreeInlineIndexOrg

EXPORT_C TBtreeInlineIndexOrg::TBtreeInlineIndexOrg()
	{}

EXPORT_C void TBtreeInlineIndexOrg::SetEntrySize(TInt aSize)
	{
	__ASSERT_DEBUG(aSize<=KMaxBtreeKeyLength,Panic(EBadEntrySize));
	iEntrySize=_FOFF(SEntry,iKey[Align4(aSize)]);
	iMaxEntries=(KPoolPageSize-sizeof(SHeader)-sizeof(TPageRef))/iEntrySize;
	}
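
// Illustrative layout (assumed sizes): an index entry is a child reference
// followed by a key padded to a 4-byte boundary, so with aSize=10 and a
// 4-byte TPageRef, iEntrySize=_FOFF(SEntry,iKey[12])=16 bytes. Assuming
// KPoolPageSize=512 and an 8-byte SHeader, iMaxEntries=(512-8-4)/16=31;
// the extra sizeof(TPageRef) subtracted here pays for the leftmost child
// pointer, which carries no key.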
|
EXPORT_C TBool TBtreeInlineIndexOrg::Insert(TAny *aNode,TInt aPos,const TDesC8& anEntry,TPageRef aChild) const
	{
	SNode* const pn=Node(aNode);
	__ASSERT_DEBUG(aPos<=pn->iHead.iCount,Panic(EBadEntryPos));
	if (pn->iHead.iCount==iMaxEntries)
		return EFalse;
	TUint8* pe=Entry(pn,aPos)->iKey;
	Mem::Copy(pe+iEntrySize,pe,iEntrySize*(pn->iHead.iCount-aPos));
	TInt size=anEntry.Size();
	__ASSERT_ALWAYS(size<=KeySize(),Panic(EBadEntrySize));
	*(TPageRef*)Mem::Copy(pe,anEntry.Ptr(),size)=aChild;
	++pn->iHead.iCount;
	return ETrue;
	}

TBtreeInlineIndexOrg::SNode* TBtreeInlineIndexOrg::DoRedistribute(TAny *aLeftNode,TAny *aRightNode,const TDesC8& aPivot,TBtreePivot& aNewPivot,TInt aInsertPos) const
//
// Even out the distribution of entries in aLeftNode and aRightNode.
// If aInsertPos>=0 we want to insert at this cumulative position, so take it into account when redistributing.
// If the total number of entries (including the pivot) is <=iMaxEntries or >iMaxEntries*2 then return NULL;
// otherwise return the node into which insertion should take place.
// If the insertion should be promoted the insert position is beyond the end of the left node;
// otherwise the new pivot is copied to aNewPivot.
//
	{
	SNode* const pl=Node(aLeftNode);
	SNode* const pr=Node(aRightNode);
	SNode *insertNode=pr;
	TInt lCount=pl->iHead.iCount;
	TInt rCount=pr->iHead.iCount;
	TInt total=lCount+rCount+1; // including pivot entry
	TInt left=total>>1;
	if (aInsertPos>=0)
		{ // call from InsertOverflow
		__ASSERT_DEBUG(aInsertPos<=total,Panic(EBadEntryPos));
		if (total>iMaxEntries<<1)
			return NULL; // no space to insert
		if (aInsertPos<=left)
			{
			if (aInsertPos<left)
				--left;
			insertNode=pl;
			}
		}
	else
		{ // call from Redistribute
		if (total<=iMaxEntries)
			return NULL; // underflow state
		}
	pl->iHead.iCount=left;
	pr->iHead.iCount=total-left-1; // pivot not included
	TInt pSize=aPivot.Size();
	__ASSERT_DEBUG(pSize<=KeySize(),Panic(EBadEntrySize));
	if (lCount>left)
		{ // move entries right
		TInt move=lCount-left;
		Mem::Copy(Entry(pr,move),pr->iEntries,rCount*iEntrySize+sizeof(TPageRef));
		TUint8 *pp=Mem::Copy(pr->iEntries,Entry(pl,left+1),(move-1)*iEntrySize+sizeof(TPageRef));
		Mem::Copy(pp,aPivot.Ptr(),pSize);
		aNewPivot.Copy(Entry(pl,left)->iKey,KeySize()); // new pivot
		}
	else if (lCount<left)
		{ // move entries left
		TInt move=left-lCount-1;
		TUint8 *pp=Mem::Copy(Entry(pl,lCount)->iKey,aPivot.Ptr(),pSize);
		Mem::Copy(pp,pr->iEntries,move*iEntrySize+sizeof(TPageRef));
		aNewPivot.Copy(Entry(pr,move)->iKey,KeySize());
		Mem::Copy(pr->iEntries,Entry(pr,move+1),(rCount-move-1)*iEntrySize+sizeof(TPageRef));
		}
	else
		{ // should we ever get here? (lCount==left)
		aNewPivot=aPivot;
		}
	return insertNode;
	}
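
// Worked example (illustrative numbers, iMaxEntries=10 assumed): a
// Redistribute call with lCount=9 and rCount=2 gives total=9+2+1=12 (the
// separating pivot counts as an entry), left=6, so the right node ends up
// with 12-6-1=5 entries. Since lCount>left, move=3: keys 7 and 8 slide with
// their child references into the front of the right node, the old pivot
// descends to become the right node's key at index 2, and the left node's
// key 6 is copied out through aNewPivot as the new separator; the classic
// B-tree rotation through the parent.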
|
EXPORT_C TBool TBtreeInlineIndexOrg::InsertOverflow(TAny *aLeftNode,TAny *aRightNode,TInt aPos,TBool aInsertOnLeft,const TDesC8& anEntry,
	TPageRef aChild,const TDesC8& aPivot,TBtreePivot& aNewPivot) const
//
// Redistribute the keys in aLeftNode and aRightNode to insert a new entry; return success or failure.
// We know that the insertion node is full (see the code for Insert).
//
	{
	SNode* const pl=Node(aLeftNode);
	if (!aInsertOnLeft)
		aPos+=pl->iHead.iCount+1; // cumulative insertion point
	SNode* insert=DoRedistribute(aLeftNode,aRightNode,aPivot,aNewPivot,aPos);
	if (insert==NULL) // no space
		return EFalse;
	TInt lCount=pl->iHead.iCount;
	if (insert==aRightNode)
		aPos-=lCount+1; // insert position in right node
	else
		{
		// check for the special situation [aPos]:[aPos-1]
		// in which case re-arrange to promote the new entry
		SNode* pr=Node(aRightNode);
		if (aPos==lCount && pr->iHead.iCount<lCount)
			{
			Insert(pr,0,aNewPivot,Entry(pr,0)->iChild);
			Entry(pr,0)->iChild=aChild;
			aNewPivot=anEntry;
			return ETrue;
			}
		}
	__ASSERT_DEBUG(insert->iHead.iCount<iMaxEntries,User::Invariant());
	Insert(insert,aPos,anEntry,aChild);
	return ETrue;
	}
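
// Note on the special case above (a reading of the code, not documented
// behaviour): when the insertion point falls exactly on the boundary after
// redistribution, the new entry belongs between the two nodes rather than
// inside either. The pivot just computed by DoRedistribute is then pushed
// down as the right node's first key, aChild becomes the right node's
// leftmost child reference, and anEntry itself is promoted through
// aNewPivot instead of being inserted.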
|
EXPORT_C void TBtreeInlineIndexOrg::InsertSplit(TAny *aLeftNode,TAny *aRightNode,TInt aPos,const TDesC8& anEntry,TPageRef aChild,TBtreePivot& aPromote) const
//
// Part of the contract is not to use aPromote before anEntry.
// We know that aLeftNode is full.
// Prepare the right node and use insert-overflow.
//
	{
	__ASSERT_DEBUG(Node(aLeftNode)->iHead.iCount==iMaxEntries,Panic(EIllegalSplit));
	__ASSERT_DEBUG(Node(aRightNode)->iHead.iCount==0,Panic(EIllegalSplit));
	SNode* const pl=Node(aLeftNode);
	SNode* const pr=Node(aRightNode);
	SEntry *pe=Entry(pl,pl->iHead.iCount);
	--pl->iHead.iCount;
	Entry(pr,0)->iChild=pe->iChild;
	TPtrC8 pivot((TUint8*)pe-KeySize(),KeySize());
	InsertOverflow(aLeftNode,aRightNode,aPos,ETrue,anEntry,aChild,pivot,aPromote);
	}
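
// Flow sketch: the split borrows the left node's last entry, whose child
// reference seeds the new right node's leftmost child and whose key serves
// as a provisional pivot, then delegates the redistribution and the filling
// of aPromote to InsertOverflow. Note that `pivot` aliases the left node's
// own storage; this works because, with an empty right node, redistribution
// only moves entries rightwards (an observation, not a stated guarantee).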
|
EXPORT_C TBool TBtreeInlineIndexOrg::Update(TAny *aNode,TInt aPos,const TDesC8& anEntry) const
	{
	__ASSERT_DEBUG(anEntry.Size()<=KeySize(),Panic(EBadEntrySize));
	__ASSERT_DEBUG(aPos<Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
	Mem::Copy(Entry(Node(aNode),aPos)->iKey,anEntry.Ptr(),KeySize());
	return ETrue;
	}

EXPORT_C TBool TBtreeInlineIndexOrg::Delete(TAny *aNode,TInt aPos) const
//
// Delete the specified key and the following child reference from the node.
// Part of the contract is to guarantee that the entry is deleted, even if the node underflows.
//
	{
	SNode* const pn=Node(aNode);
	__ASSERT_DEBUG(aPos<pn->iHead.iCount,Panic(EBadEntryPos));

	--pn->iHead.iCount;
	TUint8* pe=Entry(pn,aPos)->iKey;
	Mem::Copy(pe,pe+iEntrySize,(pn->iHead.iCount-aPos)*iEntrySize);
	return pn->iHead.iCount>=iMaxEntries>>1; // underflow criterion
	}

EXPORT_C TBool TBtreeInlineIndexOrg::Redistribute(TAny *aLeftNode,TAny *aRightNode,const TDesC8& aPivot,TBtreePivot& aNewPivot) const
	{
	return DoRedistribute(aLeftNode,aRightNode,aPivot,aNewPivot)!=NULL;
	}

EXPORT_C void TBtreeInlineIndexOrg::Concatenate(TAny *aLeftNode,const TAny *aRightNode,const TDesC8& aPivot) const
//
// Join aLeftNode and aRightNode together in aLeftNode.
// The contract says that the entries will fit.
//
	{
	SNode* const pl=Node(aLeftNode);
	const SNode* const pr=Node(aRightNode);
	TInt rCount=pr->iHead.iCount;
	TInt lCount=pl->iHead.iCount;
	__ASSERT_DEBUG(lCount+rCount+1<=iMaxEntries,Panic(ECannotConcatenate));
	TInt pSize=aPivot.Size();
	__ASSERT_DEBUG(pSize<=KeySize(),Panic(EBadEntrySize));
	TUint8* pp=Mem::Copy(Entry(pl,lCount)->iKey,aPivot.Ptr(),pSize);
	Mem::Copy(pp,pr->iEntries,rCount*iEntrySize+sizeof(TPageRef));
	pl->iHead.iCount+=rCount+1;
	}

EXPORT_C void TBtreeInlineIndexOrg::MakeRoot(TAny* aNode,TPageRef aChild) const
	{
	__ASSERT_DEBUG(Node(aNode)->iHead.iCount==0,Panic(ECannotMakeRoot));
	Entry(Node(aNode),0)->iChild=aChild;
	}

EXPORT_C TInt TBtreeInlineIndexOrg::LastEntry(const TAny* aNode) const
	{
	return Node(aNode)->iHead.iCount;
	}

EXPORT_C const TAny *TBtreeInlineIndexOrg::EntryPtr(const TAny* aNode,TInt aPos) const
	{
	__ASSERT_DEBUG(aPos<Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
	return Entry(Node(aNode),aPos)->iKey;
	}

EXPORT_C TPtrC8 TBtreeInlineIndexOrg::Entry(const TAny* aNode,TInt aPos) const
	{
	return TPtrC8((const TUint8*)EntryPtr(aNode,aPos),KeySize());
	}

EXPORT_C TPageRef TBtreeInlineIndexOrg::ChildNode(const TAny* aNode,TInt aPos) const
//
// Entries are always aligned.
//
	{
	__ASSERT_DEBUG(aPos<=Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
	return Entry(Node(aNode),aPos)->iChild;
	}