// Update contrib.
1 // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // Inline entry node organisations
20 EXPORT_C TBtreeInlineLeafOrg::TBtreeInlineLeafOrg()
23 EXPORT_C void TBtreeInlineLeafOrg::SetEntrySize(TInt aSize)
25 __ASSERT_DEBUG(aSize<=KMaxBtreeKeyLength,Panic(EBadEntrySize));
26 iEntrySize=Align4(aSize);
27 iMaxEntries=(KPoolPageSize-sizeof(SHeader))/iEntrySize;
30 EXPORT_C TBool TBtreeInlineLeafOrg::Insert(TAny *aNode,TInt aPos,const TDesC8& anEntry) const
32 SNode* const pn=Node(aNode);
33 __ASSERT_DEBUG(aPos<=pn->iHead.iCount,Panic(EBadEntryPos));
34 if (pn->iHead.iCount==iMaxEntries)
36 TUint8* pe=Entry(pn,aPos);
37 Mem::Copy(pe+iEntrySize,pe,iEntrySize*(pn->iHead.iCount-aPos));
38 TInt size=anEntry.Size();
39 __ASSERT_ALWAYS(size<=iEntrySize,Panic(EBadEntrySize));
40 Mem::Copy(pe,anEntry.Ptr(),size);
45 TAny *TBtreeInlineLeafOrg::DoRedistribute(TAny *aLeftNode,TAny *aRightNode,TInt aInsertPos) const
47 // Even out the distibution of entries in LeftNode and RightNode
48 // if aInsertPos>=0 we want to insert at this cumulative position so take into account for redistribution
49 // If total entries <=iMaxEntries or >iMaxEntries*2 then return 0
50 // Otherwise return the node into which insertion should take place
53 SNode* const pl=Node(aLeftNode);
54 SNode* const pr=Node(aRightNode);
55 TAny *insertNode=aRightNode;
56 TInt lCount=pl->iHead.iCount;
57 TInt rCount=pr->iHead.iCount;
58 TInt total=lCount+rCount;
59 TInt left=(total+1)>>1;
61 { // call from Insertoverflow
62 __ASSERT_DEBUG(aInsertPos<=total,Panic(EBadEntryPos));
68 if (total>=iMaxEntries<<1)
72 { // from Redistribute
73 if (total<=iMaxEntries)
74 return 0; // underflow state
76 pl->iHead.iCount=left;
77 pr->iHead.iCount=total-left;
80 TInt move=lCount-left;
81 Mem::Copy(Entry(pr,move),pr->iEntries,rCount*iEntrySize);
82 Mem::Copy(pr->iEntries,Entry(pl,left),move*iEntrySize);
86 TInt move=left-lCount;
87 Mem::Copy(Entry(pl,lCount),pr->iEntries,move*iEntrySize);
88 Mem::Copy(pr->iEntries,Entry(pr,move),(rCount-move)*iEntrySize);
93 EXPORT_C TBool TBtreeInlineLeafOrg::InsertOverflow(TAny *aLeftNode,TAny *aRightNode,TInt aPos,TBool aInsertOnLeft,const TDesC8& anEntry) const
95 // Redistribute keys in aLeftNode and aRightNode to insert a new entry, return success or failure
96 // We know that the insertion node is full (see code for CBplusTree::InsertAtCurrentL)
97 // promotion done by manager
100 __ASSERT_DEBUG(Node(aInsertOnLeft?aLeftNode:aRightNode)->iHead.iCount==iMaxEntries,Panic(EIllegalOverflow));
102 aPos+=Node(aLeftNode)->iHead.iCount;
103 TAny* iNode=DoRedistribute(aLeftNode,aRightNode,aPos);
106 if (iNode==aRightNode)
107 aPos-=Node(aLeftNode)->iHead.iCount;
108 return Insert(iNode,aPos,anEntry);
111 EXPORT_C void TBtreeInlineLeafOrg::InsertSplit(TAny *aLeftNode,TAny *aRightNode,TInt aPos,const TDesC8& anEntry) const
113 // Split contents of left node into left and right and insert entry
114 // copy link node from left to right
117 __ASSERT_DEBUG(Node(aLeftNode)->iHead.iCount==iMaxEntries,Panic(EIllegalSplit));
118 __ASSERT_DEBUG(Node(aRightNode)->iHead.iCount==0,Panic(EIllegalSplit));
119 TAny* iNode=DoRedistribute(aLeftNode,aRightNode,aPos);
120 if (iNode==aRightNode)
121 aPos-=Node(aLeftNode)->iHead.iCount;
122 Insert(iNode,aPos,anEntry);
123 Node(aRightNode)->iHead.iLink=Node(aLeftNode)->iHead.iLink;
126 EXPORT_C TBool TBtreeInlineLeafOrg::Delete(TAny *aNode,TInt aPos) const
128 // Delete the specified entry from the node
129 // Part of the contract is to garauntee that the entry is deleted, even if undeflowed
132 SNode* const pn=Node(aNode);
133 __ASSERT_DEBUG(aPos<pn->iHead.iCount,Panic(EBadEntryPos));
136 TUint8* pe=Entry(pn,aPos);
137 Mem::Copy(pe,pe+iEntrySize,(pn->iHead.iCount-aPos)*iEntrySize);
138 return pn->iHead.iCount>=iMaxEntries>>1; // underflow criterion
141 EXPORT_C TBool TBtreeInlineLeafOrg::Redistribute(TAny *aLeftNode,TAny *aRightNode) const
143 // Even out the distibution of entries in LeftNode and RightNode if enough keys
144 // leaf nodes do not set the pivot, done by manager
147 return DoRedistribute(aLeftNode,aRightNode)!=0;
150 EXPORT_C void TBtreeInlineLeafOrg::Concatenate(TAny *aLeftNode,const TAny *aRightNode) const
152 // Join LeftNode and RightNode together in LeftNode, set link
153 // contract says that it will fit
156 SNode* const pl=Node(aLeftNode);
157 const SNode* const pr=Node(aRightNode);
158 TInt rCount=pr->iHead.iCount;
159 __ASSERT_DEBUG(pl->iHead.iCount+rCount<=iMaxEntries,Panic(ECannotConcatenate));
160 Mem::Copy(Entry(pl,pl->iHead.iCount),pr->iEntries,rCount*iEntrySize);
161 pl->iHead.iCount+=rCount;
162 pl->iHead.iLink=pr->iHead.iLink;
165 EXPORT_C TInt TBtreeInlineLeafOrg::LastEntry(const TAny* aNode) const
167 return Node(aNode)->iHead.iCount;
170 EXPORT_C const TAny *TBtreeInlineLeafOrg::EntryPtr(const TAny* aNode,TInt aPos) const
172 __ASSERT_DEBUG(aPos<Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
173 return Entry(Node(aNode),aPos);
176 EXPORT_C TPtrC8 TBtreeInlineLeafOrg::Entry(const TAny* aNode,TInt aPos) const
178 return TPtrC8((const TUint8*)EntryPtr(aNode,aPos),iEntrySize);
181 EXPORT_C TPageRef TBtreeInlineLeafOrg::LinkNode(const TAny* aNode) const
183 return Node(aNode)->iHead.iLink;
186 EXPORT_C void TBtreeInlineLeafOrg::SetLinkNode(TAny* aNode,TPageRef aNextNode) const
188 Node(aNode)->iHead.iLink=aNextNode;
192 // class TBtreeInlineIndexOrg
194 EXPORT_C TBtreeInlineIndexOrg::TBtreeInlineIndexOrg()
197 EXPORT_C void TBtreeInlineIndexOrg::SetEntrySize(TInt aSize)
199 __ASSERT_DEBUG(aSize<=KMaxBtreeKeyLength,Panic(EBadEntrySize));
200 iEntrySize=_FOFF(SEntry,iKey[Align4(aSize)]);
201 iMaxEntries=(KPoolPageSize-sizeof(SHeader)-sizeof(TPageRef))/iEntrySize;
204 EXPORT_C TBool TBtreeInlineIndexOrg::Insert(TAny *aNode,TInt aPos,const TDesC8& anEntry,TPageRef aChild) const
206 SNode* const pn=Node(aNode);
207 __ASSERT_DEBUG(aPos<=pn->iHead.iCount,Panic(EBadEntryPos));
208 if (pn->iHead.iCount==iMaxEntries)
210 TUint8* pe=Entry(pn,aPos)->iKey;
211 Mem::Copy(pe+iEntrySize,pe,iEntrySize*(pn->iHead.iCount-aPos));
212 TInt size=anEntry.Size();
213 __ASSERT_ALWAYS(size<=KeySize(),Panic(EBadEntrySize));
214 *(TPageRef*)Mem::Copy(pe,anEntry.Ptr(),size)=aChild;
219 TBtreeInlineIndexOrg::SNode* TBtreeInlineIndexOrg::DoRedistribute(TAny *aLeftNode,TAny *aRightNode,const TDesC8& aPivot,TBtreePivot& aNewPivot,TInt aInsertPos) const
221 // Even out the distibution of entries in LeftNode and RightNode
222 // if aInsertPos>=0 we want to insert at this cumulative position so take into account for redistribution
223 // If total entries <=iMaxEntries or >iMaxEntries*2 then return 0
224 // Otherwise return the node into which insertion should take place
225 // If insertion should be promoted insert position is beyond end of left node, otherwise
226 // the new pivot is copied to aNewPivot
229 SNode* const pl=Node(aLeftNode);
230 SNode* const pr=Node(aRightNode);
231 SNode *insertNode=pr;
232 TInt lCount=pl->iHead.iCount;
233 TInt rCount=pr->iHead.iCount;
234 TInt total=lCount+rCount+1; // including pivot entry
237 { // call from InsertOverflow
238 __ASSERT_DEBUG(aInsertPos<=total,Panic(EBadEntryPos));
239 if (total>iMaxEntries<<1)
240 return NULL; // no space to insert
241 if (aInsertPos<=left)
249 { // from Redistribute
250 if (total<=iMaxEntries)
251 return NULL; // underflow state
253 pl->iHead.iCount=left;
254 pr->iHead.iCount=total-left-1; // pivot not included
255 TInt pSize=aPivot.Size();
256 __ASSERT_DEBUG(pSize<=KeySize(),Panic(EBadEntrySize));
259 TInt move=lCount-left;
260 Mem::Copy(Entry(pr,move),pr->iEntries,rCount*iEntrySize+sizeof(TPageRef));
261 TUint8 *pp=Mem::Copy(pr->iEntries,Entry(pl,left+1),(move-1)*iEntrySize+sizeof(TPageRef));
262 Mem::Copy(pp,aPivot.Ptr(),pSize);
263 aNewPivot.Copy(Entry(pl,left)->iKey,KeySize()); // new pivot
265 else if (lCount<left)
267 TInt move=left-lCount-1;
268 TUint8 *pp=Mem::Copy(Entry(pl,lCount)->iKey,aPivot.Ptr(),pSize);
269 Mem::Copy(pp,pr->iEntries,move*iEntrySize+sizeof(TPageRef));
270 aNewPivot.Copy(Entry(pr,move)->iKey,KeySize());
271 Mem::Copy(pr->iEntries,Entry(pr,move+1),(rCount-move-1)*iEntrySize+sizeof(TPageRef));
274 { // should we ever get here? (lCount==left)
280 EXPORT_C TBool TBtreeInlineIndexOrg::InsertOverflow(TAny *aLeftNode,TAny *aRightNode,TInt aPos,TBool aInsertOnLeft,const TDesC8& anEntry,
281 TPageRef aChild,const TDesC8& aPivot,TBtreePivot& aNewPivot) const
283 // Redistribute keys in aNodePtr and aRight to insert a new entry, return success or failure
284 // We know that aNodePtr is full (see code for Insert)
287 SNode* const pl=Node(aLeftNode);
289 aPos+=pl->iHead.iCount+1; // cumulative insertion point
290 SNode* insert=DoRedistribute(aLeftNode,aRightNode,aPivot,aNewPivot,aPos);
291 if (insert==NULL) // no space
293 TInt lCount=pl->iHead.iCount;
294 if (insert==aRightNode)
295 aPos-=lCount+1; // insert position in right node
298 // check for the special situation [aPos]:[aPos-1]
299 // in which case re-arrange to promote the new entry
300 SNode* pr=Node(aRightNode);
301 if (aPos==lCount && pr->iHead.iCount<lCount)
303 Insert(pr,0,aNewPivot,Entry(pr,0)->iChild);
304 Entry(pr,0)->iChild=aChild;
309 __ASSERT_DEBUG(insert->iHead.iCount<iMaxEntries,User::Invariant());
310 Insert(insert,aPos,anEntry,aChild);
314 EXPORT_C void TBtreeInlineIndexOrg::InsertSplit(TAny *aLeftNode,TAny *aRightNode,TInt aPos,const TDesC8& anEntry,TPageRef aChild,TBtreePivot& aPromote) const
316 // part of the contract is not to use aPromote before anEntry
317 // We know that aNodePtr is full
318 // prepare right node and use insert-overflow
321 __ASSERT_DEBUG(Node(aLeftNode)->iHead.iCount==iMaxEntries,Panic(EIllegalSplit));
322 __ASSERT_DEBUG(Node(aRightNode)->iHead.iCount==0,Panic(EIllegalSplit));
323 SNode* const pl=Node(aLeftNode);
324 SNode* const pr=Node(aRightNode);
325 SEntry *pe=Entry(pl,pl->iHead.iCount);
327 Entry(pr,0)->iChild=pe->iChild;
328 TPtrC8 pivot((TUint8*)pe-KeySize(),KeySize());
329 InsertOverflow(aLeftNode,aRightNode,aPos,ETrue,anEntry,aChild,pivot,aPromote);
332 EXPORT_C TBool TBtreeInlineIndexOrg::Update(TAny *aNode,TInt aPos,const TDesC8& anEntry) const
334 __ASSERT_DEBUG(anEntry.Size()<=KeySize(),Panic(EBadEntrySize));
335 __ASSERT_DEBUG(aPos<Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
336 Mem::Copy(Entry(Node(aNode),aPos)->iKey,anEntry.Ptr(),KeySize());
340 EXPORT_C TBool TBtreeInlineIndexOrg::Delete(TAny *aNode,TInt aPos) const
342 // Delete the specified key and following child reference from the node
343 // Part of the contract is to garauntee that the entry is deleted, even if undeflowed
346 SNode* const pn=Node(aNode);
347 __ASSERT_DEBUG(aPos<pn->iHead.iCount,Panic(EBadEntryPos));
350 TUint8* pe=Entry(pn,aPos)->iKey;
351 Mem::Copy(pe,pe+iEntrySize,(pn->iHead.iCount-aPos)*iEntrySize);
352 return pn->iHead.iCount>=iMaxEntries>>1; // underflow criterion
355 EXPORT_C TBool TBtreeInlineIndexOrg::Redistribute(TAny *aLeftNode,TAny *aRightNode,const TDesC8& aPivot,TBtreePivot& aNewPivot) const
357 return DoRedistribute(aLeftNode,aRightNode,aPivot,aNewPivot)!=NULL;
360 EXPORT_C void TBtreeInlineIndexOrg::Concatenate(TAny *aLeftNode,const TAny *aRightNode,const TDesC8& aPivot) const
362 // Join LeftNode and RightNode together in LeftNode
363 // contract says that it will fit
366 SNode* const pl=Node(aLeftNode);
367 const SNode* const pr=Node(aRightNode);
368 TInt rCount=pr->iHead.iCount;
369 TInt lCount=pl->iHead.iCount;
370 __ASSERT_DEBUG(lCount+rCount+1<=iMaxEntries,Panic(ECannotConcatenate));
371 TInt pSize=aPivot.Size();
372 __ASSERT_DEBUG(pSize<=KeySize(),Panic(EBadEntrySize));
373 TUint8* pp=Mem::Copy(Entry(pl,lCount)->iKey,aPivot.Ptr(),pSize);
374 Mem::Copy(pp,pr->iEntries,rCount*iEntrySize+sizeof(TPageRef));
375 pl->iHead.iCount+=rCount+1;
378 EXPORT_C void TBtreeInlineIndexOrg::MakeRoot(TAny* aNode,TPageRef aChild) const
380 __ASSERT_DEBUG(Node(aNode)->iHead.iCount==0,Panic(ECannotMakeRoot));
381 Entry(Node(aNode),0)->iChild=aChild;
384 EXPORT_C TInt TBtreeInlineIndexOrg::LastEntry(const TAny* aNode) const
386 return Node(aNode)->iHead.iCount;
389 EXPORT_C const TAny *TBtreeInlineIndexOrg::EntryPtr(const TAny* aNode,TInt aPos) const
391 __ASSERT_DEBUG(aPos<Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
392 return Entry(Node(aNode),aPos)->iKey;
395 EXPORT_C TPtrC8 TBtreeInlineIndexOrg::Entry(const TAny* aNode,TInt aPos) const
397 return TPtrC8((const TUint8*)EntryPtr(aNode,aPos),KeySize());
400 EXPORT_C TPageRef TBtreeInlineIndexOrg::ChildNode(const TAny* aNode,TInt aPos) const
402 // Entries are always aligned
405 __ASSERT_DEBUG(aPos<=Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
406 return Entry(Node(aNode),aPos)->iChild;