1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/persistentdata/persistentstorage/store/UBTREE/UB_INL.CPP Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,408 @@
1.4 +// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// Inline entry node organisations
1.18 +//
1.19 +//
1.20 +
1.21 +#include "UB_STD.H"
1.22 +
// Construct a leaf-node organisation; SetEntrySize() must be called
// before the organisation is used on a node.
EXPORT_C TBtreeInlineLeafOrg::TBtreeInlineLeafOrg()
	{}
1.25 +
1.26 +EXPORT_C void TBtreeInlineLeafOrg::SetEntrySize(TInt aSize)
1.27 + {
1.28 + __ASSERT_DEBUG(aSize<=KMaxBtreeKeyLength,Panic(EBadEntrySize));
1.29 + iEntrySize=Align4(aSize);
1.30 + iMaxEntries=(KPoolPageSize-sizeof(SHeader))/iEntrySize;
1.31 + }
1.32 +
1.33 +EXPORT_C TBool TBtreeInlineLeafOrg::Insert(TAny *aNode,TInt aPos,const TDesC8& anEntry) const
1.34 + {
1.35 + SNode* const pn=Node(aNode);
1.36 + __ASSERT_DEBUG(aPos<=pn->iHead.iCount,Panic(EBadEntryPos));
1.37 + if (pn->iHead.iCount==iMaxEntries)
1.38 + return EFalse;
1.39 + TUint8* pe=Entry(pn,aPos);
1.40 + Mem::Copy(pe+iEntrySize,pe,iEntrySize*(pn->iHead.iCount-aPos));
1.41 + TInt size=anEntry.Size();
1.42 + __ASSERT_ALWAYS(size<=iEntrySize,Panic(EBadEntrySize));
1.43 + Mem::Copy(pe,anEntry.Ptr(),size);
1.44 + ++pn->iHead.iCount;
1.45 + return ETrue;
1.46 + }
1.47 +
TAny *TBtreeInlineLeafOrg::DoRedistribute(TAny *aLeftNode,TAny *aRightNode,TInt aInsertPos) const
//
// Even out the distribution of entries in LeftNode and RightNode.
// If aInsertPos>=0 we want to insert at this cumulative position (counted
// across both nodes), so take that into account for redistribution.
// If total entries <=iMaxEntries (underflow) or >=iMaxEntries*2 (no room
// for the pending insertion) then return 0.
// Otherwise return the node into which insertion should take place.
//
	{
	SNode* const pl=Node(aLeftNode);
	SNode* const pr=Node(aRightNode);
	TAny *insertNode=aRightNode;
	TInt lCount=pl->iHead.iCount;
	TInt rCount=pr->iHead.iCount;
	TInt total=lCount+rCount;
	TInt left=(total+1)>>1;	// target left-node count (round up)
	if (aInsertPos>=0)
		{ // call from Insertoverflow
		__ASSERT_DEBUG(aInsertPos<=total,Panic(EBadEntryPos));
		if (aInsertPos<left)
			{
			// insertion will go into the left node: leave it one short so
			// the subsequent Insert() brings it up to the balanced count
			insertNode=aLeftNode;
			--left;
			}
		if (total>=iMaxEntries<<1)
			return 0; // no space
		}
	else
		{ // from Redistribute
		if (total<=iMaxEntries)
			return 0; // underflow state
		}
	pl->iHead.iCount=left;
	pr->iHead.iCount=total-left;
	if (lCount>left)
		{ // move right
		TInt move=lCount-left;
		// shift the right node's entries up first, then copy the tail of
		// the left node into the gap (order matters)
		Mem::Copy(Entry(pr,move),pr->iEntries,rCount*iEntrySize);
		Mem::Copy(pr->iEntries,Entry(pl,left),move*iEntrySize);
		}
	else if (lCount<left)
		{ // move left
		TInt move=left-lCount;
		// append the head of the right node to the left, then close the
		// gap at the front of the right node
		Mem::Copy(Entry(pl,lCount),pr->iEntries,move*iEntrySize);
		Mem::Copy(pr->iEntries,Entry(pr,move),(rCount-move)*iEntrySize);
		}
	return insertNode;
	}
1.95 +
1.96 +EXPORT_C TBool TBtreeInlineLeafOrg::InsertOverflow(TAny *aLeftNode,TAny *aRightNode,TInt aPos,TBool aInsertOnLeft,const TDesC8& anEntry) const
1.97 +//
1.98 +// Redistribute keys in aLeftNode and aRightNode to insert a new entry, return success or failure
1.99 +// We know that the insertion node is full (see code for CBplusTree::InsertAtCurrentL)
1.100 +// promotion done by manager
1.101 +//
1.102 + {
1.103 + __ASSERT_DEBUG(Node(aInsertOnLeft?aLeftNode:aRightNode)->iHead.iCount==iMaxEntries,Panic(EIllegalOverflow));
1.104 + if (!aInsertOnLeft)
1.105 + aPos+=Node(aLeftNode)->iHead.iCount;
1.106 + TAny* iNode=DoRedistribute(aLeftNode,aRightNode,aPos);
1.107 + if (!iNode)
1.108 + return EFalse;
1.109 + if (iNode==aRightNode)
1.110 + aPos-=Node(aLeftNode)->iHead.iCount;
1.111 + return Insert(iNode,aPos,anEntry);
1.112 + }
1.113 +
1.114 +EXPORT_C void TBtreeInlineLeafOrg::InsertSplit(TAny *aLeftNode,TAny *aRightNode,TInt aPos,const TDesC8& anEntry) const
1.115 +//
1.116 +// Split contents of left node into left and right and insert entry
1.117 +// copy link node from left to right
1.118 +//
1.119 + {
1.120 + __ASSERT_DEBUG(Node(aLeftNode)->iHead.iCount==iMaxEntries,Panic(EIllegalSplit));
1.121 + __ASSERT_DEBUG(Node(aRightNode)->iHead.iCount==0,Panic(EIllegalSplit));
1.122 + TAny* iNode=DoRedistribute(aLeftNode,aRightNode,aPos);
1.123 + if (iNode==aRightNode)
1.124 + aPos-=Node(aLeftNode)->iHead.iCount;
1.125 + Insert(iNode,aPos,anEntry);
1.126 + Node(aRightNode)->iHead.iLink=Node(aLeftNode)->iHead.iLink;
1.127 + }
1.128 +
1.129 +EXPORT_C TBool TBtreeInlineLeafOrg::Delete(TAny *aNode,TInt aPos) const
1.130 +//
1.131 +// Delete the specified entry from the node
1.132 +// Part of the contract is to garauntee that the entry is deleted, even if undeflowed
1.133 +//
1.134 + {
1.135 + SNode* const pn=Node(aNode);
1.136 + __ASSERT_DEBUG(aPos<pn->iHead.iCount,Panic(EBadEntryPos));
1.137 +
1.138 + --pn->iHead.iCount;
1.139 + TUint8* pe=Entry(pn,aPos);
1.140 + Mem::Copy(pe,pe+iEntrySize,(pn->iHead.iCount-aPos)*iEntrySize);
1.141 + return pn->iHead.iCount>=iMaxEntries>>1; // underflow criterion
1.142 + }
1.143 +
1.144 +EXPORT_C TBool TBtreeInlineLeafOrg::Redistribute(TAny *aLeftNode,TAny *aRightNode) const
1.145 +//
1.146 +// Even out the distibution of entries in LeftNode and RightNode if enough keys
1.147 +// leaf nodes do not set the pivot, done by manager
1.148 +//
1.149 + {
1.150 + return DoRedistribute(aLeftNode,aRightNode)!=0;
1.151 + }
1.152 +
1.153 +EXPORT_C void TBtreeInlineLeafOrg::Concatenate(TAny *aLeftNode,const TAny *aRightNode) const
1.154 +//
1.155 +// Join LeftNode and RightNode together in LeftNode, set link
1.156 +// contract says that it will fit
1.157 +//
1.158 + {
1.159 + SNode* const pl=Node(aLeftNode);
1.160 + const SNode* const pr=Node(aRightNode);
1.161 + TInt rCount=pr->iHead.iCount;
1.162 + __ASSERT_DEBUG(pl->iHead.iCount+rCount<=iMaxEntries,Panic(ECannotConcatenate));
1.163 + Mem::Copy(Entry(pl,pl->iHead.iCount),pr->iEntries,rCount*iEntrySize);
1.164 + pl->iHead.iCount+=rCount;
1.165 + pl->iHead.iLink=pr->iHead.iLink;
1.166 + }
1.167 +
1.168 +EXPORT_C TInt TBtreeInlineLeafOrg::LastEntry(const TAny* aNode) const
1.169 + {
1.170 + return Node(aNode)->iHead.iCount;
1.171 + }
1.172 +
1.173 +EXPORT_C const TAny *TBtreeInlineLeafOrg::EntryPtr(const TAny* aNode,TInt aPos) const
1.174 + {
1.175 + __ASSERT_DEBUG(aPos<Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
1.176 + return Entry(Node(aNode),aPos);
1.177 + }
1.178 +
1.179 +EXPORT_C TPtrC8 TBtreeInlineLeafOrg::Entry(const TAny* aNode,TInt aPos) const
1.180 + {
1.181 + return TPtrC8((const TUint8*)EntryPtr(aNode,aPos),iEntrySize);
1.182 + }
1.183 +
1.184 +EXPORT_C TPageRef TBtreeInlineLeafOrg::LinkNode(const TAny* aNode) const
1.185 + {
1.186 + return Node(aNode)->iHead.iLink;
1.187 + }
1.188 +
1.189 +EXPORT_C void TBtreeInlineLeafOrg::SetLinkNode(TAny* aNode,TPageRef aNextNode) const
1.190 + {
1.191 + Node(aNode)->iHead.iLink=aNextNode;
1.192 + }
1.193 +
1.194 +
1.195 +// class TBtreeInlineIndexOrg
1.196 +
// Construct an index-node organisation; SetEntrySize() must be called
// before the organisation is used on a node.
EXPORT_C TBtreeInlineIndexOrg::TBtreeInlineIndexOrg()
	{}
1.199 +
1.200 +EXPORT_C void TBtreeInlineIndexOrg::SetEntrySize(TInt aSize)
1.201 + {
1.202 + __ASSERT_DEBUG(aSize<=KMaxBtreeKeyLength,Panic(EBadEntrySize));
1.203 + iEntrySize=_FOFF(SEntry,iKey[Align4(aSize)]);
1.204 + iMaxEntries=(KPoolPageSize-sizeof(SHeader)-sizeof(TPageRef))/iEntrySize;
1.205 + }
1.206 +
1.207 +EXPORT_C TBool TBtreeInlineIndexOrg::Insert(TAny *aNode,TInt aPos,const TDesC8& anEntry,TPageRef aChild) const
1.208 + {
1.209 + SNode* const pn=Node(aNode);
1.210 + __ASSERT_DEBUG(aPos<=pn->iHead.iCount,Panic(EBadEntryPos));
1.211 + if (pn->iHead.iCount==iMaxEntries)
1.212 + return EFalse;
1.213 + TUint8* pe=Entry(pn,aPos)->iKey;
1.214 + Mem::Copy(pe+iEntrySize,pe,iEntrySize*(pn->iHead.iCount-aPos));
1.215 + TInt size=anEntry.Size();
1.216 + __ASSERT_ALWAYS(size<=KeySize(),Panic(EBadEntrySize));
1.217 + *(TPageRef*)Mem::Copy(pe,anEntry.Ptr(),size)=aChild;
1.218 + ++pn->iHead.iCount;
1.219 + return ETrue;
1.220 + }
1.221 +
TBtreeInlineIndexOrg::SNode* TBtreeInlineIndexOrg::DoRedistribute(TAny *aLeftNode,TAny *aRightNode,const TDesC8& aPivot,TBtreePivot& aNewPivot,TInt aInsertPos) const
//
// Even out the distribution of entries in LeftNode and RightNode.
// If aInsertPos>=0 we want to insert at this cumulative position so take
// it into account for redistribution.
// If total entries <=iMaxEntries or >iMaxEntries*2 then return 0.
// Otherwise return the node into which insertion should take place.
// If insertion should be promoted the insert position is beyond the end
// of the left node, otherwise the new pivot is copied to aNewPivot.
//
	{
	SNode* const pl=Node(aLeftNode);
	SNode* const pr=Node(aRightNode);
	SNode *insertNode=pr;
	TInt lCount=pl->iHead.iCount;
	TInt rCount=pr->iHead.iCount;
	TInt total=lCount+rCount+1; // including pivot entry
	TInt left=total>>1;	// target left-node count after redistribution
	if (aInsertPos>=0)
		{ // call from InsertOverflow
		__ASSERT_DEBUG(aInsertPos<=total,Panic(EBadEntryPos));
		if (total>iMaxEntries<<1)
			return NULL; // no space to insert
		if (aInsertPos<=left)
			{
			// insertion goes into the left node; leave it one short so the
			// subsequent Insert() brings it up to the balanced count
			if (aInsertPos<left)
				--left;
			insertNode=pl;
			}
		}
	else
		{ // from Redistribute
		if (total<=iMaxEntries)
			return NULL; // underflow state
		}
	pl->iHead.iCount=left;
	pr->iHead.iCount=total-left-1; // pivot not included
	TInt pSize=aPivot.Size();
	__ASSERT_DEBUG(pSize<=KeySize(),Panic(EBadEntrySize));
	if (lCount>left)
		{ // move right
		TInt move=lCount-left;
		// shift right node up (entries plus trailing child ref), copy the
		// tail of the left node across, then append the old pivot key
		Mem::Copy(Entry(pr,move),pr->iEntries,rCount*iEntrySize+sizeof(TPageRef));
		TUint8 *pp=Mem::Copy(pr->iEntries,Entry(pl,left+1),(move-1)*iEntrySize+sizeof(TPageRef));
		Mem::Copy(pp,aPivot.Ptr(),pSize);
		aNewPivot.Copy(Entry(pl,left)->iKey,KeySize()); // new pivot
		}
	else if (lCount<left)
		{ // move left
		TInt move=left-lCount-1;
		// append the old pivot key to the left node, then the head of the
		// right node; the entry after the moved run becomes the new pivot
		TUint8 *pp=Mem::Copy(Entry(pl,lCount)->iKey,aPivot.Ptr(),pSize);
		Mem::Copy(pp,pr->iEntries,move*iEntrySize+sizeof(TPageRef));
		aNewPivot.Copy(Entry(pr,move)->iKey,KeySize());
		Mem::Copy(pr->iEntries,Entry(pr,move+1),(rCount-move-1)*iEntrySize+sizeof(TPageRef));
		}
	else
		{ // should we ever get here? (lCount==left)
		aNewPivot=aPivot;
		}
	return insertNode;
	}
1.282 +
EXPORT_C TBool TBtreeInlineIndexOrg::InsertOverflow(TAny *aLeftNode,TAny *aRightNode,TInt aPos,TBool aInsertOnLeft,const TDesC8& anEntry,
												   TPageRef aChild,const TDesC8& aPivot,TBtreePivot& aNewPivot) const
//
// Redistribute keys in aLeftNode and aRightNode to insert a new entry,
// return success or failure. We know that the insertion node is full
// (see code for Insert).
//
	{
	SNode* const pl=Node(aLeftNode);
	if (!aInsertOnLeft)
		aPos+=pl->iHead.iCount+1; // cumulative insertion point
	SNode* insert=DoRedistribute(aLeftNode,aRightNode,aPivot,aNewPivot,aPos);
	if (insert==NULL) // no space
		return EFalse;
	TInt lCount=pl->iHead.iCount;
	if (insert==aRightNode)
		aPos-=lCount+1; // insert position in right node
	else
		{
		// check for the special situation [aPos]:[aPos-1]
		// in which case re-arrange to promote the new entry
		SNode* pr=Node(aRightNode);
		if (aPos==lCount && pr->iHead.iCount<lCount)
			{
			// push the redistributed pivot down into the right node,
			// hook the new child in as its leading child reference, and
			// promote the new entry as the pivot instead
			Insert(pr,0,aNewPivot,Entry(pr,0)->iChild);
			Entry(pr,0)->iChild=aChild;
			aNewPivot=anEntry;
			return ETrue;
			}
		}
	__ASSERT_DEBUG(insert->iHead.iCount<iMaxEntries,User::Invariant());
	Insert(insert,aPos,anEntry,aChild);
	return ETrue;
	}
1.316 +
EXPORT_C void TBtreeInlineIndexOrg::InsertSplit(TAny *aLeftNode,TAny *aRightNode,TInt aPos,const TDesC8& anEntry,TPageRef aChild,TBtreePivot& aPromote) const
//
// Part of the contract is not to use aPromote before anEntry.
// We know that the left node is full: prepare the right node and then
// use insert-overflow to do the actual redistribution and insertion.
//
	{
	__ASSERT_DEBUG(Node(aLeftNode)->iHead.iCount==iMaxEntries,Panic(EIllegalSplit));
	__ASSERT_DEBUG(Node(aRightNode)->iHead.iCount==0,Panic(EIllegalSplit));
	SNode* const pl=Node(aLeftNode);
	SNode* const pr=Node(aRightNode);
	// drop the left node's last entry; its child ref becomes the right
	// node's leading child, and its key (which sits immediately before
	// the SEntry address) becomes the provisional pivot
	SEntry *pe=Entry(pl,pl->iHead.iCount);
	--pl->iHead.iCount;
	Entry(pr,0)->iChild=pe->iChild;
	TPtrC8 pivot((TUint8*)pe-KeySize(),KeySize());
	InsertOverflow(aLeftNode,aRightNode,aPos,ETrue,anEntry,aChild,pivot,aPromote);
	}
1.334 +
1.335 +EXPORT_C TBool TBtreeInlineIndexOrg::Update(TAny *aNode,TInt aPos,const TDesC8& anEntry) const
1.336 + {
1.337 + __ASSERT_DEBUG(anEntry.Size()<=KeySize(),Panic(EBadEntrySize));
1.338 + __ASSERT_DEBUG(aPos<Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
1.339 + Mem::Copy(Entry(Node(aNode),aPos)->iKey,anEntry.Ptr(),KeySize());
1.340 + return ETrue;
1.341 + }
1.342 +
1.343 +EXPORT_C TBool TBtreeInlineIndexOrg::Delete(TAny *aNode,TInt aPos) const
1.344 +//
1.345 +// Delete the specified key and following child reference from the node
1.346 +// Part of the contract is to garauntee that the entry is deleted, even if undeflowed
1.347 +//
1.348 + {
1.349 + SNode* const pn=Node(aNode);
1.350 + __ASSERT_DEBUG(aPos<pn->iHead.iCount,Panic(EBadEntryPos));
1.351 +
1.352 + --pn->iHead.iCount;
1.353 + TUint8* pe=Entry(pn,aPos)->iKey;
1.354 + Mem::Copy(pe,pe+iEntrySize,(pn->iHead.iCount-aPos)*iEntrySize);
1.355 + return pn->iHead.iCount>=iMaxEntries>>1; // underflow criterion
1.356 + }
1.357 +
1.358 +EXPORT_C TBool TBtreeInlineIndexOrg::Redistribute(TAny *aLeftNode,TAny *aRightNode,const TDesC8& aPivot,TBtreePivot& aNewPivot) const
1.359 + {
1.360 + return DoRedistribute(aLeftNode,aRightNode,aPivot,aNewPivot)!=NULL;
1.361 + }
1.362 +
1.363 +EXPORT_C void TBtreeInlineIndexOrg::Concatenate(TAny *aLeftNode,const TAny *aRightNode,const TDesC8& aPivot) const
1.364 +//
1.365 +// Join LeftNode and RightNode together in LeftNode
1.366 +// contract says that it will fit
1.367 +//
1.368 + {
1.369 + SNode* const pl=Node(aLeftNode);
1.370 + const SNode* const pr=Node(aRightNode);
1.371 + TInt rCount=pr->iHead.iCount;
1.372 + TInt lCount=pl->iHead.iCount;
1.373 + __ASSERT_DEBUG(lCount+rCount+1<=iMaxEntries,Panic(ECannotConcatenate));
1.374 + TInt pSize=aPivot.Size();
1.375 + __ASSERT_DEBUG(pSize<=KeySize(),Panic(EBadEntrySize));
1.376 + TUint8* pp=Mem::Copy(Entry(pl,lCount)->iKey,aPivot.Ptr(),pSize);
1.377 + Mem::Copy(pp,pr->iEntries,rCount*iEntrySize+sizeof(TPageRef));
1.378 + pl->iHead.iCount+=rCount+1;
1.379 + }
1.380 +
1.381 +EXPORT_C void TBtreeInlineIndexOrg::MakeRoot(TAny* aNode,TPageRef aChild) const
1.382 + {
1.383 + __ASSERT_DEBUG(Node(aNode)->iHead.iCount==0,Panic(ECannotMakeRoot));
1.384 + Entry(Node(aNode),0)->iChild=aChild;
1.385 + }
1.386 +
1.387 +EXPORT_C TInt TBtreeInlineIndexOrg::LastEntry(const TAny* aNode) const
1.388 + {
1.389 + return Node(aNode)->iHead.iCount;
1.390 + }
1.391 +
1.392 +EXPORT_C const TAny *TBtreeInlineIndexOrg::EntryPtr(const TAny* aNode,TInt aPos) const
1.393 + {
1.394 + __ASSERT_DEBUG(aPos<Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
1.395 + return Entry(Node(aNode),aPos)->iKey;
1.396 + }
1.397 +
1.398 +EXPORT_C TPtrC8 TBtreeInlineIndexOrg::Entry(const TAny* aNode,TInt aPos) const
1.399 + {
1.400 + return TPtrC8((const TUint8*)EntryPtr(aNode,aPos),KeySize());
1.401 + }
1.402 +
1.403 +EXPORT_C TPageRef TBtreeInlineIndexOrg::ChildNode(const TAny* aNode,TInt aPos) const
1.404 +//
1.405 +// Entries are always aligned
1.406 +//
1.407 + {
1.408 + __ASSERT_DEBUG(aPos<=Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
1.409 + return Entry(Node(aNode),aPos)->iChild;
1.410 + }
1.411 +