// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// Inline entry node organisations
//
//

#include "UB_STD.H"
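
// class TBtreeInlineLeafOrg

// Editorial note: a leaf node consists of an SHeader (the entry count iCount
// and a TPageRef link iLink to the next leaf) followed by the entry array
// iEntries, with fixed-size entries stored back-to-back at iEntrySize-byte
// strides. See the SNode declaration for the authoritative layout.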

EXPORT_C TBtreeInlineLeafOrg::TBtreeInlineLeafOrg()
	{}
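
// Editorial comment: set the size of a leaf entry and derive the node
// capacity. The requested size is rounded up to a 4-byte boundary, and the
// capacity is however many whole entries fit in a pool page after the header.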
EXPORT_C void TBtreeInlineLeafOrg::SetEntrySize(TInt aSize)
	{
	__ASSERT_DEBUG(aSize<=KMaxBtreeKeyLength,Panic(EBadEntrySize));
	iEntrySize=Align4(aSize);
	iMaxEntries=(KPoolPageSize-sizeof(SHeader))/iEntrySize;
	}
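
// Editorial comment: insert anEntry at position aPos, shuffling later entries
// up one slot. Fails with EFalse when the node is already full; Mem::Copy
// handles the overlapping move safely.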
EXPORT_C TBool TBtreeInlineLeafOrg::Insert(TAny *aNode,TInt aPos,const TDesC8& anEntry) const
	{
	SNode* const pn=Node(aNode);
	__ASSERT_DEBUG(aPos<=pn->iHead.iCount,Panic(EBadEntryPos));
	if (pn->iHead.iCount==iMaxEntries)
		return EFalse;
	TUint8* pe=Entry(pn,aPos);
	Mem::Copy(pe+iEntrySize,pe,iEntrySize*(pn->iHead.iCount-aPos));
	TInt size=anEntry.Size();
	__ASSERT_ALWAYS(size<=iEntrySize,Panic(EBadEntrySize));
	Mem::Copy(pe,anEntry.Ptr(),size);
	++pn->iHead.iCount;
	return ETrue;
	}
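
// Worked example (editorial, assuming iMaxEntries=8): redistributing
// lCount=7, rCount=2 with no pending insertion gives total=9 and
// left=(9+1)>>1=5, so move=2 entries shift from the left node into the
// right, leaving the nodes with 5 and 4 entries respectively.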
TAny *TBtreeInlineLeafOrg::DoRedistribute(TAny *aLeftNode,TAny *aRightNode,TInt aInsertPos) const
//
// Even out the distribution of entries in LeftNode and RightNode.
// If aInsertPos>=0 we want to insert at this cumulative position, so take that into account when redistributing.
// If total entries <=iMaxEntries or >=iMaxEntries*2 then return 0;
// otherwise return the node into which insertion should take place.
//
	{
	SNode* const pl=Node(aLeftNode);
	SNode* const pr=Node(aRightNode);
	TAny *insertNode=aRightNode;
	TInt lCount=pl->iHead.iCount;
	TInt rCount=pr->iHead.iCount;
	TInt total=lCount+rCount;
	TInt left=(total+1)>>1;
	if (aInsertPos>=0)
		{ // call from InsertOverflow
		__ASSERT_DEBUG(aInsertPos<=total,Panic(EBadEntryPos));
		if (aInsertPos<left)
			{
			insertNode=aLeftNode;
			--left;
			}
		if (total>=iMaxEntries<<1)
			return 0; // no space
		}
	else
		{ // from Redistribute
		if (total<=iMaxEntries)
			return 0; // underflow state
		}
	pl->iHead.iCount=left;
	pr->iHead.iCount=total-left;
	if (lCount>left)
		{ // move right
		TInt move=lCount-left;
		Mem::Copy(Entry(pr,move),pr->iEntries,rCount*iEntrySize);
		Mem::Copy(pr->iEntries,Entry(pl,left),move*iEntrySize);
		}
	else if (lCount<left)
		{ // move left
		TInt move=left-lCount;
		Mem::Copy(Entry(pl,lCount),pr->iEntries,move*iEntrySize);
		Mem::Copy(pr->iEntries,Entry(pr,move),(rCount-move)*iEntrySize);
		}
	return insertNode;
	}

EXPORT_C TBool TBtreeInlineLeafOrg::InsertOverflow(TAny *aLeftNode,TAny *aRightNode,TInt aPos,TBool aInsertOnLeft,const TDesC8& anEntry) const
//
// Redistribute entries in aLeftNode and aRightNode to make room, then insert the new entry; return success or failure.
// We know that the insertion node is full (see code for CBplusTree::InsertAtCurrentL);
// promotion is done by the manager.
//
	{
	__ASSERT_DEBUG(Node(aInsertOnLeft?aLeftNode:aRightNode)->iHead.iCount==iMaxEntries,Panic(EIllegalOverflow));
	if (!aInsertOnLeft)
		aPos+=Node(aLeftNode)->iHead.iCount;
	TAny* node=DoRedistribute(aLeftNode,aRightNode,aPos);
	if (!node)
		return EFalse;
	if (node==aRightNode)
		aPos-=Node(aLeftNode)->iHead.iCount;
	return Insert(node,aPos,anEntry);
	}

EXPORT_C void TBtreeInlineLeafOrg::InsertSplit(TAny *aLeftNode,TAny *aRightNode,TInt aPos,const TDesC8& anEntry) const
//
// Split the contents of the left node between left and right and insert the entry;
// copy the link node from left to right.
//
	{
	__ASSERT_DEBUG(Node(aLeftNode)->iHead.iCount==iMaxEntries,Panic(EIllegalSplit));
	__ASSERT_DEBUG(Node(aRightNode)->iHead.iCount==0,Panic(EIllegalSplit));
	TAny* node=DoRedistribute(aLeftNode,aRightNode,aPos);
	if (node==aRightNode)
		aPos-=Node(aLeftNode)->iHead.iCount;
	Insert(node,aPos,anEntry);
	Node(aRightNode)->iHead.iLink=Node(aLeftNode)->iHead.iLink;
	}

EXPORT_C TBool TBtreeInlineLeafOrg::Delete(TAny *aNode,TInt aPos) const
//
// Delete the specified entry from the node.
// Part of the contract is to guarantee that the entry is deleted, even if the node underflows.
//
	{
	SNode* const pn=Node(aNode);
	__ASSERT_DEBUG(aPos<pn->iHead.iCount,Panic(EBadEntryPos));

	--pn->iHead.iCount;
	TUint8* pe=Entry(pn,aPos);
	Mem::Copy(pe,pe+iEntrySize,(pn->iHead.iCount-aPos)*iEntrySize);
	return pn->iHead.iCount>=iMaxEntries>>1; // underflow criterion
	}

EXPORT_C TBool TBtreeInlineLeafOrg::Redistribute(TAny *aLeftNode,TAny *aRightNode) const
//
// Even out the distribution of entries in LeftNode and RightNode if there are enough keys.
// Leaf nodes do not set the pivot; that is done by the manager.
//
	{
	return DoRedistribute(aLeftNode,aRightNode)!=0;
	}

EXPORT_C void TBtreeInlineLeafOrg::Concatenate(TAny *aLeftNode,const TAny *aRightNode) const
//
// Join LeftNode and RightNode together in LeftNode and set the link;
// the contract says that it will fit.
//
	{
	SNode* const pl=Node(aLeftNode);
	const SNode* const pr=Node(aRightNode);
	TInt rCount=pr->iHead.iCount;
	__ASSERT_DEBUG(pl->iHead.iCount+rCount<=iMaxEntries,Panic(ECannotConcatenate));
	Mem::Copy(Entry(pl,pl->iHead.iCount),pr->iEntries,rCount*iEntrySize);
	pl->iHead.iCount+=rCount;
	pl->iHead.iLink=pr->iHead.iLink;
	}
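
// Editorial comment: return the number of entries in the node; valid entry
// positions run from 0 to LastEntry()-1.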
EXPORT_C TInt TBtreeInlineLeafOrg::LastEntry(const TAny* aNode) const
	{
	return Node(aNode)->iHead.iCount;
	}
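
// Editorial comment: return a raw pointer to the entry at aPos within the
// node.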
EXPORT_C const TAny *TBtreeInlineLeafOrg::EntryPtr(const TAny* aNode,TInt aPos) const
	{
	__ASSERT_DEBUG(aPos<Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
	return Entry(Node(aNode),aPos);
	}
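
// Editorial comment: return the entry at aPos as a descriptor. Note the
// descriptor length is the padded iEntrySize, not the length originally
// passed to Insert().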
EXPORT_C TPtrC8 TBtreeInlineLeafOrg::Entry(const TAny* aNode,TInt aPos) const
	{
	return TPtrC8((const TUint8*)EntryPtr(aNode,aPos),iEntrySize);
	}
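
// Editorial comment: return the page reference linking this leaf to the next
// one.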
EXPORT_C TPageRef TBtreeInlineLeafOrg::LinkNode(const TAny* aNode) const
	{
	return Node(aNode)->iHead.iLink;
	}
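
// Editorial comment: set the page reference linking this leaf to the next
// one.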
EXPORT_C void TBtreeInlineLeafOrg::SetLinkNode(TAny* aNode,TPageRef aNextNode) const
	{
	Node(aNode)->iHead.iLink=aNextNode;
	}


// class TBtreeInlineIndexOrg
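
// Editorial note: index entries interleave child references and keys as
// [child0][key0][child1][key1]...[childN], so a node with n keys holds n+1
// child references. SEntry overlays one (iChild,iKey) pair and iEntrySize is
// the aligned size of that pair; the extra trailing TPageRef is why
// SetEntrySize() subtracts sizeof(TPageRef) from the usable page space.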

EXPORT_C TBtreeInlineIndexOrg::TBtreeInlineIndexOrg()
	{}
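
// Editorial comment: set the key size for index entries. iEntrySize is the
// offset of iKey[Align4(aSize)] within SEntry, i.e. one child reference plus
// the aligned key; the capacity allows for the extra leftmost child reference.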
EXPORT_C void TBtreeInlineIndexOrg::SetEntrySize(TInt aSize)
	{
	__ASSERT_DEBUG(aSize<=KMaxBtreeKeyLength,Panic(EBadEntrySize));
	iEntrySize=_FOFF(SEntry,iKey[Align4(aSize)]);
	iMaxEntries=(KPoolPageSize-sizeof(SHeader)-sizeof(TPageRef))/iEntrySize;
	}
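
// Editorial comment: insert anEntry at aPos with aChild as the child
// reference following the new key. Mem::Copy returns a pointer just past the
// copied bytes; for a full-size key that is exactly the slot of the following
// child reference.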
EXPORT_C TBool TBtreeInlineIndexOrg::Insert(TAny *aNode,TInt aPos,const TDesC8& anEntry,TPageRef aChild) const
	{
	SNode* const pn=Node(aNode);
	__ASSERT_DEBUG(aPos<=pn->iHead.iCount,Panic(EBadEntryPos));
	if (pn->iHead.iCount==iMaxEntries)
		return EFalse;
	TUint8* pe=Entry(pn,aPos)->iKey;
	Mem::Copy(pe+iEntrySize,pe,iEntrySize*(pn->iHead.iCount-aPos));
	TInt size=anEntry.Size();
	__ASSERT_ALWAYS(size<=KeySize(),Panic(EBadEntrySize));
	*(TPageRef*)Mem::Copy(pe,anEntry.Ptr(),size)=aChild;
	++pn->iHead.iCount;
	return ETrue;
	}
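
// Editorial note: redistribution between index nodes rotates keys through the
// parent pivot. The old pivot (aPivot) drops into the node that gains
// entries, and the key vacated at the new boundary is passed back to the
// manager via aNewPivot.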
TBtreeInlineIndexOrg::SNode* TBtreeInlineIndexOrg::DoRedistribute(TAny *aLeftNode,TAny *aRightNode,const TDesC8& aPivot,TBtreePivot& aNewPivot,TInt aInsertPos) const
//
// Even out the distribution of entries in LeftNode and RightNode.
// If aInsertPos>=0 we want to insert at this cumulative position, so take that into account when redistributing.
// If total entries <=iMaxEntries or >iMaxEntries*2 then return 0;
// otherwise return the node into which insertion should take place.
// If the insertion should be promoted the insert position is beyond the end of the left node;
// otherwise the new pivot is copied to aNewPivot.
//
	{
	SNode* const pl=Node(aLeftNode);
	SNode* const pr=Node(aRightNode);
	SNode *insertNode=pr;
	TInt lCount=pl->iHead.iCount;
	TInt rCount=pr->iHead.iCount;
	TInt total=lCount+rCount+1; // including pivot entry
	TInt left=total>>1;
	if (aInsertPos>=0)
		{ // call from InsertOverflow
		__ASSERT_DEBUG(aInsertPos<=total,Panic(EBadEntryPos));
		if (total>iMaxEntries<<1)
			return NULL; // no space to insert
		if (aInsertPos<=left)
			{
			if (aInsertPos<left)
				--left;
			insertNode=pl;
			}
		}
	else
		{ // from Redistribute
		if (total<=iMaxEntries)
			return NULL; // underflow state
		}
	pl->iHead.iCount=left;
	pr->iHead.iCount=total-left-1; // pivot not included
	TInt pSize=aPivot.Size();
	__ASSERT_DEBUG(pSize<=KeySize(),Panic(EBadEntrySize));
	if (lCount>left)
		{ // move right
		TInt move=lCount-left;
		Mem::Copy(Entry(pr,move),pr->iEntries,rCount*iEntrySize+sizeof(TPageRef));
		TUint8 *pp=Mem::Copy(pr->iEntries,Entry(pl,left+1),(move-1)*iEntrySize+sizeof(TPageRef));
		Mem::Copy(pp,aPivot.Ptr(),pSize);
		aNewPivot.Copy(Entry(pl,left)->iKey,KeySize()); // new pivot
		}
	else if (lCount<left)
		{ // move left
		TInt move=left-lCount-1;
		TUint8 *pp=Mem::Copy(Entry(pl,lCount)->iKey,aPivot.Ptr(),pSize);
		Mem::Copy(pp,pr->iEntries,move*iEntrySize+sizeof(TPageRef));
		aNewPivot.Copy(Entry(pr,move)->iKey,KeySize());
		Mem::Copy(pr->iEntries,Entry(pr,move+1),(rCount-move-1)*iEntrySize+sizeof(TPageRef));
		}
	else
		{ // lCount==left: no entries move and the pivot is unchanged
		aNewPivot=aPivot;
		}
	return insertNode;
	}

EXPORT_C TBool TBtreeInlineIndexOrg::InsertOverflow(TAny *aLeftNode,TAny *aRightNode,TInt aPos,TBool aInsertOnLeft,const TDesC8& anEntry,
	TPageRef aChild,const TDesC8& aPivot,TBtreePivot& aNewPivot) const
//
// Redistribute keys in aLeftNode and aRightNode to insert a new entry; return success or failure.
// We know that the insertion node is full (see code for Insert).
//
	{
	SNode* const pl=Node(aLeftNode);
	if (!aInsertOnLeft)
		aPos+=pl->iHead.iCount+1; // cumulative insertion point
	SNode* insert=DoRedistribute(aLeftNode,aRightNode,aPivot,aNewPivot,aPos);
	if (insert==NULL) // no space
		return EFalse;
	TInt lCount=pl->iHead.iCount;
	if (insert==aRightNode)
		aPos-=lCount+1; // insert position in right node
	else
		{
		// check for the special situation [aPos]:[aPos-1]
		// in which case re-arrange to promote the new entry
		SNode* pr=Node(aRightNode);
		if (aPos==lCount && pr->iHead.iCount<lCount)
			{
			Insert(pr,0,aNewPivot,Entry(pr,0)->iChild);
			Entry(pr,0)->iChild=aChild;
			aNewPivot=anEntry;
			return ETrue;
			}
		}
	__ASSERT_DEBUG(insert->iHead.iCount<iMaxEntries,User::Invariant());
	Insert(insert,aPos,anEntry,aChild);
	return ETrue;
	}

EXPORT_C void TBtreeInlineIndexOrg::InsertSplit(TAny *aLeftNode,TAny *aRightNode,TInt aPos,const TDesC8& anEntry,TPageRef aChild,TBtreePivot& aPromote) const
//
// Split a full left node into left and right, promoting a pivot key via aPromote.
// Part of the contract is not to use aPromote before anEntry.
// We know that the left node is full; prepare the right node and use insert-overflow.
//
	{
	__ASSERT_DEBUG(Node(aLeftNode)->iHead.iCount==iMaxEntries,Panic(EIllegalSplit));
	__ASSERT_DEBUG(Node(aRightNode)->iHead.iCount==0,Panic(EIllegalSplit));
	SNode* const pl=Node(aLeftNode);
	SNode* const pr=Node(aRightNode);
	SEntry *pe=Entry(pl,pl->iHead.iCount);
	--pl->iHead.iCount;
	Entry(pr,0)->iChild=pe->iChild;
	TPtrC8 pivot((TUint8*)pe-KeySize(),KeySize());
	InsertOverflow(aLeftNode,aRightNode,aPos,ETrue,anEntry,aChild,pivot,aPromote);
	}
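
// Editorial comment: overwrite the key at aPos, leaving the surrounding child
// references unchanged.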
EXPORT_C TBool TBtreeInlineIndexOrg::Update(TAny *aNode,TInt aPos,const TDesC8& anEntry) const
	{
	__ASSERT_DEBUG(anEntry.Size()<=KeySize(),Panic(EBadEntrySize));
	__ASSERT_DEBUG(aPos<Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
	Mem::Copy(Entry(Node(aNode),aPos)->iKey,anEntry.Ptr(),KeySize());
	return ETrue;
	}

EXPORT_C TBool TBtreeInlineIndexOrg::Delete(TAny *aNode,TInt aPos) const
//
// Delete the specified key and the following child reference from the node.
// Part of the contract is to guarantee that the entry is deleted, even if the node underflows.
//
	{
	SNode* const pn=Node(aNode);
	__ASSERT_DEBUG(aPos<pn->iHead.iCount,Panic(EBadEntryPos));

	--pn->iHead.iCount;
	TUint8* pe=Entry(pn,aPos)->iKey;
	Mem::Copy(pe,pe+iEntrySize,(pn->iHead.iCount-aPos)*iEntrySize);
	return pn->iHead.iCount>=iMaxEntries>>1; // underflow criterion
	}
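
// Editorial comment: even out the distribution of entries between the nodes
// if there are enough keys, rotating through aPivot and reporting the
// replacement pivot via aNewPivot.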
EXPORT_C TBool TBtreeInlineIndexOrg::Redistribute(TAny *aLeftNode,TAny *aRightNode,const TDesC8& aPivot,TBtreePivot& aNewPivot) const
	{
	return DoRedistribute(aLeftNode,aRightNode,aPivot,aNewPivot)!=NULL;
	}

EXPORT_C void TBtreeInlineIndexOrg::Concatenate(TAny *aLeftNode,const TAny *aRightNode,const TDesC8& aPivot) const
//
// Join LeftNode and RightNode together in LeftNode, with aPivot between them;
// the contract says that it will fit.
//
	{
	SNode* const pl=Node(aLeftNode);
	const SNode* const pr=Node(aRightNode);
	TInt rCount=pr->iHead.iCount;
	TInt lCount=pl->iHead.iCount;
	__ASSERT_DEBUG(lCount+rCount+1<=iMaxEntries,Panic(ECannotConcatenate));
	TInt pSize=aPivot.Size();
	__ASSERT_DEBUG(pSize<=KeySize(),Panic(EBadEntrySize));
	TUint8* pp=Mem::Copy(Entry(pl,lCount)->iKey,aPivot.Ptr(),pSize);
	Mem::Copy(pp,pr->iEntries,rCount*iEntrySize+sizeof(TPageRef));
	pl->iHead.iCount+=rCount+1;
	}
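
// Editorial comment: install aChild as the sole (leftmost) child reference of
// an empty root node.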
EXPORT_C void TBtreeInlineIndexOrg::MakeRoot(TAny* aNode,TPageRef aChild) const
	{
	__ASSERT_DEBUG(Node(aNode)->iHead.iCount==0,Panic(ECannotMakeRoot));
	Entry(Node(aNode),0)->iChild=aChild;
	}

EXPORT_C TInt TBtreeInlineIndexOrg::LastEntry(const TAny* aNode) const
	{
	return Node(aNode)->iHead.iCount;
	}

EXPORT_C const TAny *TBtreeInlineIndexOrg::EntryPtr(const TAny* aNode,TInt aPos) const
	{
	__ASSERT_DEBUG(aPos<Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
	return Entry(Node(aNode),aPos)->iKey;
	}

EXPORT_C TPtrC8 TBtreeInlineIndexOrg::Entry(const TAny* aNode,TInt aPos) const
	{
	return TPtrC8((const TUint8*)EntryPtr(aNode,aPos),KeySize());
	}

EXPORT_C TPageRef TBtreeInlineIndexOrg::ChildNode(const TAny* aNode,TInt aPos) const
//
// Entries are always aligned.
// aPos may equal the entry count: a node with n keys has n+1 child references.
//
	{
	__ASSERT_DEBUG(aPos<=Node(aNode)->iHead.iCount,Panic(EBadEntryPos));
	return Entry(Node(aNode),aPos)->iChild;
	}