1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/persistentdata/persistentstorage/dbms/ustor/US_REC.CPP Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,662 @@
1.4 +// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +//
1.18 +
1.19 +#include "US_STD.H"
1.20 +
1.21 +// Class CDbStoreRecords::TIteratorC
1.22 +
1.23 +class CDbStoreRecords::TIteratorC
1.24 + {
1.25 + friend class CDbStoreRecords;
1.26 +public:
1.27 + inline TDbRecordId Current() const;
1.28 +private:
1.29 + TClusterDes iDes;
1.30 + TDbRecordId iCurrent;
1.31 + };
1.32 +
1.33 +inline TDbRecordId CDbStoreRecords::TIteratorC::Current() const
1.34 + {return iCurrent;}
1.35 +
1.36 +
1.37 +// Class CDbStoreRecords::CIter
1.38 +
1.39 +NONSHARABLE_CLASS(CDbStoreRecords::CIter) : public CDbRecordIter
1.40 + {
1.41 +public:
1.42 + CIter(CDbStoreRecords& aRecords);
1.43 +private:
1.44 + inline CDbStoreRecords& Records() const;
1.45 +//
1.46 + TInt Count() const;
1.47 + TDbRecordId CurrentL();
1.48 + TBool GotoL(TDbPosition aPosition);
1.49 + TBool GotoL(TDbRecordId aRecordId,RDbTableRow& aBuffer);
1.50 + TBool SeekL(const TDbLookupKey& aKey,RDbTable::TComparison aComparison);
1.51 + TDeleted DoDeletedL(TDbPosition aPosition,TDbRecordId aRecordId,const RDbTableRow* aRow);
1.52 +private:
1.53 + CDbStoreRecords::TIteratorC iIter;
1.54 + };
1.55 +
1.56 +CDbStoreRecords::CIter::CIter(CDbStoreRecords& aRecords)
1.57 + : CDbRecordIter(aRecords)
1.58 + {}
1.59 +
1.60 +inline CDbStoreRecords& CDbStoreRecords::CIter::Records() const
1.61 + {return STATIC_CAST(CDbStoreRecords&,Host());}
1.62 +
1.63 +TInt CDbStoreRecords::CIter::Count() const
1.64 + {
1.65 + return Records().Count();
1.66 + }
1.67 +
1.68 +TDbRecordId CDbStoreRecords::CIter::CurrentL()
1.69 + {
1.70 + return iIter.Current();
1.71 + }
1.72 +
1.73 +TBool CDbStoreRecords::CIter::GotoL(TDbPosition aPosition)
1.74 + {
1.75 + return Records().GotoL(aPosition,iIter);
1.76 + }
1.77 +
1.78 +TBool CDbStoreRecords::CIter::GotoL(TDbRecordId aRecordId,RDbTableRow&)
1.79 + {
1.80 + return Records().GotoL(aRecordId,iIter);
1.81 + }
1.82 +
1.83 +TBool CDbStoreRecords::CIter::SeekL(const TDbLookupKey&,RDbTable::TComparison)
1.84 +//
1.85 +// Cannot do this on a table iterator
1.86 +//
1.87 + {
1.88 + Panic(EDbCannotSeek);
1.89 + return EFalse;
1.90 + }
1.91 +
1.92 +CDbStoreRecords::CIter::TDeleted CDbStoreRecords::CIter::DoDeletedL(TDbPosition aPosition,TDbRecordId,const RDbTableRow*)
1.93 +//
1.94 +// Reposition to the next record after a record is deleted.
1.95 +// EDbPrevious is only required for reversed (index) iterators.
1.96 +//
1.97 + {
1.98 + return Records().DeletedL(aPosition,iIter) ? EAtRow : ENoRow;
1.99 + }
1.100 +
1.101 +
1.102 +// Class CDbStoreRecords::TToken
1.103 +
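+// The token is the table's persistent root: the head cluster id, the next
+// record id to be allocated, the record count and the auto-increment seed.
+// It is stored in its own stream (iTokenId) and rewritten by SynchL().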
1.104 +void CDbStoreRecords::TToken::ExternalizeL(RWriteStream& aStream) const
1.105 + {
1.106 + aStream<<iHead<<iNext.Value()<<TCardinality(iCount)<<TUint32(iAutoIncrement);
1.107 + }
1.108 +
1.109 +void CDbStoreRecords::TToken::InternalizeL(RReadStream& aStream)
1.110 + {
1.111 + aStream>>iHead;
1.112 + iNext=aStream.ReadUint32L();
1.113 + TCardinality card;
1.114 + aStream>>card;
1.115 + iCount=card;
1.116 + iAutoIncrement=aStream.ReadUint32L();
1.117 + }
1.118 +
1.119 +
1.120 +// Class CDbStoreRecords
1.121 +
1.122 +CDbStoreRecords::CDbStoreRecords(CClusterCache& aCache)
1.123 + : iCache(aCache)
1.124 + {}
1.125 +
1.126 +CDbStoreRecords::~CDbStoreRecords()
1.127 + {
1.128 + iMap.Close();
1.129 + }
1.130 +
1.131 +TStreamId CDbStoreRecords::CreateL(CClusterCache& aCache)
1.132 +//
1.133 +// Create a new record space in the store without constructing a records object
1.134 +//
1.135 + {
1.136 + TToken token;
1.137 + token.iHead=ClusterId(aCache.Store().ExtendL());
1.138 + aCache.ClusterL().Create(token.iHead);
1.139 + token.iNext=RecordId(token.iHead,0);
1.140 + token.iCount=0;
1.141 + token.iAutoIncrement=0;
1.142 + RStoreWriteStream strm;
1.143 + TStreamId id=strm.CreateLC(aCache.Store());
1.144 + strm<<token;
1.145 + strm.CommitL();
1.146 + CleanupStack::PopAndDestroy();
1.147 + return id;
1.148 + }
1.149 +
1.150 +CDbStoreRecords* CDbStoreRecords::NewL(CClusterCache& aCache,const CDbStoreDef& aDef)
1.151 +//
1.152 +// Create a record space
1.153 +//
1.154 + {
1.155 + CDbStoreRecords* self=new(ELeave) CDbStoreRecords(aCache);
1.156 + CleanupStack::PushL(self);
1.157 + self->iClustering=aDef.Clustering();
1.158 + self->iTokenId=aDef.TokenId();
1.159 + CleanupStack::Pop();
1.160 + return self;
1.161 + }
1.162 +
1.163 +TBool CDbStoreRecords::RestoreL()
1.164 +//
1.165 +// Restore an existing record space from the store
1.166 +//
1.167 + {
1.168 + RStoreReadStream strm;
1.169 + strm.OpenLC(iCache.Store(),iTokenId);
1.170 + strm>>iToken;
1.171 + CleanupStack::PopAndDestroy();
1.172 + iLinks.Invalidate();
1.173 + iMap.ResetL(iToken.iHead);
1.174 + return EFalse;
1.175 + }
1.176 +
1.177 +void CDbStoreRecords::DestroyL()
1.178 +//
1.179 +// Destroy the record space
1.180 +//
1.181 + {
1.182 + iCache.Store().DeleteL(iTokenId);
1.183 + }
1.184 +
1.185 +TInt CDbStoreRecords::CardinalityL(CStreamStore& aStore,const CDbStoreDef& aDef)
1.186 +//
1.187 +// Return the record count without constructing the entire table
1.188 +//
1.189 + {
1.190 + RStoreReadStream strm;
1.191 + strm.OpenLC(aStore,aDef.TokenId());
1.192 + TToken token;
1.193 + strm>>token;
1.194 + CleanupStack::PopAndDestroy();
1.195 + return token.iCount;
1.196 + }
1.197 +
1.198 +void CDbStoreRecords::SynchL()
1.199 +//
1.200 +// Write the persistent token to the store
1.201 +//
1.202 + {
1.203 + RStoreWriteStream strm;
1.204 + strm.ReplaceLC(iCache.Store(),iTokenId);
1.205 + strm<<iToken;
1.206 + strm.CommitL();
1.207 + CleanupStack::PopAndDestroy();
1.208 + }
1.209 +
1.210 +TInt CDbStoreRecords::DiscardL(TClusterId& aCluster)
1.211 +//
1.212 +// Discard the cluster as part of an incremental table drop.
1.213 +// aCluster is updated to the next cluster; the number of records the cluster contained is returned.
1.214 +//
1.215 + {
1.216 + TClusterDes des;
1.217 + DesL(des,aCluster);
1.218 + CCluster* cluster=iCache.Cluster(aCluster);
1.219 + if (cluster)
1.220 + cluster->Discard();
1.221 + iCache.Store().DeleteL(aCluster);
1.222 + aCluster=des.iNext;
1.223 + TInt records=0;
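+	// count the set bits in the membership mask: members&=members-1 clears the
+	// lowest set bit each iteration, e.g. 0x0B -> 0x0A -> 0x08 -> 0 gives 3 records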
1.224 + for (TUint members=des.iMembership;members;members&=members-1)
1.225 + ++records;
1.226 + return records;
1.227 + }
1.228 +
1.229 +TClusterId CDbStoreRecords::AlterL(TClusterId aCluster,CCluster::MAlter& aAlterer)
1.230 + {
1.231 + CCluster& cluster=iCache.ClusterL(aCluster);
1.232 + cluster.AlterL(aAlterer);
1.233 + return cluster.Des().iNext;
1.234 + }
1.235 +
1.236 +TPtrC8 CDbStoreRecords::ReadL(TDbRecordId aRecordId) const
1.237 + {
1.238 + return iCache.ClusterL(ClusterId(aRecordId)).RecordL(RecordIndex(aRecordId));
1.239 + }
1.240 +
1.241 +TUint CDbStoreRecords::AutoIncrementL()
1.242 +//
1.243 +// Provide the next value for an auto-increment column
1.244 +//
1.245 + {
1.246 + return iToken.iAutoIncrement++;
1.247 + }
1.248 +
1.249 +TUint8* CDbStoreRecords::UpdateRecordL(TDbRecordId aRecordId,TInt aNewSize)
1.250 +//
1.251 +// Read the cluster and return a writable pointer to the new record data
1.252 +//
1.253 + {
1.254 + return iCache.ClusterL(ClusterId(aRecordId)).UpdateL(RecordIndex(aRecordId),aNewSize);
1.255 + }
1.256 +
1.257 +
1.258 +TUint8* CDbStoreRecords::DoNewL(TInt aRecordSize)
1.259 +//
1.260 +// Phase 1 of appending a record
1.261 +//
1.262 + {
1.263 + return UpdateRecordL(iToken.iNext,aRecordSize);
1.264 + }
1.265 +
1.266 +TDbRecordId CDbStoreRecords::AppendL()
1.267 +//
1.268 +// Phase 2 of appending a record
1.269 +//
1.270 + {
1.271 + TDbRecordId id=iToken.iNext;
1.272 + TClusterId clusterId=ClusterId(id);
1.273 + CCluster* cluster=iCache.Cluster(clusterId);
1.274 + __ASSERT(cluster);
1.275 + TInt nextIndex=RecordIndex(id)+1;
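+	// if the tail cluster already holds iClustering records or cannot grow any
+	// further, start a new tail cluster: link it after the old one, flush the old
+	// cluster and register the new link in the cluster map and link cache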
1.276 + if (nextIndex>=iClustering || cluster->IsFull())
1.277 + {
1.278 + TClusterId newcluster=ClusterId(iCache.Store().ExtendL());
1.279 + cluster->Relink(newcluster);
1.280 + cluster->FlushL();
1.281 + cluster->Create(newcluster);
1.282 + iMap.BindL(clusterId,newcluster);
1.283 + iLinks.Bind(clusterId,newcluster,iMap);
1.284 + iToken.iNext=RecordId(newcluster,0);
1.285 + }
1.286 + else
1.287 + iToken.iNext=RecordId(clusterId,nextIndex);
1.288 + ++iToken.iCount;
1.289 + return id;
1.290 + }
1.291 +
1.292 +TUint8* CDbStoreRecords::DoReplaceL(TDbRecordId aRecordId,TInt aRecordSize)
1.293 + {
1.294 + return UpdateRecordL(aRecordId,aRecordSize);
1.295 + }
1.296 +
1.297 +void CDbStoreRecords::DoEraseL(TDbRecordId aRecordId)
1.298 + {
1.299 + TClusterId clusterId=ClusterId(aRecordId);
1.300 + CCluster& cluster=iCache.ClusterL(clusterId);
1.301 + if (!cluster.DeleteL(RecordIndex(aRecordId)) && clusterId!=ClusterId(iToken.iNext))
1.302 +		{ // cluster is now empty, but do not drop the last cluster, because it has not yet been fully used
1.303 + TClusterDes des;
1.304 + TClusterId prev=PreviousClusterL(des,clusterId);
1.305 + TClusterId next=cluster.Des().iNext; // next cluster
1.306 + cluster.Discard(); // discard the cluster
1.307 + iCache.Store().DeleteL(clusterId);
1.308 + if (prev!=KNullClusterId)
1.309 + iCache.ClusterL(prev).Relink(next);
1.310 + else
1.311 + iToken.iHead=next;
1.312 + iLinks.Drop(clusterId,next);
1.313 + iMap.DropL(clusterId,next);
1.314 + }
1.315 + --iToken.iCount;
1.316 + }
1.317 +
1.318 +CDbRecordIter* CDbStoreRecords::IteratorL()
1.319 + {
1.320 + return new(ELeave) CIter(*this);
1.321 + }
1.322 +
1.323 +void CDbStoreRecords::CompleteMapL()
1.324 + {
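+	// walk the remaining cluster chain from the last mapped cluster to the end of
+	// the table so that the cluster map covers every cluster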
1.325 + TClusterId cluster=iMap.LastBound();
1.326 + TClusterDes des;
1.327 + DesL(des,cluster);
1.328 + do cluster=NextClusterL(des,cluster); while (cluster!=KNullClusterId);
1.329 + }
1.330 +
1.331 +void CDbStoreRecords::DesL(TClusterDes& aDes,TClusterId aCluster)
1.332 +//
1.333 +// Read just the cluster descriptor
1.334 +//
1.335 + {
1.336 + CCluster* cluster=iCache.Cluster(aCluster);
1.337 + if (cluster)
1.338 + aDes=cluster->Des();
1.339 + else
1.340 + {
1.341 + RStoreReadStream stream;
1.342 + stream.OpenLC(iCache.Store(),aCluster);
1.343 + stream>>aDes;
1.344 + CleanupStack::PopAndDestroy();
1.345 + }
1.346 + }
1.347 +
1.348 +TClusterId CDbStoreRecords::NextClusterL(TClusterDes& aDes,TClusterId aCluster)
1.349 + {
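+	// advancing also records the link just followed in the cluster map and link
+	// cache; reaching the end of the chain marks the map as complete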
1.350 + TClusterId next=aDes.iNext;
1.351 + if (next==KNullClusterId)
1.352 + iMap.Complete(aCluster);
1.353 + else
1.354 + {
1.355 + iMap.BindL(aCluster,next);
1.356 + iLinks.Bind(aCluster,next,iMap);
1.357 + DesL(aDes,next);
1.358 + }
1.359 + return next;
1.360 + }
1.361 +
1.362 +TBool CDbStoreRecords::LocateL(TClusterId aCluster)
1.363 +//
1.364 +// Locate the cluster in the table. If it is not present return EFalse.
1.365 +// If it is present, fill the cluster link cache with the chain of links
1.366 +// leading to aCluster, so that its predecessor can subsequently be
1.367 +// looked up in the link cache (see PreviousClusterL).
1.368 +//
1.369 + {
1.370 + TClusterId cluster=aCluster;
1.371 + __ASSERT(aCluster!=iToken.iHead);
1.372 + __ASSERT(!iLinks.At(aCluster,cluster));
1.373 +//
1.374 + if (!iMap.IsComplete())
1.375 + CompleteMapL();
1.376 +//
1.377 + TClusterDes des;
1.378 + TClusterId links[RClusterMap::ESeparation];
1.379 + TClusterId* p=links;
1.380 + for (TInt n=RClusterMap::ESeparation;n>0;--n)
1.381 + {
1.382 + *p++=cluster;
1.383 + TBool r=iMap.At(cluster,cluster);
1.384 + DesL(des,cluster);
1.385 + if (r)
1.386 + {
1.387 + __ASSERT(cluster!=KNullClusterId); // only iHead->Null
1.388 + iLinks.Reset(cluster);
1.389 + while (aCluster!=des.iNext)
1.390 + cluster=NextClusterL(des,cluster);
1.391 + iLinks.Add(links,p);
1.392 + return ETrue;
1.393 + }
1.394 + cluster=des.iNext;
1.395 + }
1.396 + return EFalse; // not in this table!
1.397 + }
1.398 +
1.399 +TClusterId CDbStoreRecords::PreviousClusterL(TClusterDes& aDes,TClusterId aCluster)
1.400 + {
1.401 + if (aCluster==iToken.iHead)
1.402 + return KNullClusterId;
1.403 + if (!iLinks.At(aCluster,aCluster))
1.404 + {
1.405 + __DEBUG(TBool dbgchk=) LocateL(aCluster);
1.406 + __ASSERT(dbgchk);
1.407 + __DEBUG(dbgchk=) iLinks.At(aCluster,aCluster);
1.408 + __ASSERT(dbgchk);
1.409 + }
1.410 + DesL(aDes,aCluster);
1.411 + return aCluster;
1.412 + }
1.413 +
1.414 +TBool CDbStoreRecords::GotoL(TDbPosition aPosition,TIteratorC& anIterator)
1.415 + {
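+	// each cluster descriptor carries a membership bitmask: bit i is set when
+	// record slot i of the cluster is occupied. Iteration scans the mask forwards
+	// or backwards and hops to the next/previous cluster when the mask is exhausted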
1.416 + TClusterId cluster=ClusterId(anIterator.iCurrent);
1.417 + TInt index=RecordIndex(anIterator.iCurrent);
1.418 + switch (aPosition)
1.419 + {
1.420 + default:
1.421 + __ASSERT(0);
1.422 + case EDbFirst:
1.423 + DesL(anIterator.iDes,cluster=iToken.iHead);
1.424 + iLinks.Reset(cluster);
1.425 + index=-1;
1.426 + // drop through to next
1.427 + case EDbNext:
1.428 + for (;;)
1.429 + {
1.430 + TUint membership=anIterator.iDes.iMembership;
1.431 + while (++index<KMaxClustering)
1.432 + {
1.433 + if ((membership>>index)&1)
1.434 + {
1.435 + __ASSERT(cluster!=ClusterId(iToken.iNext)||index<RecordIndex(iToken.iNext));
1.436 + anIterator.iCurrent=RecordId(cluster,index);
1.437 + return ETrue;
1.438 + }
1.439 + }
1.440 + cluster=NextClusterL(anIterator.iDes,cluster);
1.441 + if (cluster==KNullClusterId)
1.442 + return EFalse; // ran out of data
1.443 + index=-1;
1.444 + }
1.445 + case EDbLast:
1.446 + DesL(anIterator.iDes,cluster=ClusterId(iToken.iNext));
1.447 + index=KMaxClustering;
1.448 + // drop through to previous
1.449 + case EDbPrevious:
1.450 + for (;;)
1.451 + {
1.452 + TUint membership=anIterator.iDes.iMembership;
1.453 + while (--index>=0)
1.454 + {
1.455 + if ((membership>>index)&1)
1.456 + {
1.457 + anIterator.iCurrent=RecordId(cluster,index);
1.458 + return ETrue;
1.459 + }
1.460 + }
1.461 + __ASSERT(index==-1);
1.462 + cluster=PreviousClusterL(anIterator.iDes,cluster);
1.463 + if (cluster==KNullClusterId)
1.464 + return EFalse; // ran out of data
1.465 + index=KMaxClustering;
1.466 + }
1.467 + }
1.468 + }
1.469 +
1.470 +TBool CDbStoreRecords::DeletedL(TDbPosition aPosition,TIteratorC& anIterator)
1.471 +//
1.472 +// The current record has been deleted: clear its membership bit and reposition
1.473 +//
1.474 + {
1.475 + anIterator.iDes.iMembership&=~(1<<RecordIndex(anIterator.iCurrent));
1.476 + return GotoL(aPosition,anIterator);
1.477 + }
1.478 +
1.479 +TBool CDbStoreRecords::GotoL(TDbRecordId aRecordId,TIteratorC& anIterator)
1.480 +//
1.481 +// Set the iterator to the record id, return false if the record is not present
1.482 +//
1.483 + {
1.484 + TClusterId cluster=ClusterId(aRecordId);
1.485 + if (cluster!=iToken.iHead && !iLinks.Has(cluster) && !LocateL(cluster))
1.486 + return EFalse;
1.487 + anIterator.iCurrent=aRecordId;
1.488 + DesL(anIterator.iDes,cluster);
1.489 + return (anIterator.iDes.iMembership>>RecordIndex(aRecordId))&1;
1.490 + }
1.491 +
1.492 +TBool CDbStoreRecords::ExistsL(TDbRecordId aRecordId)
1.493 +//
1.494 +// Ensure that the record is in this table
1.495 +//
1.496 + {
1.497 + TIteratorC iter;
1.498 + return GotoL(aRecordId,iter);
1.499 + }
1.500 +
1.501 +// Class HUnicodeCompressor
1.502 +
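+// These stream filters adapt TUnicodeCompressor/TUnicodeExpander to the store
+// stream interface so that long Unicode text BLOBs are written compressed and
+// read back transparently.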
1.503 +NONSHARABLE_CLASS(HUnicodeCompressor) : public TStreamFilter
1.504 + {
1.505 +public:
1.506 + HUnicodeCompressor(MStreamBuf* aSink);
1.507 +private:
1.508 + void DoRelease();
1.509 + void DoSynchL();
1.510 + TInt Capacity(TInt aMaxLength);
1.511 + TInt FilterL(TAny* aPtr,TInt aMaxLength,const TUint8*& aFrom,const TUint8* anEnd);
1.512 +private:
1.513 + enum {EFlushBufferSize=16};
1.514 +private:
1.515 + TUnicodeCompressor iCompressor;
1.516 + };
1.517 +
1.518 +HUnicodeCompressor::HUnicodeCompressor(MStreamBuf* aSink)
1.519 + {
1.520 + Set(aSink,EAttached|EWrite);
1.521 + }
1.522 +
1.523 +void HUnicodeCompressor::DoRelease()
1.524 + {
1.525 + TStreamFilter::DoRelease();
1.526 + delete this;
1.527 + }
1.528 +
1.529 +TInt HUnicodeCompressor::Capacity(TInt aMaxLength)
1.530 +//
1.531 +// Return the maximum guaranteed input used for aMaxLength output.
1.532 +// SUC at worst expands n chars to 3n bytes
1.533 +//
1.534 + {
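+	// e.g. for aMaxLength==6 output bytes: (6+2)/3==2 characters are guaranteed
+	// to fit, i.e. 4 input bytes may safely be consumed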
1.535 + aMaxLength=(aMaxLength+2)/3; // # chars input guaranteed
1.536 + return aMaxLength*2; // # bytes
1.537 + }
1.538 +
1.539 +TInt HUnicodeCompressor::FilterL(TAny* aPtr,TInt aMaxLength,const TUint8*& aFrom,const TUint8* aEnd)
1.540 + {
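+	// the source range [aFrom,aEnd) holds UTF-16 code units: (aEnd-aFrom)>>1 is
+	// the number of characters available, and used<<1 converts characters consumed
+	// back to bytes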
1.541 + TMemoryUnicodeSource source(reinterpret_cast<const TUint16*>(aFrom));
1.542 + TInt used;
1.543 + iCompressor.CompressL(reinterpret_cast<TUint8*>(aPtr),source,aMaxLength,(aEnd-aFrom)>>1,&aMaxLength,&used);
1.544 + aFrom+=used<<1;
1.545 + return aMaxLength;
1.546 + }
1.547 +
1.548 +void HUnicodeCompressor::DoSynchL()
1.549 + {
1.550 + if (IsCommitted())
1.551 + return;
1.552 +//
1.553 + TUint8 buf[EFlushBufferSize];
1.554 + TInt emit;
1.555 + iCompressor.FlushL(buf,EFlushBufferSize,emit);
1.556 + if (emit)
1.557 + EmitL(buf,emit);
1.558 +//
1.559 + TStreamFilter::DoSynchL();
1.560 + Committed();
1.561 + }
1.562 +
1.563 +// Class HUnicodeExpander
1.564 +
1.565 +NONSHARABLE_CLASS(HUnicodeExpander) : public TStreamFilter
1.566 + {
1.567 +public:
1.568 + HUnicodeExpander(MStreamBuf* aSource);
1.569 +private:
1.570 + void DoRelease();
1.571 +// void DoSynchL();
1.572 + TInt Capacity(TInt aMaxLength);
1.573 + TInt FilterL(TAny* aPtr,TInt aMaxLength,const TUint8*& aFrom,const TUint8* anEnd);
1.574 +private:
1.575 + enum {EFlushBufferSize=16};
1.576 +private:
1.577 + TUnicodeExpander iExpander;
1.578 + };
1.579 +
1.580 +HUnicodeExpander::HUnicodeExpander(MStreamBuf* aSource)
1.581 + {
1.582 + Set(aSource,EAttached|ERead);
1.583 + }
1.584 +
1.585 +void HUnicodeExpander::DoRelease()
1.586 + {
1.587 + TStreamFilter::DoRelease();
1.588 + delete this;
1.589 + }
1.590 +
1.591 +TInt HUnicodeExpander::Capacity(TInt aMaxLength)
1.592 +//
1.593 +// Return the maximum guaranteed input used for aMaxLength output.
1.594 +// SUC at best encodes one character per input byte (ASCII)
1.595 +//
1.596 + {
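+	// each compressed input byte yields at most one character (two bytes) of
+	// output, so aMaxLength/2 input bytes can always be consumed safely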
1.597 + return aMaxLength>>1; // best expansion from ASCII chars
1.598 + }
1.599 +
1.600 +TInt HUnicodeExpander::FilterL(TAny* aPtr,TInt aMaxLength,const TUint8*& aFrom,const TUint8* aEnd)
1.601 + {
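+	// aMaxLength is in bytes: aMaxLength>>1 characters fit in aPtr, and the
+	// number of characters produced is converted back to bytes on return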
1.602 + TMemoryUnicodeSink sink(reinterpret_cast<TUint16*>(aPtr));
1.603 + TInt used;
1.604 + iExpander.ExpandL(sink,aFrom,aMaxLength>>1,aEnd-aFrom,&aMaxLength,&used);
1.605 + aFrom+=used;
1.606 + return aMaxLength<<1;
1.607 + }
1.608 +
1.609 +/*
1.610 +void HUnicodeExpander::DoSynchL()
1.611 + {
1.612 + if (IsCommitted())
1.613 + return;
1.614 +//
1.615 +// TUint8 buf[EFlushBufferSize];
1.616 +// TInt emit;
1.617 +// iCompressor.FlushL(buf,EFlushBufferSize,&emit);
1.618 +// if (emit)
1.619 +// EmitL(buf,emit);
1.620 +//
1.621 + TStreamFilter::DoSynchL();
1.622 + Committed();
1.623 + }
1.624 +*/
1.625 +
1.626 +// Class CDbStoreBlobs
1.627 +
1.628 +CDbStoreBlobs::CDbStoreBlobs(CDbStoreDatabase& aDatabase,TInt aInlineLimit)
1.629 + : iDatabase(aDatabase)
1.630 + {
1.631 + SetInlineLimit(aInlineLimit);
1.632 + }
1.633 +
1.634 +MStreamBuf* CDbStoreBlobs::DoCreateL(TDbBlobId& aBlobId,TDbColType aType)
1.635 + {
1.636 + __ASSERT(TDbCol::IsLong(aType));
1.637 +//
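+	// long text16 columns get a Unicode compression layer stacked on top of the
+	// database's own stream filter; 8-bit text and binary BLOBs use the filter alone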
1.638 + RDbStoreWriteStream strm(iDatabase);
1.639 + aBlobId=strm.CreateLC(iDatabase.Store()).Value();
1.640 + strm.FilterL(aType!=EDbColLongBinary?strm.EText:strm.EBinary,aBlobId);
1.641 + MStreamBuf* blob=strm.Sink();
1.642 + if (aType==EDbColLongText16)
1.643 + blob=new(ELeave) HUnicodeCompressor(blob);
1.644 + CleanupStack::Pop();
1.645 + return blob;
1.646 + }
1.647 +
1.648 +MStreamBuf* CDbStoreBlobs::ReadL(TDbBlobId aBlobId,TDbColType aType) const
1.649 + {
1.650 + __ASSERT(TDbCol::IsLong(aType));
1.651 +//
1.652 + RDbStoreReadStream strm(iDatabase);
1.653 + strm.OpenLC(iDatabase.Store(),aBlobId);
1.654 + strm.FilterL(aType!=EDbColLongBinary?strm.EText:strm.EBinary,aBlobId);
1.655 + MStreamBuf* blob=strm.Source();
1.656 + if (aType==EDbColLongText16)
1.657 + blob=new(ELeave) HUnicodeExpander(blob);
1.658 + CleanupStack::Pop();
1.659 + return blob;
1.660 + }
1.661 +
1.662 +void CDbStoreBlobs::DoDeleteL(TDbBlobId aBlobId)
1.663 + {
1.664 + iDatabase.Store().DeleteL(TStreamId(aBlobId));
1.665 + }