os/persistentdata/persistentstorage/sqlite3api/TEST/TclScript/pageropt.test
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/persistentdata/persistentstorage/sqlite3api/TEST/TclScript/pageropt.test	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,199 @@
     1.4 +# 2007 April 12
     1.5 +#
     1.6 +# The author disclaims copyright to this source code.  In place of
     1.7 +# a legal notice, here is a blessing:
     1.8 +#
     1.9 +#    May you do good and not evil.
    1.10 +#    May you find forgiveness for yourself and forgive others.
    1.11 +#    May you share freely, never taking more than you give.
    1.12 +#
    1.13 +#***********************************************************************
    1.14 +# This file implements regression tests for SQLite library.
    1.15 +# The focus of the tests in this file are to verify that the
    1.16 +# pager optimizations implemented in version 3.3.14 work.
    1.17 +#
    1.18 +# $Id: pageropt.test,v 1.5 2008/08/20 14:49:25 danielk1977 Exp $
    1.19 +
    1.20 +set testdir [file dirname $argv0]
    1.21 +source $testdir/tester.tcl
    1.22 +
    1.23 +ifcapable {!pager_pragmas||secure_delete} {
    1.24 +  finish_test
    1.25 +  return
    1.26 +}
    1.27 +
    1.28 +# Run the SQL statement supplied by the argument and return
     1.29 +# the results.  Prepend three integers to the beginning of the
    1.30 +# result which are
    1.31 +#
    1.32 +#     (1)  The number of page reads from the database
    1.33 +#     (2)  The number of page writes to the database
    1.34 +#     (3)  The number of page writes to the journal
     1.35 +#     (The pgfree counter is imported via global but not returned.)
    1.36 +#
    1.37 +proc pagercount_sql {sql {db db}} {
    1.38 +  global sqlite3_pager_readdb_count
    1.39 +  global sqlite3_pager_writedb_count
    1.40 +  global sqlite3_pager_writej_count
    1.41 +  global sqlite3_pager_pgfree_count
    1.42 +  set sqlite3_pager_readdb_count 0
    1.43 +  set sqlite3_pager_writedb_count 0
    1.44 +  set sqlite3_pager_writej_count 0
    1.45 +  set r [$db eval $sql]
    1.46 +  set cnt [list $sqlite3_pager_readdb_count \
    1.47 +                $sqlite3_pager_writedb_count \
    1.48 +                $sqlite3_pager_writej_count ]
    1.49 +  return [concat $cnt $r]
    1.50 +}
    1.51 +
    1.52 +# Setup the test database
    1.53 +#
    1.54 +do_test pageropt-1.1 {
    1.55 +  sqlite3_soft_heap_limit 0
    1.56 +  execsql {
    1.57 +    PRAGMA auto_vacuum = OFF;
    1.58 +    PRAGMA page_size = 1024;
    1.59 +  }
    1.60 +  pagercount_sql {
    1.61 +    CREATE TABLE t1(x);
    1.62 +  }
    1.63 +} {0 2 0}
    1.64 +do_test pageropt-1.2 {
    1.65 +  pagercount_sql {
    1.66 +    INSERT INTO t1 VALUES(randomblob(5000));
    1.67 +  }
    1.68 +} {0 6 2}
    1.69 +
     1.70 +# Verify that values remain in cache for subsequent reads.
    1.71 +# We should not have to go back to disk.
    1.72 +#
    1.73 +do_test pageropt-1.3 {
    1.74 +  pagercount_sql {
    1.75 +    SELECT length(x) FROM t1
    1.76 +  }
    1.77 +} {0 0 0 5000}
    1.78 +
     1.79 +# If another connection reads the database, the original cache
    1.80 +# remains valid.
    1.81 +#
    1.82 +sqlite3 db2 test.db
    1.83 +set blobcontent [db2 one {SELECT hex(x) FROM t1}]
    1.84 +do_test pageropt-1.4 {
    1.85 +  pagercount_sql {
    1.86 +    SELECT hex(x) FROM t1
    1.87 +  }
    1.88 +} [list 0 0 0 $blobcontent]
    1.89 +
     1.90 +# But if the other connection modifies the database, then the cache
    1.91 +# must refill.
    1.92 +#
    1.93 +do_test pageropt-1.5 {
    1.94 +  db2 eval {CREATE TABLE t2(y)}
    1.95 +  pagercount_sql {
    1.96 +    SELECT hex(x) FROM t1
    1.97 +  }
    1.98 +} [list 6 0 0 $blobcontent]
    1.99 +do_test pageropt-1.6 {
   1.100 +  pagercount_sql {
   1.101 +    SELECT hex(x) FROM t1
   1.102 +  }
   1.103 +} [list 0 0 0 $blobcontent]
   1.104 +
   1.105 +# Verify that the last page of an overflow chain is not read from
   1.106 +# disk when deleting a row.  The one row of t1(x) has four pages
   1.107 +# of overflow.  So deleting that row from t1 should involve reading
   1.108 +# the sqlite_master table (1 page) the main page of t1 (1 page) and
   1.109 +# the three overflow pages of t1 for a total of 5 pages.
   1.110 +#
   1.111 +# Pages written are page 1 (for the freelist pointer), the root page
   1.112 +# of the table, and one of the overflow chain pointers because it
   1.113 +# becomes the trunk of the freelist.  Total 3.
   1.114 +#
   1.115 +do_test pageropt-2.1 {
   1.116 +  db close
   1.117 +  sqlite3 db test.db
   1.118 +  pagercount_sql {
   1.119 +    DELETE FROM t1 WHERE rowid=1
   1.120 +  }
   1.121 +} {5 3 3}
   1.122 +
   1.123 +# When pulling pages off of the freelist, there is no reason
   1.124 +# to actually bring in the old content.
   1.125 +#
   1.126 +do_test pageropt-2.2 {
   1.127 +  db close
   1.128 +  sqlite3 db test.db
   1.129 +  pagercount_sql {
   1.130 +    INSERT INTO t1 VALUES(randomblob(1500));
   1.131 +  }
   1.132 +} {3 4 3}
   1.133 +do_test pageropt-2.3 {
   1.134 +  pagercount_sql {
   1.135 +    INSERT INTO t1 VALUES(randomblob(1500));
   1.136 +  }
   1.137 +} {0 4 3}
   1.138 +
   1.139 +# Note the new optimization that when pulling the very last page off of the
   1.140 +# freelist we do not read the content of that page.
   1.141 +#
   1.142 +do_test pageropt-2.4 {
   1.143 +  pagercount_sql {
   1.144 +    INSERT INTO t1 VALUES(randomblob(1500));
   1.145 +  }
   1.146 +} {0 5 3}
   1.147 +
   1.148 +# Appending a large quantity of data does not involve writing much
   1.149 +# to the journal file.
   1.150 +#
   1.151 +do_test pageropt-3.1 {
   1.152 +  pagercount_sql {
   1.153 +    INSERT INTO t2 SELECT * FROM t1;
   1.154 +  }
   1.155 +} {1 7 2}
   1.156 +
   1.157 +# Once again, we do not need to read the last page of an overflow chain
   1.158 +# while deleting.
   1.159 +#
   1.160 +do_test pageropt-3.2 {
   1.161 +  pagercount_sql {
   1.162 +    DROP TABLE t2;
   1.163 +  }
   1.164 +} {0 2 3}
   1.165 +do_test pageropt-3.3 {
   1.166 +  pagercount_sql {
   1.167 +    DELETE FROM t1;
   1.168 +  }
   1.169 +} {0 3 3}
   1.170 +
   1.171 +# There are now 11 pages on the freelist.  Move them all into an
   1.172 +# overflow chain by inserting a single large record.  Starting from
   1.173 +# a cold cache, only page 1, the root page of table t1, and the trunk
   1.174 +# of the freelist need to be read (3 pages).  And only those three
   1.175 +# pages need to be journalled.  But 13 pages need to be written:
   1.176 +# page1, the root page of table t1, and an 11 page overflow chain.
   1.177 +#
   1.178 +do_test pageropt-4.1 {
   1.179 +  db close
   1.180 +  sqlite3 db test.db
   1.181 +  pagercount_sql {
   1.182 +    INSERT INTO t1 VALUES(randomblob(11300))
   1.183 +  }
   1.184 +} {3 13 3}
   1.185 +
     1.186 +# Now we delete that big entry starting from a cold cache and an
   1.187 +# empty freelist.  The first 10 of the 11 pages overflow chain have
   1.188 +# to be read, together with page1 and the root of the t1 table.  12
   1.189 +# reads total.  But only page1, the t1 root, and the trunk of the
   1.190 +# freelist need to be journalled and written back.
   1.191 +#
   1.192 +do_test pageropt-4.2 {
   1.193 +  db close
   1.194 +  sqlite3 db test.db
   1.195 +  pagercount_sql {
   1.196 +    DELETE FROM t1
   1.197 +  }
   1.198 +} {12 3 3}
   1.199 +
   1.200 +sqlite3_soft_heap_limit $soft_limit
   1.201 +catch {db2 close}
   1.202 +finish_test