diff -r 000000000000 -r 08ec8eefde2f persistentstorage/sqlite3api/TEST/TclScript/bigrow.test
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/persistentstorage/sqlite3api/TEST/TclScript/bigrow.test	Fri Jan 22 11:06:30 2010 +0200
@@ -0,0 +1,223 @@
+# 2001 September 23
+#
+# The author disclaims copyright to this source code.  In place of
+# a legal notice, here is a blessing:
+#
+#    May you do good and not evil.
+#    May you find forgiveness for yourself and forgive others.
+#    May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.  The
+# focus of this file is stressing the library by putting large amounts
+# of data in a single row of a table.
+#
+# $Id: bigrow.test,v 1.5 2004/08/07 23:54:48 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Make a big string that we can use for test data
+#
+do_test bigrow-1.0 {
+  set ::bigstr {}
+  for {set i 1} {$i<=9999} {incr i} {
+    set sep [string index "abcdefghijklmnopqrstuvwxyz" [expr {$i%26}]]
+    append ::bigstr "$sep [format %04d $i] "
+  }
+  string length $::bigstr
+} {69993}
+
+# Make a table into which we can insert some big records.
+#
+do_test bigrow-1.1 {
+  execsql {
+    CREATE TABLE t1(a text, b text, c text);
+    SELECT name FROM sqlite_master
+      WHERE type='table' OR type='index'
+      ORDER BY name
+  }
+} {t1}
+
+do_test bigrow-1.2 {
+  set ::big1 [string range $::bigstr 0 65519]
+  set sql "INSERT INTO t1 VALUES('abc',"
+  append sql "'$::big1', 'xyz');"
+  execsql $sql
+  execsql {SELECT a, c FROM t1}
+} {abc xyz}
+do_test bigrow-1.3 {
+  execsql {SELECT b FROM t1}
+} [list $::big1]
+do_test bigrow-1.4 {
+  set ::big2 [string range $::bigstr 0 65520]
+  set sql "INSERT INTO t1 VALUES('abc2',"
+  append sql "'$::big2', 'xyz2');"
+  set r [catch {execsql $sql} msg]
+  lappend r $msg
+} {0 {}}
+do_test bigrow-1.4.1 {
+  execsql {SELECT b FROM t1 ORDER BY c}
+} [list $::big1 $::big2]
+do_test bigrow-1.4.2 {
+  execsql {SELECT c FROM t1 ORDER BY c}
+} {xyz xyz2}
+do_test bigrow-1.4.3 {
+  execsql {DELETE FROM t1 WHERE a='abc2'}
+  execsql {SELECT c FROM t1}
+} {xyz}
+
+do_test bigrow-1.5 {
+  execsql {
+    UPDATE t1 SET a=b, b=a;
+    SELECT b,c FROM t1
+  }
+} {abc xyz}
+do_test bigrow-1.6 {
+  execsql {
+    SELECT * FROM t1
+  }
+} [list $::big1 abc xyz]
+do_test bigrow-1.7 {
+  execsql {
+    INSERT INTO t1 VALUES('1','2','3');
+    INSERT INTO t1 VALUES('A','B','C');
+    SELECT b FROM t1 WHERE a=='1';
+  }
+} {2}
+do_test bigrow-1.8 {
+  execsql "SELECT b FROM t1 WHERE a=='$::big1'"
+} {abc}
+do_test bigrow-1.9 {
+  execsql "SELECT b FROM t1 WHERE a!='$::big1' ORDER BY a"
+} {2 B}
+
+# Try doing some indexing on big columns
+#
+do_test bigrow-2.1 {
+  execsql {
+    CREATE INDEX i1 ON t1(a)
+  }
+  execsql "SELECT b FROM t1 WHERE a=='$::big1'"
+} {abc}
+do_test bigrow-2.2 {
+  execsql {
+    UPDATE t1 SET a=b, b=a
+  }
+  execsql "SELECT b FROM t1 WHERE a=='abc'"
+} [list $::big1]
+do_test bigrow-2.3 {
+  execsql {
+    UPDATE t1 SET a=b, b=a
+  }
+  execsql "SELECT b FROM t1 WHERE a=='$::big1'"
+} {abc}
+catch {unset ::bigstr}
+catch {unset ::big1}
+catch {unset ::big2}
+
+# Most of the tests above were created back when rows were limited in
+# size to 64K.  Now rows can be much bigger.  Test that logic.  Also
+# make sure things work correctly at the transition boundaries between
+# row sizes of 256 to 257 bytes and from 65536 to 65537 bytes.
+#
+# We begin by testing the 256..257 transition.
+#
+do_test bigrow-3.1 {
+  execsql {
+    DELETE FROM t1;
+    INSERT INTO t1(a,b,c) VALUES('one','abcdefghijklmnopqrstuvwxyz0123','hi');
+  }
+  execsql {SELECT a,length(b),c FROM t1}
+} {one 30 hi}
+do_test bigrow-3.2 {
+  execsql {
+    UPDATE t1 SET b=b||b;
+    UPDATE t1 SET b=b||b;
+    UPDATE t1 SET b=b||b;
+  }
+  execsql {SELECT a,length(b),c FROM t1}
+} {one 240 hi}
+for {set i 1} {$i<10} {incr i} {
+  do_test bigrow-3.3.$i {
+    execsql "UPDATE t1 SET b=b||'$i'"
+    execsql {SELECT a,length(b),c FROM t1}
+  } "one [expr {240+$i}] hi"
+}
+
+# Now test the 65536..65537 row-size transition.
+#
+do_test bigrow-4.1 {
+  execsql {
+    DELETE FROM t1;
+    INSERT INTO t1(a,b,c) VALUES('one','abcdefghijklmnopqrstuvwxyz0123','hi');
+  }
+  execsql {SELECT a,length(b),c FROM t1}
+} {one 30 hi}
+do_test bigrow-4.2 {
+  execsql {
+    UPDATE t1 SET b=b||b;
+    UPDATE t1 SET b=b||b;
+    UPDATE t1 SET b=b||b;
+    UPDATE t1 SET b=b||b;
+    UPDATE t1 SET b=b||b;
+    UPDATE t1 SET b=b||b;
+    UPDATE t1 SET b=b||b;
+    UPDATE t1 SET b=b||b;
+    UPDATE t1 SET b=b||b;
+    UPDATE t1 SET b=b||b;
+    UPDATE t1 SET b=b||b;
+    UPDATE t1 SET b=b||b;
+  }
+  execsql {SELECT a,length(b),c FROM t1}
+} {one 122880 hi}
+do_test bigrow-4.3 {
+  execsql {
+    UPDATE t1 SET b=substr(b,1,65515)
+  }
+  execsql {SELECT a,length(b),c FROM t1}
+} {one 65515 hi}
+for {set i 1} {$i<10} {incr i} {
+  do_test bigrow-4.4.$i {
+    execsql "UPDATE t1 SET b=b||'$i'"
+    execsql {SELECT a,length(b),c FROM t1}
+  } "one [expr {65515+$i}] hi"
+}
+
+# Check to make sure the library recovers safely if a row contains
+# too much data.
+#
+do_test bigrow-5.1 {
+  execsql {
+    DELETE FROM t1;
+    INSERT INTO t1(a,b,c) VALUES('one','abcdefghijklmnopqrstuvwxyz0123','hi');
+  }
+  execsql {SELECT a,length(b),c FROM t1}
+} {one 30 hi}
+set i 1
+for {set sz 60} {$sz<1048560} {incr sz $sz} {
+  do_test bigrow-5.2.$i {
+    execsql {
+      UPDATE t1 SET b=b||b;
+      SELECT a,length(b),c FROM t1;
+    }
+  } "one $sz hi"
+  incr i
+}
+do_test bigrow-5.3 {
+  catchsql {UPDATE t1 SET b=b||b}
+} {0 {}}
+do_test bigrow-5.4 {
+  execsql {SELECT length(b) FROM t1}
+} 1966080
+do_test bigrow-5.5 {
+  catchsql {UPDATE t1 SET b=b||b}
+} {0 {}}
+do_test bigrow-5.6 {
+  execsql {SELECT length(b) FROM t1}
+} 3932160
+do_test bigrow-5.99 {
+  execsql {DROP TABLE t1}
+} {}
+
+finish_test
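
Note: tester.tcl, sourced at the top of the file but not part of this change, supplies the do_test, execsql, and catchsql helpers used throughout. As a rough sketch of the pattern, and not the actual harness code, the helpers behave approximately as below, assuming the sqlite3 Tcl bindings and a scratch database file:

  # Simplified stand-ins for the tester.tcl helpers; the real harness
  # also tracks failure counts, supports test filtering, and handles
  # cleanup.  "db" is a handle opened via the sqlite3 Tcl interface.
  package require sqlite3
  sqlite3 db test.db

  # execsql: run SQL and return the result rows as one flat Tcl list.
  proc execsql {sql} {
    return [uplevel #0 [list db eval $sql]]
  }

  # catchsql: like execsql, but return {rc message} instead of raising
  # an error, so tests can assert on expected failure cases.
  proc catchsql {sql} {
    set rc [catch {uplevel #0 [list db eval $sql]} msg]
    return [list $rc $msg]
  }

  # do_test: evaluate a script at global scope and compare its result
  # against the expected value by exact string match.
  proc do_test {name script expected} {
    set result [uplevel #0 $script]
    if {$result ne $expected} {
      puts stderr "! $name expected \[$expected\] got \[$result\]"
    } else {
      puts "$name... Ok"
    }
  }

For example, bigrow-1.0 builds its test string from 9999 seven-character chunks (one separator letter, a space, four digits, and a trailing space), so string length is expected to return 9999*7 = 69993. In stock SQLite these scripts are driven by the testfixture binary; how this Symbian port invokes them is not shown in this diff.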