Overview
Comment: Add some savepoint related test cases and fix a few problems. (CVS 6116)
SHA1: 8c62ea4fded2251e9daf16f2a050f943
User & Date: danielk1977 2009-01-06 13:40:08.000
Context
2009-01-06
14:19  Fix compiler warnings. (CVS 6117) (check-in: da770a8dff user: drh tags: trunk)
13:40  Add some savepoint related test cases and fix a few problems. (CVS 6116) (check-in: 8c62ea4fde user: danielk1977 tags: trunk)
00:11  Remove leftover "breakpoint" from the fuzz.test script. (CVS 6115) (check-in: c2482d8877 user: drh tags: trunk)
Changes
Changes to src/pager.c.
︙
14 15 16 17 18 19 20 | ** The pager is used to access a database disk file. It implements ** atomic commit and rollback through the use of a journal file that ** is separate from the database file. The pager also implements file ** locking to prevent two processes from writing the same database ** file simultaneously, or one process from reading the database while ** another is writing. ** | | > | | | | | | 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 | ** The pager is used to access a database disk file. It implements ** atomic commit and rollback through the use of a journal file that ** is separate from the database file. The pager also implements file ** locking to prevent two processes from writing the same database ** file simultaneously, or one process from reading the database while ** another is writing. ** ** @(#) $Id: pager.c,v 1.530 2009/01/06 13:40:08 danielk1977 Exp $ */ #ifndef SQLITE_OMIT_DISKIO #include "sqliteInt.h" /* ** Macros for troubleshooting. Normally turned off */ #if 0 int sqlite3PagerTrace=1; /* True to enable tracing */ #define sqlite3DebugPrintf printf #define PAGERTRACE1(X) if( sqlite3PagerTrace ) sqlite3DebugPrintf(X) #define PAGERTRACE2(X,Y) if( sqlite3PagerTrace ) sqlite3DebugPrintf(X,Y) #define PAGERTRACE3(X,Y,Z) if( sqlite3PagerTrace ) sqlite3DebugPrintf(X,Y,Z) #define PAGERTRACE4(X,Y,Z,W) if( sqlite3PagerTrace ) sqlite3DebugPrintf(X,Y,Z,W) #define PAGERTRACE5(X,Y,Z,W,V) if( sqlite3PagerTrace ) sqlite3DebugPrintf(X,Y,Z,W,V) #else #define PAGERTRACE1(X) #define PAGERTRACE2(X,Y) #define PAGERTRACE3(X,Y,Z) #define PAGERTRACE4(X,Y,Z,W) #define PAGERTRACE5(X,Y,Z,W,V) #endif |
︙
1174 1175 1176 1177 1178 1179 1180 | assert( isMainJrnl || pDone ); assert( isSavepnt || pDone==0 ); rc = read32bits(jfd, offset, &pgno); if( rc!=SQLITE_OK ) return rc; rc = sqlite3OsRead(jfd, aData, pPager->pageSize, offset+4); if( rc!=SQLITE_OK ) return rc; | | < | 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 | assert( isMainJrnl || pDone ); assert( isSavepnt || pDone==0 ); rc = read32bits(jfd, offset, &pgno); if( rc!=SQLITE_OK ) return rc; rc = sqlite3OsRead(jfd, aData, pPager->pageSize, offset+4); if( rc!=SQLITE_OK ) return rc; pPager->journalOff += pPager->pageSize + 4 + (isMainJrnl?4:0); /* Sanity checking on the page. This is more important that I originally ** thought. If a power failure occurs while the journal is being written, ** it could cause invalid data to be written into the journal. We need to ** detect this invalid data (with high probability) and ignore it. */ if( pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){ return SQLITE_DONE; } if( pgno>(Pgno)pPager->dbSize || sqlite3BitvecTest(pDone, pgno) ){ return SQLITE_OK; } if( isMainJrnl ){ rc = read32bits(jfd, offset+pPager->pageSize+4, &cksum); if( rc ) return rc; if( !isSavepnt && pager_cksum(pPager, aData)!=cksum ){ return SQLITE_DONE; } } if( pDone && (rc = sqlite3BitvecSet(pDone, pgno)) ){ return rc; } |
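As a reading aid (not part of this check-in): the checks added above assume the rollback-journal record layout of a 4-byte big-endian page number, followed by one page of data, followed, in the main journal only, by a 4-byte checksum; the sub-journal omits the checksum, which is why the journal offset advances by pageSize + 4 + (isMainJrnl?4:0). The sketch below reads one such record with plain stdio; the function name and error handling are illustrative assumptions, not SQLite code.

    /*
    ** Illustrative sketch only -- not code from this check-in.  Reads a single
    ** rollback-journal record laid out as the playback code above expects:
    ** a 4-byte big-endian page number, pageSize bytes of page data and, for
    ** the main journal only, a trailing 4-byte checksum.
    */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t get32(const unsigned char *p){
      return ((uint32_t)p[0]<<24) | ((uint32_t)p[1]<<16)
           | ((uint32_t)p[2]<<8)  | (uint32_t)p[3];
    }

    static int read_journal_record(
      FILE *jfd,                 /* Open journal file */
      long offset,               /* Byte offset of the record */
      int pageSize,              /* Database page size */
      int isMainJrnl,            /* True for the main journal, false for sub-journal */
      uint32_t *pPgno,           /* OUT: page number */
      unsigned char *aData,      /* OUT: pageSize bytes of page data */
      uint32_t *pCksum           /* OUT: checksum (main journal only) */
    ){
      unsigned char a4[4];
      if( fseek(jfd, offset, SEEK_SET) ) return -1;
      if( fread(a4, 1, 4, jfd)!=4 ) return -1;
      *pPgno = get32(a4);
      if( fread(aData, 1, (size_t)pageSize, jfd)!=(size_t)pageSize ) return -1;
      if( isMainJrnl ){
        if( fread(a4, 1, 4, jfd)!=4 ) return -1;
        *pCksum = get32(a4);
      }
      /* The next record begins at offset + 4 + pageSize + (isMainJrnl ? 4 : 0). */
      return 0;
    }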
︙
1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 | && (pPager->fd->pMethods) ){ i64 ofst = (pgno-1)*(i64)pPager->pageSize; rc = sqlite3OsWrite(pPager->fd, aData, pPager->pageSize, ofst); if( pgno>pPager->dbFileSize ){ pPager->dbFileSize = pgno; } } if( pPg ){ /* No page should ever be explicitly rolled back that is in use, except ** for page 1 which is held in use in order to keep the lock on the ** database active. However such a page may be rolled back as a result ** of an internal error resulting in an automatic call to ** sqlite3PagerRollback(). | > > > > > > > > > > > > > > > > > > > > > > > | 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 | && (pPager->fd->pMethods) ){ i64 ofst = (pgno-1)*(i64)pPager->pageSize; rc = sqlite3OsWrite(pPager->fd, aData, pPager->pageSize, ofst); if( pgno>pPager->dbFileSize ){ pPager->dbFileSize = pgno; } }else if( !isMainJrnl && pPg==0 ){ /* If this is a rollback of a savepoint and data was not written to ** the database and the page is not in-memory, there is a potential ** problem. When the page is next fetched by the b-tree layer, it ** will be read from the database file, which may or may not be ** current. ** ** There are a couple of different ways this can happen. All are quite ** obscure. When not running in synchronous mode, this can only happen ** if the page is on the free-list at the start of the transaction, then ** populated, then moved using sqlite3PagerMovepage(). ** ** The solution is to add an in-memory page to the cache containing ** the data just read from the sub-journal. Mark the page as dirty ** and if the pager requires a journal-sync, then mark the page as ** requiring a journal-sync before it is written. */ assert( isSavepnt ); if( (rc = sqlite3PagerAcquire(pPager, pgno, &pPg, 1)) ){ return rc; } pPg->flags &= ~PGHDR_NEED_READ; sqlite3PcacheMakeDirty(pPg); } if( pPg ){ /* No page should ever be explicitly rolled back that is in use, except ** for page 1 which is held in use in order to keep the lock on the ** database active. However such a page may be rolled back as a result ** of an internal error resulting in an automatic call to ** sqlite3PagerRollback(). |
︙
1658 1659 1660 1661 1662 1663 1664 | /* ** Playback a savepoint. */ static int pagerPlaybackSavepoint(Pager *pPager, PagerSavepoint *pSavepoint){ i64 szJ; /* Size of the full journal */ i64 iHdrOff; /* End of first segment of main-journal records */ Pgno ii; /* Loop counter */ | | | | 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 | /* ** Playback a savepoint. */ static int pagerPlaybackSavepoint(Pager *pPager, PagerSavepoint *pSavepoint){ i64 szJ; /* Size of the full journal */ i64 iHdrOff; /* End of first segment of main-journal records */ Pgno ii; /* Loop counter */ int rc = SQLITE_OK; /* Return code */ Bitvec *pDone = 0; /* Bitvec to ensure pages played back only once */ /* Allocate a bitvec to use to store the set of pages rolled back */ if( pSavepoint ){ pDone = sqlite3BitvecCreate(pSavepoint->nOrig); if( !pDone ){ return SQLITE_NOMEM; } } /* Truncate the database back to the size it was before the ** savepoint being reverted was opened. */ pPager->dbSize = pSavepoint?pSavepoint->nOrig:pPager->dbOrigSize; assert( pPager->state>=PAGER_SHARED ); /* Now roll back all main journal file records that occur after byte ** byte offset PagerSavepoint.iOffset that have a page number less than ** or equal to PagerSavepoint.nOrig. As each record is played back, ** the corresponding bit in bitvec PagerSavepoint.pInSavepoint is ** cleared. |
︙
2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 | disable_simulated_io_errors(); sqlite3BeginBenignMalloc(); pPager->errCode = 0; pPager->exclusiveMode = 0; pager_reset(pPager); if( !MEMDB ){ pagerUnlockAndRollback(pPager); } enable_simulated_io_errors(); sqlite3EndBenignMalloc(); PAGERTRACE2("CLOSE %d\n", PAGERID(pPager)); IOTRACE(("CLOSE %p\n", pPager)) if( pPager->journalOpen ){ | > > > > > > > | 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 | disable_simulated_io_errors(); sqlite3BeginBenignMalloc(); pPager->errCode = 0; pPager->exclusiveMode = 0; pager_reset(pPager); if( !MEMDB ){ /* Set Pager.journalHdr to -1 for the benefit of the pager_playback() ** call which may be made from within pagerUnlockAndRollback(). If it ** is not -1, then the unsynced portion of an open journal file may ** be played back into the database. If a power failure occurs while ** this is happening, the database may become corrupt. */ pPager->journalHdr = -1; pagerUnlockAndRollback(pPager); } enable_simulated_io_errors(); sqlite3EndBenignMalloc(); PAGERTRACE2("CLOSE %d\n", PAGERID(pPager)); IOTRACE(("CLOSE %p\n", pPager)) if( pPager->journalOpen ){ |
︙
2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 | pList->pageHash = pager_pagehash(pList); #endif pList = pList->pDirty; } return SQLITE_OK; } /* ** This function is called by the pcache layer when it has reached some ** soft memory limit. The argument is a pointer to a purgeable Pager ** object. This function attempts to make a single dirty page that has no ** outstanding references (if one exists) clean so that it can be recycled ** by the pcache layer. | > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 | pList->pageHash = pager_pagehash(pList); #endif pList = pList->pDirty; } return SQLITE_OK; } /* ** Add the page to the sub-journal. It is the callers responsibility to ** use subjRequiresPage() to check that it is really required before ** calling this function. */ static int subjournalPage(PgHdr *pPg){ int rc; void *pData = pPg->pData; Pager *pPager = pPg->pPager; i64 offset = pPager->stmtNRec*(4+pPager->pageSize); char *pData2 = CODEC2(pPager, pData, pPg->pgno, 7); PAGERTRACE3("STMT-JOURNAL %d page %d @ %d\n", PAGERID(pPager), pPg->pgno); assert( pageInJournal(pPg) || pPg->pgno>pPager->dbOrigSize ); rc = write32bits(pPager->sjfd, offset, pPg->pgno); if( rc==SQLITE_OK ){ rc = sqlite3OsWrite(pPager->sjfd, pData2, pPager->pageSize, offset+4); } if( rc==SQLITE_OK ){ pPager->stmtNRec++; assert( pPager->nSavepoint>0 ); rc = addToSavepointBitvecs(pPager, pPg->pgno); } return rc; } /* ** This function is called by the pcache layer when it has reached some ** soft memory limit. The argument is a pointer to a purgeable Pager ** object. This function attempts to make a single dirty page that has no ** outstanding references (if one exists) clean so that it can be recycled ** by the pcache layer. |
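The new subjournalPage() above also documents why savepoint rollback can replay sub-journal records without checksums: each record is simply a 4-byte page number followed by the page image, appended at byte offset stmtNRec*(4+pageSize). Purely as an illustration of that layout, and assuming plain stdio rather than the pager's VFS layer, a record writer looks roughly like this:

    /*
    ** Sketch only: one sub-journal record is a 4-byte big-endian page number
    ** followed immediately by the page image -- no header, no checksum.
    */
    #include <stdio.h>
    #include <stdint.h>

    static int write_subjournal_record(
      FILE *sjfd, long offset, int pageSize,
      uint32_t pgno, const unsigned char *aData
    ){
      unsigned char a4[4];
      a4[0] = (unsigned char)(pgno>>24);
      a4[1] = (unsigned char)(pgno>>16);
      a4[2] = (unsigned char)(pgno>>8);
      a4[3] = (unsigned char)(pgno);
      if( fseek(sjfd, offset, SEEK_SET) ) return -1;
      if( fwrite(a4, 1, 4, sjfd)!=4 ) return -1;
      if( fwrite(aData, 1, (size_t)pageSize, sjfd)!=(size_t)pageSize ) return -1;
      return 0;
    }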
︙
2581 2582 2583 2584 2585 2586 2587 | ){ pPager->nRec = 0; rc = writeJournalHdr(pPager); } } if( rc==SQLITE_OK ){ pPg->pDirty = 0; | > > > > | > > | 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 | ){ pPager->nRec = 0; rc = writeJournalHdr(pPager); } } if( rc==SQLITE_OK ){ pPg->pDirty = 0; if( pPg->pgno>pPager->dbSize && subjRequiresPage(pPg) ){ rc = subjournalPage(pPg); } if( rc==SQLITE_OK ){ rc = pager_write_pagelist(pPg); } } if( rc!=SQLITE_OK ){ pager_error(pPager, rc); } } if( rc==SQLITE_OK ){ PAGERTRACE3("STRESS %d page %d\n", PAGERID(pPager), pPg->pgno); sqlite3PcacheMakeClean(pPg); } return rc; } /* |
︙
3383 3384 3385 3386 3387 3388 3389 | /* If the statement journal is open and the page is not in it, ** then write the current page to the statement journal. Note that ** the statement journal format differs from the standard journal format ** in that it omits the checksums and the header. */ if( subjRequiresPage(pPg) ){ | < < < < < < < < < < < | < < < < < < | 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 | /* If the statement journal is open and the page is not in it, ** then write the current page to the statement journal. Note that ** the statement journal format differs from the standard journal format ** in that it omits the checksums and the header. */ if( subjRequiresPage(pPg) ){ rc = subjournalPage(pPg); } } /* Update the database size and return. */ assert( pPager->state>=PAGER_SHARED ); if( pPager->dbSize<pPg->pgno ){ |
︙
Changes to test/permutations.test.
1 2 3 4 5 6 7 8 9 10 11 | # 2008 June 21 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #*********************************************************************** # | | | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 | # 2008 June 21 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #*********************************************************************** # # $Id: permutations.test,v 1.43 2009/01/06 13:40:08 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # Argument processing. # #puts "PERM-DEBUG: argv=$argv" |
︙
708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 | run_tests "journaltest" -description { Check that pages are synced before being written (test_journal.c). } -initialize { set ISQUICK 1 catch {db close} register_jt_vfs -default "" } -shutdown { unregister_jt_vfs } -include [concat $::ALLTESTS savepoint6.test ] -exclude { incrvacuum.test ioerr.test corrupt4.test io.test | > > | 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 | run_tests "journaltest" -description { Check that pages are synced before being written (test_journal.c). } -initialize { set ISQUICK 1 catch {db close} register_jt_vfs -default "" #sqlite3_instvfs binarylog -default binarylog ostrace.bin } -shutdown { #sqlite3_instvfs destroy binarylog unregister_jt_vfs } -include [concat $::ALLTESTS savepoint6.test ] -exclude { incrvacuum.test ioerr.test corrupt4.test io.test |
︙
Changes to test/savepoint.test.
1 2 3 4 5 6 7 8 9 10 11 | # 2008 December 15 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #*********************************************************************** # | | | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 | # 2008 December 15 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #*********************************************************************** # # $Id: savepoint.test,v 1.8 2009/01/06 13:40:08 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl #---------------------------------------------------------------------- # The following tests - savepoint-1.* - test that the SAVEPOINT, RELEASE |
︙
462 463 464 465 466 467 468 469 470 471 472 473 474 475 | PRAGMA incremental_vacuum; ROLLBACK TO one; COMMIT; } execsql { PRAGMA integrity_check } } {ok} # Test oddly named and quoted savepoints. # do_test savepoint-8-1 { execsql { SAVEPOINT "save1" } execsql { RELEASE save1 } } {} | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 | PRAGMA incremental_vacuum; ROLLBACK TO one; COMMIT; } execsql { PRAGMA integrity_check } } {ok} do_test savepoint-7.5.1 { execsql { PRAGMA incremental_vacuum; CREATE TABLE t5(x, y); INSERT INTO t5 VALUES(1, randstr(1000,1000)); INSERT INTO t5 VALUES(2, randstr(1000,1000)); INSERT INTO t5 VALUES(3, randstr(1000,1000)); BEGIN; INSERT INTO t5 VALUES(4, randstr(1000,1000)); INSERT INTO t5 VALUES(5, randstr(1000,1000)); DELETE FROM t5 WHERE x=1 OR x=2; SAVEPOINT one; PRAGMA incremental_vacuum; SAVEPOINT two; INSERT INTO t5 VALUES(1, randstr(1000,1000)); INSERT INTO t5 VALUES(2, randstr(1000,1000)); ROLLBACK TO two; ROLLBACK TO one; COMMIT; PRAGMA integrity_check; } } {ok} do_test savepoint-7.5.2 { execsql { DROP TABLE t5; } } {} # Test oddly named and quoted savepoints. # do_test savepoint-8-1 { execsql { SAVEPOINT "save1" } execsql { RELEASE save1 } } {} |
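The new savepoint-7.5.* cases drive PRAGMA incremental_vacuum from inside nested savepoints and then roll the work back. For readers who want to reproduce the shape of that scenario outside the Tcl harness, here is a rough equivalent through the public C API; the filename, table name and values are placeholders, and the SQL is a simplification of the test script rather than a copy of it.

    /*
    ** Rough equivalent of the savepoint-7.5 scenario via sqlite3_exec().
    ** Sketch only: names and values are placeholders.
    */
    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      char *zErr = 0;
      if( sqlite3_open("demo.db", &db)!=SQLITE_OK ){
        sqlite3_close(db);
        return 1;
      }
      int rc = sqlite3_exec(db,
        "PRAGMA auto_vacuum = incremental;"
        "CREATE TABLE t5(x, y);"
        "INSERT INTO t5 VALUES(1, 'one');"
        "INSERT INTO t5 VALUES(2, 'two');"
        "BEGIN;"
        "  DELETE FROM t5 WHERE x=1;"       /* mirror the test: delete before vacuuming */
        "  SAVEPOINT one;"
        "    PRAGMA incremental_vacuum;"    /* vacuum while a savepoint is open */
        "    SAVEPOINT two;"
        "      INSERT INTO t5 VALUES(1, 'one again');"
        "    ROLLBACK TO two;"              /* undo only the nested insert */
        "  ROLLBACK TO one;"                /* undo the vacuum step as well */
        "COMMIT;"
        "PRAGMA integrity_check;",
        0, 0, &zErr);
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "error: %s\n", zErr);
        sqlite3_free(zErr);
      }
      sqlite3_close(db);
      return rc==SQLITE_OK ? 0 : 1;
    }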
︙
Changes to test/savepoint6.test.
1 2 3 4 5 6 7 8 9 10 11 | # 2009 January 3 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #*********************************************************************** # | | > | > > | > | < | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 | # 2009 January 3 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #*********************************************************************** # # $Id: savepoint6.test,v 1.2 2009/01/06 13:40:08 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl proc sql {zSql} { uplevel db eval [list $zSql] #puts stderr "$zSql ;" } set DATABASE_SCHEMA { PRAGMA auto_vacuum = incremental; CREATE TABLE t1(x, y); CREATE UNIQUE INDEX i1 ON t1(x); CREATE INDEX i2 ON t1(y); } #-------------------------------------------------------------------------- # In memory database state. # # ::lSavepoint is a list containing one entry for each active savepoint. The # first entry in the list corresponds to the most recently opened savepoint. # Each entry consists of two elements: |
︙
56 57 58 59 60 61 62 | # rollback NAME # release NAME # # insert_rows XVALUES # delete_rows XVALUES # proc savepoint {zName} { | | | | > > > > | | | 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 | # rollback NAME # release NAME # # insert_rows XVALUES # delete_rows XVALUES # proc savepoint {zName} { catch { sql "SAVEPOINT $zName" } lappend ::lSavepoint [list $zName [array get ::aEntry]] } proc rollback {zName} { catch { sql "ROLLBACK TO $zName" } for {set i [expr {[llength $::lSavepoint]-1}]} {$i>=0} {incr i -1} { set zSavepoint [lindex $::lSavepoint $i 0] if {$zSavepoint eq $zName} { unset -nocomplain ::aEntry array set ::aEntry [lindex $::lSavepoint $i 1] if {$i+1 < [llength $::lSavepoint]} { set ::lSavepoint [lreplace $::lSavepoint [expr $i+1] end] } break } } } proc release {zName} { catch { sql "RELEASE $zName" } for {set i [expr {[llength $::lSavepoint]-1}]} {$i>=0} {incr i -1} { set zSavepoint [lindex $::lSavepoint $i 0] if {$zSavepoint eq $zName} { set ::lSavepoint [lreplace $::lSavepoint $i end] break } } if {[llength $::lSavepoint] == 0} { #puts stderr "-- End of transaction!!!!!!!!!!!!!" } } proc insert_rows {lX} { foreach x $lX { set y [x_to_y $x] # Update database [db] sql "INSERT OR REPLACE INTO t1 VALUES($x, '$y')" # Update the Tcl database. set ::aEntry($x) $y } } proc delete_rows {lX} { foreach x $lX { # Update database [db] sql "DELETE FROM t1 WHERE x = $x" # Update the Tcl database. unset -nocomplain ::aEntry($x) } } #------------------------------------------------------------------------- |
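The procs above maintain a Tcl-side model of the database (::aEntry) together with a stack of named snapshots (::lSavepoint), so the test can check SQLite's savepoint behaviour against an independent implementation of the same semantics: SAVEPOINT pushes a snapshot, ROLLBACK TO restores the snapshot of the named entry while keeping it open, and RELEASE discards it and everything opened after it. The same stack-of-snapshots idea, reduced to a few lines of C purely for illustration (none of these names appear in the test):

    /*
    ** Sketch only: the bookkeeping idea behind savepoint6.test's shadow
    ** database.  The "database" is a small int array; each savepoint stores
    ** its name plus a full snapshot of that array.
    */
    #include <stdio.h>
    #include <string.h>

    #define NKEYS 8
    #define MAXSP 16

    typedef struct Savepoint {
      char zName[16];
      int aSnapshot[NKEYS];
    } Savepoint;

    static int aDb[NKEYS];          /* the "database" */
    static Savepoint aSp[MAXSP];    /* savepoint stack, oldest first */
    static int nSp = 0;

    static void sp_open(const char *zName){
      if( nSp>=MAXSP ) return;
      Savepoint *p = &aSp[nSp++];
      snprintf(p->zName, sizeof(p->zName), "%s", zName);
      memcpy(p->aSnapshot, aDb, sizeof(aDb));
    }

    static int sp_find(const char *zName){
      for(int i=nSp-1; i>=0; i--){
        if( strcmp(aSp[i].zName, zName)==0 ) return i;
      }
      return -1;
    }

    static void sp_rollback_to(const char *zName){
      int i = sp_find(zName);
      if( i<0 ) return;
      memcpy(aDb, aSp[i].aSnapshot, sizeof(aDb));
      nSp = i+1;                    /* savepoint i itself stays open */
    }

    static void sp_release(const char *zName){
      int i = sp_find(zName);
      if( i>=0 ) nSp = i;           /* discard i and everything above it */
    }

    int main(void){
      aDb[0] = 1;
      sp_open("one");
      aDb[1] = 2;
      sp_open("two");
      aDb[2] = 3;
      sp_rollback_to("two");        /* undoes aDb[2]=3 */
      sp_rollback_to("one");        /* undoes aDb[1]=2 as well */
      sp_release("one");
      printf("%d %d %d\n", aDb[0], aDb[1], aDb[2]);   /* prints: 1 0 0 */
      return 0;
    }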
︙
149 150 151 152 153 154 155 | for {set i 0} {$i<$nRes} {incr i} { lappend ret [expr int(rand()*$nRange)] } return $ret } #------------------------------------------------------------------------- | < < < | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | | | | | | | | | | > | | | | | | | > | 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 | for {set i 0} {$i<$nRes} {incr i} { lappend ret [expr int(rand()*$nRange)] } return $ret } #------------------------------------------------------------------------- proc database_op {} { set i [expr int(rand()*2)] if {$i==0} { insert_rows [random_integers 100 1000] } if {$i==1} { delete_rows [random_integers 100 1000] set i [expr int(rand()*3)] if {$i==0} { sql {PRAGMA incremental_vacuum} } } } proc savepoint_op {} { set names {one two three four five} set cmds {savepoint savepoint savepoint savepoint release rollback} set C [lindex $cmds [expr int(rand()*6)]] set N [lindex $names [expr int(rand()*5)]] #puts stderr " $C $N ; " #flush stderr $C $N return ok } expr srand(0) ############################################################################ ############################################################################ # Start of test cases. do_test savepoint6-1.1 { sql $DATABASE_SCHEMA } {} do_test savepoint6-1.2 { insert_rows { 497 166 230 355 779 588 394 317 290 475 362 193 805 851 564 763 44 930 389 819 765 760 966 280 538 414 500 18 25 287 320 30 382 751 87 283 981 429 630 974 421 270 810 405 } savepoint one insert_rows 858 delete_rows 930 savepoint two execsql {PRAGMA incremental_vacuum} savepoint three insert_rows 144 rollback three rollback two release one execsql {SELECT count(*) FROM t1} } {44} foreach zSetup [list { set testname normal sqlite3 db test.db } { set testname tempdb sqlite3 db "" } { set testname smallcache sqlite3 db test.db sql { PRAGMA cache_size = 10 } }] { unset -nocomplain ::lSavepoint unset -nocomplain ::aEntry db close file delete -force test.db eval $zSetup sql $DATABASE_SCHEMA do_test savepoint6-$testname.setup { savepoint one insert_rows [random_integers 100 1000] release one checkdb } {ok} for {set i 0} {$i < 1000} {incr i} { do_test savepoint6-$testname.$i.1 { savepoint_op checkdb } {ok} do_test savepoint6-$testname.$i.2 { database_op database_op checkdb } {ok} } } unset -nocomplain ::lSavepoint unset -nocomplain ::aEntry finish_test |