Overview
Comment: Remove the syncOk argument to pager_recycle. Now that sqlite3_memory_release uses a global lru list of page, it is no longer required. (CVS 4364)
Downloads: Tarball | ZIP archive
Timelines: family | ancestors | descendants | both | trunk
Files: files | file ages | folders
SHA1: fb27692ab10b22851b265348bb6b3e1d
User & Date: danielk1977 2007-09-01 16:16:15.000
Context
2007-09-01
17:00 | Remove code for calling the SQL function randstr() with 0 or 1 argument, as it is registered with sqlite as requiring exactly 2. Also test io errors in sqlite3_release_memory(). (CVS 4365) (check-in: 5842f68c1b user: danielk1977 tags: trunk)
16:16 | Remove the syncOk argument to pager_recycle. Now that sqlite3_memory_release uses a global lru list of page, it is no longer required. (CVS 4364) (check-in: fb27692ab1 user: danielk1977 tags: trunk)
11:04 | Test sqlite3_bind_zeroblob(). Only include sqlite3Utf8To8 in builds if SQLITE_DEBUG is defined. (CVS 4363) (check-in: fde6142b7b user: danielk1977 tags: trunk)
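Before the file-by-file diff below, the interface change at the center of this check-in, sketched as plain prototypes. This is illustrative only: pager_recycle() is a static routine inside pager.c, the typedefs are placeholders for its real structures, and the position of the removed syncOk parameter is inferred from the commit comment rather than shown on this page.

  /* Illustrative prototypes only; not SQLite's real declarations. */
  typedef struct Pager Pager;    /* placeholder for the pager object */
  typedef struct PgHdr PgHdr;    /* placeholder for a page header */

  /* Before (through CVS 4363): the caller said whether an fsync() of the
  ** journal was acceptable while hunting for a page to recycle. */
  int pager_recycle_before(Pager *pPager, int syncOk, PgHdr **ppPg);

  /* After this check-in: the flag is gone.  With every unreferenced page on
  ** one global LRU list, sqlite3_release_memory() can always pick the page
  ** that costs the least to recycle, so callers no longer opt out of the
  ** journal sync explicitly. */
  int pager_recycle_after(Pager *pPager, PgHdr **ppPg);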
Changes
Changes to src/os_unix.c.
︙

   static const unsigned char zChars[] =
     "abcdefghijklmnopqrstuvwxyz"
     "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
     "0123456789";
   int i, j;
   struct stat buf;
   const char *zDir = ".";
+
+  /* It's odd to simulate an io-error here, but really this is just
+  ** using the io-error infrastructure to test that SQLite handles this
+  ** function failing.
+  */
+  SimulateIOError( return SQLITE_ERROR );
+
   azDirs[0] = sqlite3_temp_directory;
   for(i=0; i<sizeof(azDirs)/sizeof(azDirs[0]); i++){
     if( azDirs[i]==0 ) continue;
     if( stat(azDirs[i], &buf) ) continue;
     if( !S_ISDIR(buf.st_mode) ) continue;
     if( access(azDirs[i], 07) ) continue;
     zDir = azDirs[i];

︙

 ** zPath.
 **
 ** zOut points to a buffer of at least sqlite3_vfs.mxPathname bytes
 ** (in this case, MAX_PATHNAME bytes). The full-path is written to
 ** this buffer before returning.
 */
 static int unixFullPathname(sqlite3_vfs *pVfs, const char *zPath, char *zOut){
+
+  /* It's odd to simulate an io-error here, but really this is just
+  ** using the io-error infrastructure to test that SQLite handles this
+  ** function failing. This function could fail if, for example, the
+  ** current working directly has been unlinked.
+  */
+  SimulateIOError( return SQLITE_ERROR );
+
   assert( pVfs->mxPathname==MAX_PATHNAME );
   zOut[MAX_PATHNAME-1] = '\0';
   if( zPath[0]=='/' ){
     sqlite3_snprintf(MAX_PATHNAME, zOut, "%s", zPath);
   }else{
     int nCwd;
     if( getcwd(zOut, MAX_PATHNAME-1)==0 ){

︙
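Both os_unix.c hunks guard a new early return with SimulateIOError(). As a rough sketch of how that style of fault-injection hook works (SQLite's real macro lives in its OS-layer test support and is only compiled into test builds; the counter names and wiring below are assumptions for illustration, not the library's actual definitions):

  /* A simplified io-error injection hook in the spirit of SimulateIOError().
  ** A test harness sets io_error_pending to N; the N-th guarded call then
  ** executes the failure code supplied at the call site.  All names here
  ** are illustrative, not SQLite's. */
  #include <stdio.h>

  static int io_error_pending = 0;  /* countdown until an error is injected */
  static int io_error_hit = 0;      /* set when an error has been injected */

  #define SimulateIOError(CODE)                      \
    if( io_error_pending && io_error_pending--==1 ){ \
      io_error_hit = 1;                              \
      CODE;                                          \
    }

  /* Example call site, mirroring the unixFullPathname() hunk above. */
  static int demoFullPathname(const char *zPath, char *zOut, int nOut){
    SimulateIOError( return 1 );            /* stand-in for SQLITE_ERROR */
    snprintf(zOut, (size_t)nOut, "%s", zPath);
    return 0;                               /* stand-in for SQLITE_OK */
  }

  int main(void){
    char zOut[64];
    int rc;
    io_error_pending = 1;                   /* fail the very next guarded call */
    rc = demoFullPathname("test.db", zOut, sizeof(zOut));
    printf("rc=%d hit=%d\n", rc, io_error_hit);
    return 0;
  }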
Changes to src/pager.c.
︙

 ** The pager is used to access a database disk file. It implements
 ** atomic commit and rollback through the use of a journal file that
 ** is separate from the database file. The pager also implements file
 ** locking to prevent two processes from writing the same database
 ** file simultaneously, or one process from reading the database while
 ** another is writing.
 **
+** @(#) $Id: pager.c,v 1.384 2007/09/01 16:16:15 danielk1977 Exp $
 */
 #ifndef SQLITE_OMIT_DISKIO
 #include "sqliteInt.h"
 #include <assert.h>
 #include <string.h>

 /*

︙

 /*
 ** Try to find a page in the cache that can be recycled.
 **
 ** This routine may return SQLITE_IOERR, SQLITE_FULL or SQLITE_OK. It
 ** does not set the pPager->errCode variable.
 */
+static int pager_recycle(Pager *pPager, PgHdr **ppPg){
   PgHdr *pPg;
   *ppPg = 0;

+  /* It is illegal to call this function unless the pager object
+  ** pointed to by pPager has at least one free page (page with nRef==0).
+  */
+  assert(!MEMDB);
+  assert(pPager->lru.pFirst);

   /* Find a page to recycle. Try to locate a page that does not
   ** require us to do an fsync() on the journal.
   */
   pPg = pPager->lru.pFirstSynced;

   /* If we could not find a page that does not require an fsync()
   ** on the journal file then fsync the journal file. This is a
   ** very slow operation, so we work hard to avoid it. But sometimes
   ** it can't be helped.
   */
+  if( pPg==0 && pPager->lru.pFirst){
     int iDc = sqlite3OsDeviceCharacteristics(pPager->fd);
     int rc = syncJournal(pPager);
     if( rc!=0 ){
       return rc;
     }
     if( pPager->fullSync && 0==(iDc&SQLITE_IOCAP_SAFE_APPEND) ){
       /* If in full-sync mode, write a new journal header into the

︙

       rc = writeJournalHdr(pPager);
       if( rc!=0 ){
         return rc;
       }
     }
     pPg = pPager->lru.pFirst;
   }
-  (3 lines removed; their text is not shown on this page)

   assert( pPg->nRef==0 );

   /* Write the page to the database file if it is dirty.
   */
   if( pPg->dirty ){
     int rc;

︙

     */
     if( !pPg ) break;

     pPager = pPg->pPager;
     assert(!pPg->needSync || pPg==pPager->lru.pFirst);
     assert(pPg->needSync || pPg==pPager->lru.pFirstSynced);

+    rc = pager_recycle(pPager, &pRecycled);
     assert(pRecycled==pPg || rc!=SQLITE_OK);
     if( rc==SQLITE_OK ){
       /* We've found a page to free. At this point the page has been
       ** removed from the page hash-table, free-list and synced-list
       ** (pFirstSynced). It is still in the all pages (pAll) list.
       ** Remove it from this list before freeing.
       **

︙

     }
     pPg->pPager = pPager;
     pPg->pNextAll = pPager->pAll;
     pPager->pAll = pPg;
     pPager->nPage++;
   }else{
     /* Recycle an existing page with a zero ref-count. */
+    rc = pager_recycle(pPager, &pPg);
     if( rc==SQLITE_BUSY ){
       rc = SQLITE_IOERR_BLOCKED;
     }
     if( rc!=SQLITE_OK ){
       goto pager_allocate_out;
     }
     assert( pPager->state>=SHARED_LOCK );

︙
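The reasoning behind the removed flag shows up in the sqlite3_release_memory() loop above: the page to reclaim is taken from the global LRU list, preferring a page that needs no journal sync and only falling back to one that does. The toy program below models just that victim-selection policy; none of its names exist in SQLite, and the real pager keeps dedicated pFirst/pFirstSynced pointers instead of scanning the list.

  /* Toy model of the "prefer an already-synced page" policy used by the
  ** pager_recycle()/sqlite3_release_memory() code above.  Illustrative only. */
  #include <stdio.h>

  typedef struct ToyPage ToyPage;
  struct ToyPage {
    int id;
    int needSync;      /* nonzero: recycling this page would force an fsync */
    int nRef;          /* nonzero: page is pinned and cannot be recycled */
    ToyPage *pNext;    /* next page on the LRU list, least recently used first */
  };

  /* Return the page to recycle: the first unpinned page that needs no journal
  ** sync if one exists, otherwise the first unpinned page of any kind. */
  static ToyPage *pickVictim(ToyPage *pLru){
    ToyPage *pFallback = 0;
    ToyPage *p;
    for(p=pLru; p; p=p->pNext){
      if( p->nRef ) continue;               /* pinned: skip */
      if( !p->needSync ) return p;          /* cheap victim: no fsync needed */
      if( pFallback==0 ) pFallback = p;     /* remember the first costly victim */
    }
    return pFallback;                       /* NULL if every page is pinned */
  }

  int main(void){
    ToyPage c = {3, 0, 0, 0};               /* unpinned, already synced */
    ToyPage b = {2, 1, 0, &c};              /* unpinned, needs a sync   */
    ToyPage a = {1, 1, 1, &b};              /* pinned                   */
    ToyPage *pVictim = pickVictim(&a);
    printf("recycle page %d\n", pVictim ? pVictim->id : -1);   /* prints 3 */
    return 0;
  }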
Changes to test/shared.test.
 # 2005 December 30
 #
 # The author disclaims copyright to this source code. In place of
 # a legal notice, here is a blessing:
 #
 # May you do good and not evil.
 # May you find forgiveness for yourself and forgive others.
 # May you share freely, never taking more than you give.
 #
 #***********************************************************************
 #
+# $Id: shared.test,v 1.26 2007/09/01 16:16:16 danielk1977 Exp $

 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
 db close

 ifcapable !shared_cache {
   finish_test

︙

 } [list [file normalize test.db] 2]
 }

 do_test shared-$av.11.11 {
   db close
   db2 close
 } {}

+# This tests that if it is impossible to free any pages, SQLite will
+# exceed the limit set by PRAGMA cache_size.
+do_test shared-$av.12.1 {
+  file delete -force test.db test.db-journal
+  sqlite3 db test.db
+  execsql {
+    PRAGMA cache_size = 10;
+    PRAGMA cache_size;
+  }
+} {10}
+do_test shared-$av.12.2 {
+  set ::db_handles [list]
+  for {set i 1} {$i < 15} {incr i} {
+    lappend ::db_handles db$i
+    sqlite3 db$i test.db
+    execsql "CREATE TABLE db${i}(a, b, c)" db$i
+    execsql "INSERT INTO db${i} VALUES(1, 2, 3)"
+  }
+} {}
+proc nested_select {handles} {
+  [lindex $handles 0] eval "SELECT * FROM [lindex $handles 0]" {
+    lappend ::res $a $b $c
+    if {[llength $handles]>1} {
+      nested_select [lrange $handles 1 end]
+    }
+  }
+}
+do_test shared-$av.12.3 {
+  set ::res [list]
+  nested_select $::db_handles
+  set ::res
+} [string range [string repeat "1 2 3 " [llength $::db_handles]] 0 end-1]
+
+do_test shared-$av.12.X {
+  db close
+  foreach h $::db_handles { $h close }
+} {}
 }

 sqlite3_enable_shared_cache $::enable_shared_cache
 finish_test
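The new shared-$av.12.* tests pin one cursor per connection so that the shared cache is forced past its PRAGMA cache_size limit. For readers more familiar with the C API than the Tcl harness, the equivalent shared-cache setup looks roughly like the sketch below; it is a minimal illustration with error handling omitted, and the file name is just an example.

  /* Minimal sketch: two connections sharing one page cache with a small
  ** cache_size, as the new shared-*.12.* tests arrange from Tcl. */
  #include <sqlite3.h>

  int main(void){
    sqlite3 *db1, *db2;

    sqlite3_enable_shared_cache(1);          /* must be set before opening */
    sqlite3_open("test.db", &db1);
    sqlite3_open("test.db", &db2);           /* second handle shares the cache */

    /* Keep the shared cache small; pages pinned by open cursors can still
    ** push the cache beyond this limit, which is what the tests verify. */
    sqlite3_exec(db1, "PRAGMA cache_size = 10", 0, 0, 0);

    sqlite3_close(db2);
    sqlite3_close(db1);
    sqlite3_enable_shared_cache(0);
    return 0;
  }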
Changes to test/sqllimits1.test.
︙

 # May you share freely, never taking more than you give.
 #
 #***********************************************************************
 #
 # This file contains tests to verify that the limits defined in
 # sqlite source file limits.h are enforced.
 #
+# $Id: sqllimits1.test,v 1.14 2007/09/01 16:16:16 danielk1977 Exp $

 set testdir [file dirname $argv0]
 source $testdir/tester.tcl

 # Test organization:
 #
 # sqllimits-1.*: SQLITE_MAX_LENGTH

︙

 } {1 {string or blob too big}}
 do_test sqllimits-1.10 {
   set ::str [string repeat %J 2100]
   catchsql {
     SELECT strftime($::str, '2003-10-31')
   }
 } {1 {string or blob too big}}

+do_test sqllimits-1.11 {
+  set ::str1 [string repeat A [expr {$SQLITE_MAX_LENGTH - 10}]]
+  set ::str2 [string repeat B [expr {$SQLITE_MAX_LENGTH - 10}]]
+  catchsql { SELECT $::str1 || $::str2 }
+} {1 {string or blob too big}}

 #--------------------------------------------------------------------
 # Test cases sqllimits-2.* test that the SQLITE_MAX_SQL_LENGTH limit
 # is enforced.
 #
 do_test sqllimits-2.1 {
   set sql "SELECT 1 WHERE 1==1"
   set N [expr {$::SQLITE_MAX_SQL_LENGTH / [string length " AND 1==1"]}]

︙

 } {1 {database or disk is full}}
 do_test sqllimits1-3.6 {
   catchsql {
     SELECT COUNT(*) FROM trig;
   }
 } {0 7}

+# Now check the response of the library to opening a file larger than
+# the current max_page_count value. The response is to change the
+# internal max_page_count value to match the actual size of the file.
+do_test sqllimits1-3.7.1 {
+  execsql {
+    PRAGMA max_page_count = 1000000;
+    CREATE TABLE abc(a, b, c);
+    INSERT INTO abc VALUES(1, 2, 3);
+    INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc;
+    INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc;
+    INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc;
+    INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc;
+    INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc;
+    INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc;
+    INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc;
+    INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc;
+    INSERT INTO abc SELECT a, b, c FROM abc;
+    INSERT INTO abc SELECT b, a, c FROM abc;
+    INSERT INTO abc SELECT c, b, a FROM abc;
+  }
+  expr [file size test.db] / 1024
+} {1691}
+do_test sqllimits1-3.7.2 {
+  db close
+  sqlite3 db test.db
+  execsql {
+    PRAGMA max_page_count = 1000;
+  }
+  execsql {
+    SELECT count(*) FROM sqlite_master;
+  }
+} {6}
+do_test sqllimits1-3.7.3 {
+  execsql {
+    PRAGMA max_page_count;
+  }
+} {1691}
+do_test sqllimits1-3.7.4 {
+  execsql {
+    DROP TABLE abc;
+  }
+} {}

 #--------------------------------------------------------------------
 # Test cases sqllimits1-4.* test the SQLITE_MAX_COLUMN limit.
 #
 do_test sqllimits-1.4.1 {
   # Columns in a table.
   set cols [list]

︙
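The comment introducing sqllimits1-3.7.* describes the behaviour under test: when a database file is already larger than the configured max_page_count, the limit is raised to match the file's actual page count. A C-level sketch of the same observation follows; the file name is an example and the printed number depends entirely on the database that happens to be on disk.

  /* Sketch of the behaviour exercised by sqllimits1-3.7.*: asking for a
  ** max_page_count smaller than the file's real page count just snaps the
  ** limit up to the real size once the database has been read. */
  #include <sqlite3.h>
  #include <stdio.h>

  static int printValue(void *pArg, int nCol, char **azVal, char **azCol){
    (void)pArg; (void)nCol; (void)azCol;
    printf("max_page_count = %s\n", azVal[0]);
    return 0;
  }

  int main(void){
    sqlite3 *db;
    sqlite3_open("test.db", &db);   /* an existing database of many pages */
    sqlite3_exec(db, "PRAGMA max_page_count = 1000", 0, 0, 0);
    sqlite3_exec(db, "SELECT count(*) FROM sqlite_master", 0, 0, 0);
    /* Reads back the file's real page count, not the 1000 requested above. */
    sqlite3_exec(db, "PRAGMA max_page_count", printValue, 0, 0);
    sqlite3_close(db);
    return 0;
  }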
Changes to test/zeroblob.test.
︙

 #
 #***********************************************************************
 # This file implements regression tests for SQLite library. The
 # focus of this file is testing of the zero-filled blob functionality
 # including the sqlite3_bind_zeroblob(), sqlite3_result_zeroblob(),
 # and the built-in zeroblob() SQL function.
 #
+# $Id: zeroblob.test,v 1.8 2007/09/01 16:16:16 danielk1977 Exp $

 set testdir [file dirname $argv0]
 source $testdir/tester.tcl

 ifcapable !incrblob {
   finish_test
   return

︙

 } {SQLITE_ROW}
 do_test zeroblob-7.2 {
   sqlite3_column_int $::STMT 0
 } {450}
 do_test zeroblob-7.3 {
   sqlite3_finalize $::STMT
 } {SQLITE_OK}

+# Test that MakeRecord can handle a value with some real content
+# and a zero-blob tail.
+#
+do_test zeroblob-8.1 {
+  llength [execsql {
+    SELECT 'hello' AS a, zeroblob(10) as b from t1 ORDER BY a, b;
+  }]
+} {8}

 finish_test
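The new zeroblob-8.1 test drives record generation with a zero-blob tail through SQL. The corresponding C entry point named in this file's header is sqlite3_bind_zeroblob(); a minimal usage sketch follows, where the table, column names, and sizes are made up for illustration and error handling is trimmed to the essentials.

  /* Minimal sketch of sqlite3_bind_zeroblob(): insert a row whose second
  ** column is a 10-byte zero-filled blob, without materialising those bytes
  ** in the statement itself.  Table and sizes are illustrative. */
  #include <sqlite3.h>

  int insert_zeroblob_row(sqlite3 *db){
    sqlite3_stmt *pStmt;
    int rc = sqlite3_prepare_v2(db,
        "INSERT INTO t1(a, b) VALUES('hello', ?1)", -1, &pStmt, 0);
    if( rc!=SQLITE_OK ) return rc;
    sqlite3_bind_zeroblob(pStmt, 1, 10);   /* parameter ?1 becomes zeroblob(10) */
    sqlite3_step(pStmt);
    return sqlite3_finalize(pStmt);
  }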